From dfd13ac70d7b7f3d67949ab9bba7b1b079d0d72c Mon Sep 17 00:00:00 2001 From: "Daniel J. Magee" Date: Thu, 5 Oct 2023 12:00:56 -0600 Subject: [PATCH 1/3] Modified pavilion and stream --- utils/pav_config/hosts/crossroads.yaml | 11 ++++++----- utils/pav_config/tests/stream.yaml | 3 +++ 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/utils/pav_config/hosts/crossroads.yaml b/utils/pav_config/hosts/crossroads.yaml index 4e8b115c..e94ebcf9 100755 --- a/utils/pav_config/hosts/crossroads.yaml +++ b/utils/pav_config/hosts/crossroads.yaml @@ -6,14 +6,15 @@ variables: crayversion: "16.0.0" craympichversion: "8.1.26" partn: 'standard' + mpis: - { name: "cray-mpich", version: "{{craympichversion}}", mpicc: "cc", mpicxx: "CC", mpifc: "ftn", mpival: "cray"} compilers: - - { name: "intel-classic", version: "{{intelversion}}", cc: "cc", cxx: "CC", pe_env: intel, fc: "ftn" } - - { name: "intel-oneapi", version: "{{intelversion}}", cc: "cc", cxx: "CC", pe_env: intel, fc: "ftn" } - - { name: "intel", version: "{{intelversion}}", cc: "cc", cxx: "CC", pe_env: intel, fc: "ftn" } - - { name: "cce", version: "{{crayversion}}", cc: "cc", cxx: "CC", pe_env: cray, fc: "ftn" } - + - { name: "intel-classic", version: "{{intelversion}}", cc: "cc", cxx: "CC", pe_env: intel, fc: "ftn", blas_cflags: '-DUSE_CBLAS=1 -O3 -qopenmp', arch_opt: ''} + - { name: "intel-oneapi", version: "{{intelversion}}", cc: "cc", cxx: "CC", pe_env: intel, fc: "ftn", blas_cflags: '-DUSE_CBLAS=1 -O3 -fopenmp', arch_opt: ''} + - { name: "intel", version: "{{intelversion}}", cc: "cc", cxx: "CC", pe_env: intel, fc: "ftn", blas_cflags: '-DUSE_CBLAS=1 -O3 -qopenmp', arch_opt: ''} + - { name: "cce", version: "{{crayversion}}", cc: "cc", cxx: "CC", pe_env: cray, fc: "ftn", blas_cflags: '-DUSE_CBLAS=1 -O3 -openmp', arch_opt: ''} + # - { name: "gcc", version: "12.2.0", pe_env: "PrgEnv-gnu", cc: "cc", cxx: "CC", fc: "ftn", blas_cflags: '-DUSE_CBLAS=1 -O3 -fopenmp', arch_opt: ''} scratch: - name: xrscratch path: "/lustre/xrscratch1/{{pav.user}}" diff --git a/utils/pav_config/tests/stream.yaml b/utils/pav_config/tests/stream.yaml index fd438cc8..a6d813e9 100644 --- a/utils/pav_config/tests/stream.yaml +++ b/utils/pav_config/tests/stream.yaml @@ -98,7 +98,10 @@ _base: nodes: '10' # 'ALL' tasks_per_node: 1 share_allocation: false + + variables: ntimes: '10' + stream_array_size: '40' permute_on: - compilers From 1ec48e6b5807f5979bb08971be377a6f22dc7c14 Mon Sep 17 00:00:00 2001 From: "Daniel J. Magee" Date: Fri, 6 Oct 2023 12:49:49 -0600 Subject: [PATCH 2/3] Fixed STREAM docs, previous advice was wrong. MDTEST doc finished except results. --- .../09_Microbenchmarks/M1_STREAM/STREAM.rst | 14 +++-- .../09_Microbenchmarks/M8_MDTEST/MDTEST.rst | 63 ++++++++++++++++--- microbenchmarks/mdtest/README.XROADS.md | 3 +- parthenon | 2 +- sparta | 2 +- trilinos | 2 +- utils/pav_config/tests/stream.yaml | 20 +++--- utils/pavilion | 2 +- 8 files changed, 78 insertions(+), 30 deletions(-) diff --git a/doc/sphinx/09_Microbenchmarks/M1_STREAM/STREAM.rst b/doc/sphinx/09_Microbenchmarks/M1_STREAM/STREAM.rst index 88199844..136bb2bc 100644 --- a/doc/sphinx/09_Microbenchmarks/M1_STREAM/STREAM.rst +++ b/doc/sphinx/09_Microbenchmarks/M1_STREAM/STREAM.rst @@ -73,12 +73,13 @@ Adjustments to ``GOMP_CPU_AFFINITY`` may be necessary. The ``STREAM_ARRAY_SIZE`` value is a critical parameter set at compile time and controls the size of the array used to measure bandwidth. 
 STREAM requires different amounts of memory to run on different systems, depending on both the system cache size(s) and the granularity of the system timer.
-You should adjust the value of ``STREAM_ARRAY_SIZE`` to meet BOTH of the following criteria:
+You should adjust the value of ``STREAM_ARRAY_SIZE`` to meet ALL of the following criteria:
 
 1. Each array must be at least 4 times the size of the available cache memory. In practice the minimum array size is about 3.8 times the cache size.
    1. Example 1: One Xeon E3 with 8 MB L3 cache ``STREAM_ARRAY_SIZE`` should be ``>= 4 million``, giving an array size of 30.5 MB and a total memory requirement of 91.5 MB.
    2. Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP) ``STREAM_ARRAY_SIZE`` should be ``>= 20 million``, giving an array size of 153 MB and a total memory requirement of 458 MB.
-2. The size should be large enough so that the 'timing calibration' output by the program is at least 20 clock-ticks. For example, most versions of Windows have a 10 millisecond timer granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds. If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec. This means the each array must be at least 1 GB, or 128M elements.
+2. The size should be large enough so that the 'timing calibration' output by the program is at least 20 clock-ticks. For example, most versions of Windows have a 10 millisecond timer granularity. 20 "ticks" at 10 ms/tick is 200 milliseconds. If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec. This means each array must be at least 1 GB, or 128M elements.
+3. The value ``24 x STREAM_ARRAY_SIZE x RANKS_PER_NODE`` must be less than the amount of RAM on a node. Each rank allocates 3 arrays of doubles, and 3 arrays x 8 bytes per element is where the 24 comes from.
 
 Set ``STREAM_ARRAY_SIZE`` using the -D flag on your compile line.
 
@@ -88,8 +89,11 @@ The formula for ``STREAM_ARRAY_SIZE`` is:
 
    ARRAY_SIZE ~= 4 x (last_level_cache_size x num_sockets) / size_of_double = last_level_cache_size
 
-This reduces to the same number of elements as bytes in the last level cache of a single processor for two socket nodes.
-This is the minimum size.
+This reduces to a number of elements equal to the size of the last level cache of a single socket in bytes, assuming a node has two sockets.
+This is the minimum size unless other system attributes constrain it.
+
+The array size only influences whether STREAM can fully load the memory bus.
+At capacity, the measured values reach a steady state where increasing ``STREAM_ARRAY_SIZE`` further does not change the measurement for a given processor count.
 
 Running
 =======
@@ -117,7 +121,7 @@ Crossroads
 These results were obtained using the cce v15.0.1 compiler and cray-mpich v 8.1.25.
 Results using the intel-oneapi and intel-classic v2023.1.0 and the same cray-mpich were also collected; cce performed the best.
 
-``STREAM_ARRAY_SIZE=105 NTIMES=20``
+``STREAM_ARRAY_SIZE=40 NTIMES=20``
 
 .. csv-table:: STREAM microbenchmark bandwidth measurement
    :file: stream-xrds_ats5cce-cray-mpich.csv
diff --git a/doc/sphinx/09_Microbenchmarks/M8_MDTEST/MDTEST.rst b/doc/sphinx/09_Microbenchmarks/M8_MDTEST/MDTEST.rst
index 74029613..3bd11eae 100644
--- a/doc/sphinx/09_Microbenchmarks/M8_MDTEST/MDTEST.rst
+++ b/doc/sphinx/09_Microbenchmarks/M8_MDTEST/MDTEST.rst
@@ -5,6 +5,7 @@ MDTEST
 
 Purpose
 =======
+The intent of this benchmark is to measure the performance of file metadata operations on the platform storage.
 MDtest is an MPI-based application for evaluating the metadata performance of a file system.
 It can be run on any type of POSIX-compliant file system, but it has been designed to test the performance of parallel file systems.
@@ -16,11 +17,19 @@ Characteristics
 
 Problem
 -------
+MDtest measures the performance of various metadata operations, using MPI to coordinate execution and collect the results.
+In this case, the operations in question are file creation, stat, and removal.
+
 Run Rules
 ---------
-Figure of Merit
----------------
+Observed benchmark performance shall be obtained from a storage system configured as closely as possible to the proposed platform storage.
+If the proposed solution includes multiple file access protocols (e.g., pNFS and NFS) or multiple tiers accessible by applications, benchmark results for mdtest shall be provided for each protocol and/or tier.
+
+Performance projections are permissible if they are derived from a similar system that is considered an earlier generation of the proposed system.
+
+Modifications to the benchmark application code are only permissible to enable correct compilation and execution on the target platform.
+Any modifications must be fully documented (e.g., as a diff or patch file) and reported with the benchmark results.
 
 Building
 ========
@@ -35,17 +44,53 @@ After extracting the tar file, ensure that the MPI is loaded and that the releva
 
 Running
 =======
-.. .. csv-table:: MDTEST Microbenchmark
-.. :file: ats3_mdtest_sow.csv
-.. :align: center
-.. :widths: 10, 10, 10, 10, 10
-.. :header-rows: 1
-.. :stub-columns: 2
+The results for the three operations (create, stat, and remove) should be obtained for three different file configurations:
+
+1) ``2^20`` files in a single directory.
+2) ``2^20`` files in separate directories, 1 per MPI process.
+3) 1 file accessed by multiple MPI processes.
+
+These configurations are launched as follows.
+
+.. code-block:: bash
+
+   # Shared Directory
+   srun -n 64 ./mdtest -F -C -T -r -n 16384 -d /scratch/$USER -N 16
+   # Unique Directories
+   srun -n 64 ./mdtest -F -C -T -r -n 16384 -d /scratch/$USER -N 16 -u
+   # One File Multi-Proc
+   srun -n 64 ./mdtest -F -C -T -r -n 16384 -d /scratch/$USER -N 16 -S
+
+The following command-line flags MUST be changed:
+
+* ``-n`` - the number of files **each MPI process** should manipulate. For a test run with 64 MPI processes, specifying ``-n 16384`` will produce the required ``2^20`` files (``2^6`` MPI processes x ``2^14`` files each). This parameter must be changed for each level of concurrency.
+* ``-d /scratch`` - the **absolute path** to the directory in which this test should be run.
+* ``-N`` - MPI rank offset for each separate phase of the test. This parameter must be equal to the number of MPI processes per node in use (e.g., ``-N 16`` for a test with 16 processes per node) to ensure that each test phase (create, stat, and remove) is performed on a different node.
+
+The following command-line flags MUST NOT be changed or omitted:
+
+* ``-F`` - only operate on files, not directories
+* ``-C`` - perform file creation test
+* ``-T`` - perform file stat test
+* ``-r`` - perform file remove test
 
 Example Results
 ===============
 
-.. 
csv-table:: MDTEST Microbenchmark Xrds
+These nine tests (three operations across three file configurations) should be performed under four different launch conditions, for a total of 36 results:
+
+1) A single MPI process.
+2) The optimal number of MPI processes on a single compute node.
+3) The minimal number of MPI processes on multiple compute nodes that achieves the peak results for the proposed system.
+4) The maximum possible MPI-level concurrency on the proposed system. This could mean:
+
+   1) Using one MPI process per CPU core across the entire system.
+   2) Using the maximum number of MPI processes possible if one MPI process per core will not be possible on the proposed architecture.
+   3) Using more than ``2^20`` files if the system is capable of launching more than ``2^20`` MPI processes.
+
+Crossroads
+----------
+
+.. csv-table:: MDTEST Microbenchmark Crossroads
    :file: ats3_mdtest.csv
    :align: center
    :widths: 10, 10, 10, 10, 10
diff --git a/microbenchmarks/mdtest/README.XROADS.md b/microbenchmarks/mdtest/README.XROADS.md
index 2783b547..90f065a6 100644
--- a/microbenchmarks/mdtest/README.XROADS.md
+++ b/microbenchmarks/mdtest/README.XROADS.md
@@ -57,8 +57,7 @@ node memory.
 
 The Offeror shall run the following tests:
 
-* creating, statting, and removing at least 1,048,576 files in a single
-  directory
+* creating, statting, and removing at least 1,048,576 files in a single directory.
 * creating, statting, and removing at least 1,048,576 files in separate
   directories (one directory per MPI process)
 * creating, statting, and removing one file by multiple MPI processes
diff --git a/parthenon b/parthenon
index 11c53d1c..c75ce20f 160000
--- a/parthenon
+++ b/parthenon
@@ -1 +1 @@
-Subproject commit 11c53d1cd4ada0629e06d069b70b410234ed0bde
+Subproject commit c75ce20f938a4adaedb4425584954c3e74d56868
diff --git a/sparta b/sparta
index 83d5f3a9..ca0ce28f 160000
--- a/sparta
+++ b/sparta
@@ -1 +1 @@
-Subproject commit 83d5f3a92c5fc0b59d4d973c6b1dddc4d77a7147
+Subproject commit ca0ce28fd76080d8b2828db77adde14fdc382c76
diff --git a/trilinos b/trilinos
index f3ff0b54..5aaae1ad 160000
--- a/trilinos
+++ b/trilinos
@@ -1 +1 @@
-Subproject commit f3ff0b54c5158790295daff089ff0d286bda3c2c
+Subproject commit 5aaae1ada6fe1ce777e671a0ff84fdc4f0779406
diff --git a/utils/pav_config/tests/stream.yaml b/utils/pav_config/tests/stream.yaml
index a6d813e9..ba2c09d0 100644
--- a/utils/pav_config/tests/stream.yaml
+++ b/utils/pav_config/tests/stream.yaml
@@ -75,23 +75,23 @@ _base:
         STREAM ARRAY SIZE CALCULATIONS:
         ###############
-        STREAM - XRDS DOCUMENTATION: 4 x (45 MiB cache / processor) x (2 processors) / (3 arrays) / (8 bytes / element) = 15 Mi elements = 15000000
+        FORMULA:
+        4 x ((cache / socket) x (num sockets)) / (num arrays) / 8 (size of double)
         *****************************************************************************************************
         HASWELL: Intel(R) Xeon(R) CPU E5-2698 v3 @ 2.30GHz
         CACHE: 40M  SOCKETS: 2
-        4 * ( 40M * 2 ) / 3 ARRAYS / 8 Bytes/element = 13.4 Mi elements = 13400000
+        4 * ( 40M * 2 ) / 3 ARRAYS / 8 = 13.4 Mi elements = 13.4e6
         *****************************************************************************************************
         BROADWELL: Intel(R) Xeon(R) CPU E5-2695 v4 @ 2.10GHz
         CACHE: 45M  SOCKETS: 2
-        4 * ( 45M * 2 ) / 3 ARRAYS / 8 BYTES/ELEMENT = 15.0 Mi elements = 15000000
+        4 * ( 45M * 2 ) / 3 ARRAYS / 8 = 15.0 Mi elements = 15e6
         *****************************************************************************************************
         SAPPHIRE RAPIDS: Intel(R) Xeon(R) 
Platinum 8480+ - CACHE: 105 + CACHE: 105M SOCKETS: 2 - 4 x (105M * 2 ) / 3 ARRAYS / 8 BYTES/ELEMENT = 35 Mi elements = 35000000 + 4 x ( 105M * 2 ) / 3 ARRAYS / 8 = 35 Mi elements = 35e6 scheduler: slurm schedule: @@ -295,7 +295,7 @@ spr_ddr5_xrds: "{{sys_name}}": [ darwin ] variables: arch: "spr" - stream_array_size: '105' + stream_array_size: '40' target: "xrds-stream.exe" omp_num_threads: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] omp_places: [cores, sockets] @@ -324,7 +324,7 @@ spr_hbm_xrds: "{{sys_name}}": [ darwin ] variables: arch: "spr" - stream_array_size: '105' + stream_array_size: '40' target: "xrds-stream.exe" omp_num_threads: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] omp_places: [cores, sockets] @@ -400,8 +400,8 @@ xrds_ats5: tpn: [8, 32, 56, 88, 112] arch: "spr" target: "xrds-stream.exe" - stream_array_size: '105' - ntimes: 20 + stream_array_size: '40' + #ntimes: 20 #omp_places: [cores, sockets] #omp_proc_bind: [true] numnodes: '1' diff --git a/utils/pavilion b/utils/pavilion index 69b2d45d..f502ca86 160000 --- a/utils/pavilion +++ b/utils/pavilion @@ -1 +1 @@ -Subproject commit 69b2d45d696e623127c106b50525ba65daa23d76 +Subproject commit f502ca86fa27f4bc894aa19232c9f1f42361e269 From 43e3cc0648600efe0bb7b0b6b348f5c365fb89b9 Mon Sep 17 00:00:00 2001 From: "Daniel J. Magee" Date: Fri, 6 Oct 2023 15:41:37 -0600 Subject: [PATCH 3/3] Including stream in repo w/microbenchmarks. Adjustments to paviion settings. Fixup stream makefile. --- microbenchmarks/stream/Makefile | 30 + microbenchmarks/stream/README.ACES | 74 ++ microbenchmarks/stream/README.md | 561 ++++++++++++ .../stream/img/broadwell-cache-structure.png | Bin 0 -> 52882 bytes microbenchmarks/stream/mysecond.c | 28 + microbenchmarks/stream/splunk-stream.xml | 98 +++ microbenchmarks/stream/stream.c | 586 ++++++++++++ microbenchmarks/stream/stream.f | 464 ++++++++++ microbenchmarks/stream/stream_mpi.c | 832 ++++++++++++++++++ microbenchmarks/stream/xrds-stream.c | 401 +++++++++ utils/pav_config/test_src/dgemm | 1 + utils/pav_config/test_src/ior | 1 + utils/pav_config/test_src/mdtest | 1 + utils/pav_config/test_src/osumb | 1 + utils/pav_config/test_src/stream | 1 + utils/pav_config/tests/stream.yaml | 25 +- 16 files changed, 3091 insertions(+), 13 deletions(-) create mode 100644 microbenchmarks/stream/Makefile create mode 100644 microbenchmarks/stream/README.ACES create mode 100644 microbenchmarks/stream/README.md create mode 100644 microbenchmarks/stream/img/broadwell-cache-structure.png create mode 100644 microbenchmarks/stream/mysecond.c create mode 100644 microbenchmarks/stream/splunk-stream.xml create mode 100644 microbenchmarks/stream/stream.c create mode 100644 microbenchmarks/stream/stream.f create mode 100644 microbenchmarks/stream/stream_mpi.c create mode 100644 microbenchmarks/stream/xrds-stream.c create mode 120000 utils/pav_config/test_src/dgemm create mode 120000 utils/pav_config/test_src/ior create mode 120000 utils/pav_config/test_src/mdtest create mode 120000 utils/pav_config/test_src/osumb create mode 120000 utils/pav_config/test_src/stream diff --git a/microbenchmarks/stream/Makefile b/microbenchmarks/stream/Makefile new file mode 100644 index 00000000..e98efebf --- /dev/null +++ b/microbenchmarks/stream/Makefile @@ -0,0 +1,30 @@ +# +MPICC ?= $(PAV_MPICC) +CC ?= $(PAV_CC) +CFLAGS ?= $(PAV_CFLAGS) + +FF ?= $(PAV_FC) +FFLAGS ?= $(PAV_FFLAGS) + +ifeq ($(MPICC),) + MPICC=$(CC) +endif + +all: xrds-stream.exe stream_f.exe stream_c.exe stream_mpi.exe + +stream_f.exe: stream.f mysecond.o + $(CC) $(CFLAGS) -c mysecond.c + 
$(FF) $(FFLAGS) -c stream.f + $(FF) $(FFLAGS) stream.o mysecond.o -o stream_f.exe + +clean: + rm -f *stream*.exe *.o + +stream_c.exe: stream.c + $(CC) $(CFLAGS) stream.c -o stream_c.exe + +stream_mpi.exe: stream_mpi.c + $(MPICC) $(CFLAGS) stream_mpi.c -o stream_mpi.exe + +xrds-stream.exe: xrds-stream.c + $(CC) $(CFLAGS) xrds-stream.c -o xrds-stream.exe diff --git a/microbenchmarks/stream/README.ACES b/microbenchmarks/stream/README.ACES new file mode 100644 index 00000000..01cb258e --- /dev/null +++ b/microbenchmarks/stream/README.ACES @@ -0,0 +1,74 @@ +======================================================================================== +Crossroads Memory Bandwidth Benchmark +======================================================================================== +Benchmark Version: 1.0.0 + +======================================================================================== +Benchmark Description: +======================================================================================== +The Crossroads Memory Bandwidth benchmark is a modified version of the STREAM +benchmark originally written by John D. McCalpin. The modifications have been made to +simplify the code. All memory bandwidth projections/results provided in the vendor +response should be measured using the kernels in this benchmark implementation and +not the original STREAM code. + +======================================================================================== +Permitted Modifications: +======================================================================================== +Offerers are permitted to modify the benchmark in the following ways: + +OpenMP Pragmas - the Offeror may modify the OpenMP pragmas in the benchmark as required +to permit execution on the proposed system provided: (1) all modified sources and build +scripts are included in the RFP response; (2) any modified code used for the response +must continue to be a valid OpenMP program (compliant to the standard being proposed in +the Offeror's response). + +Memory Allocation Routines - memory allocation routines including modified allocations +to specify the level of the memory hierarchy or placement of the data are permitted +provided: (1) all modified sources and build scripts are included in the RFP response; +(2) the use of any specific libraries to provide allocation services must be provided +in the proposed system. + +Array/Allocation Sizes - the sizes of the allocated arrays may be modified to exercise +the appropriate size and level of the memory hierarchy provided the benchmark correctly +exercises the memory system being targeted. + +Index Type - the range of the index type is configured for a 32-bit signed integer ("int") +via the preprocessor define STREAM_INDEX_TYPE. If very large memories are benchmarked +the Offeror is permitted to change to a larger integer type. The Offeror should indicate +in their response that this modification has been made. + +Accumulation Error Checking Type - the basic accumulation error type is configured for a +64-bit double precision value via the STREAM_CHECK_TYPE preprocessor define. This may be +modified to a higher precision type ("long double") if a large memory (requiring a 64-bit +integer for STREAM_INDEX_TYPE) is used. The Offeror should indicate in their response +that this modification has been made. 
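+
+As one hedged illustration of the two type overrides described above -- the define
+names STREAM_INDEX_TYPE and STREAM_CHECK_TYPE come from this README, but the exact
+invocation is an assumption that depends on your compiler, flags, and Makefile, and
+the file names follow this README's usage -- a very-large-memory build might pass
+both overrides on the compile line:
+
+$ cc -O3 -fopenmp -DSTREAM_INDEX_TYPE="long long" -DSTREAM_CHECK_TYPE="long double" -o xroads-stream xroads-stream.c
+
+Both modifications should then be documented in the response, as required above.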
+
+========================================================================================
+Run Rules:
+========================================================================================
+The Offeror may utilize any number of threads, affinity and memory binding options for
+execution of the benchmark provided: (1) details of all command line parameters,
+environment variables and binding tools are included in the response.
+
+The vendor is expected to provide memory bandwidth projections/benchmarked results using
+the Crossroads memory bandwidth benchmark to each level of the memory hierarchy
+accessible from each compute resource.
+
+========================================================================================
+How to Compile, Run and Verify:
+========================================================================================
+To build, modify the file Makefile for your compiler and type make. To run,
+execute the file xroads-stream. xroads-stream performs self verification.
+
+$ make
+
+$ export OMP_NUM_THREADS=12
+$ ./xroads-stream
+
+========================================================================================
+How to report
+========================================================================================
+The primary FOM is the Triad rate (MB/s). Report all data printed to stdout.
+
diff --git a/microbenchmarks/stream/README.md b/microbenchmarks/stream/README.md
new file mode 100644
index 00000000..731676c9
--- /dev/null
+++ b/microbenchmarks/stream/README.md
@@ -0,0 +1,561 @@
+# Crossroads Acceptance
+
+- PI: Douglas M. Pase [dmpase@sandia.gov](mailto:dmpase@sandia.gov)
+- Erik A. Illescas [eailles@sandia.gov](mailto:eailles@sandia.gov)
+- Anthony M. Agelastos [amagela@sandia.gov](mailto:amagela@sandia.gov)
+
+This project tracks data and analysis from the Crossroads Acceptance effort.
+
+
+## STREAM
+STREAM, like DGEMM, is a microbenchmark that measures a single fundamental
+aspect of a system.
+But while DGEMM measures floating-point vector performance, STREAM
+measures memory bandwidth.
+More specifically, it measures the performance of two 64-bit loads and a
+store operation to a third location.
+The operation looks like this:
+```C
+for (int i=0; i < size; i++) {
+    x[i] = y[i] + constant * z[i];
+}
+```
+
+Two 64-bit words are loaded into cache and registers, combined arithmetically,
+and the result is stored in a third location.
+In cache-based microprocessors, this typically means that all three
+locations, ```x[i]```, ```y[i]```, and ```z[i]```, must be read into
+cache.
+Values ```y[i]``` and ```z[i]``` must be read because their values are needed
+for the computation, but ```x[i]``` must also be loaded into cache in order to
+maintain cache coherency.
+With no other hardware support, this would mean the best throughput one could
+hope for is about 75% of peak, because the processor must execute three loads
+(one for each of ```x[i]```, ```y[i]```, and ```z[i]```) and one store (```x[i]```),
+but it only gets credit for two loads (```y[i]``` and ```z[i]```) and one store (```x[i]```).
+And, (2+1)/(3+1)=75%.
+
+But most current architectures support an optimization feature, called a
+Microarchitectural Store Buffer (MSB), that speeds up the write operation in
+some cases.
+When a processor executes a cacheable write smaller than a cache line, the cache
+controller first checks whether the cache line is already resident in cache.
+When it is not, it caches the partial line in the MSB and sends the request on +to the memory controller to fetch the cache line. +If the rest of the cache line is overwritten before the data comes back from +the memory controller, the read request is cancelled and the new data is +moved from the MSB to the cache as a dirty (modified) cache line. +In this way the cache controller avoids the inefficiency of retrieving the +extra line from memory. + +Another architectural feature that affects STREAM performance is +Non-Uniform Memory Access (NUMA). +This is best illustrated with a picture. +The following block diagram shows the processor, core, cache, and memory +structure of a typical Intel processor-based compute node. + +![DGEMM Results](img/broadwell-cache-structure.png) +***Processor, Core, Cache, and Memory Structure for Sandia Compute Nodes*** + +The illustration shows two processors, each with their own local memory (DIMMs). +High-speed communication links (QPI links) connect the two processors. +The two processors share a physical memory address space, so both processors +can access not only their own local memory, but also memory that is attached +to the other processor. +A program running on one of the cores in a processor requests to load or store +an address. +In the event the address misses cache, the request is forwarded to the System +Agent, which then sorts out whether the address is local or remote. +The System Agent then forwards the request to the appropriate memory controller +to be handled. +The important concept to understand is that access to memory that is local is +faster than access to memory that is remote. + +The next concept to be concerned with is how memory is mapped to a program. +Programs do not reference physical memory directly. +Instead each program uses virtual addresses to reference data it uses, and +relies upon the hardware and operating system to translate those virtual +addresses into physical, or hardware, addresses. +Most modern NUMA-aware operating systems, Linux included, use a "first touch" +policy for deciding where memory is allocated. +When a program allocates memory it is only given new virtual addresses to use. +The physical memory backing the virtual address is allocated the first time +the memory is read or written. +Whenever possible, Linux allocates physical memory that is local to the +processor that is running the program at the moment the memory is first +referenced. +Linux then works very hard to keep the process running on the same core so +caches remain loaded and memory stays local. +For most programs this policy works very well, but for some threading +models, such as OpenMP, obtaining good performance in a NUMA environment +requires some care. + +Because we are running an OpenMP-enabled version of STREAM, we must be +sure to schedule the parallel loops statically so the same iterations +go to the same threads (and therefore the same NUMA domains) every time. +This will ensure that memory references are always local, and therefore +of highest performance. + +Note that the raw bandwidth of the node is determined by the number, width, +and speed of the memory channels. +In the above illustration there are four channels per processor, but the +number will vary from one architecture to the next. +Each memory channel is the width of the data path of a DIMM, which is always +64-bits, or 8 bytes. +The frequency will vary with the DIMM, and it's measured in mega-transfers +per second, or MT/s. 
(DDR memory transfers data on both edges of the clock, so the clock frequency
+is always half of the transfer rate.)
+Most systems at Sandia use memory speeds ranging from 2133 MT/s to 4800 MT/s.
+A Haswell node, with four channels per processor, two processors per node,
+and 2133 MT/s memory, will have a hardware (or peak) bandwidth of:
+
+```
+4 channels/processor x 2 processors x 8 bytes/channel x 2133 MT/s = 136,512 MB/s
+```
+
+The peak bandwidth represents a least upper bound, a limit you are guaranteed
+never to exceed.
+The performance you measure is the throughput, and it will always be a value
+below the peak bandwidth.
+The ratio of the two values, throughput over bandwidth, is known as the
+memory efficiency, and is a measure of how efficiently the memory transfers
+data to and from the processor.
+Modern processors typically have a memory efficiency of around 75% to 85%.
+New memory architectures may have somewhat less, and rarely, an exceptionally
+efficient system may have more than 90%.
+
+Yet another consideration is how the instructions are generated by the compiler.
+In this case there are two issues: the use of vector units, and how addressing
+is handled.
+Vector instructions reduce the number of instructions that need to be executed,
+which reduces memory traffic.
+AVX instructions use wider vectors than SSE4.2 vectors, so AVX instructions
+provide better performance than SSE4.2 instructions.
+Similarly, AVX2 is faster than AVX, and AVX-512 is faster than AVX2.
+
+Furthermore, how array indexes are generated and used can also have an impact
+on STREAM performance.
+For example, using statically allocated arrays allows the code generation to
+use a constant array base address for references to ```x```, ```y```, and ```z```.
+But using dynamically allocated arrays forces the program to load the base
+addresses into registers and index the arrays indirectly.
+On the Haswell architecture, at least, this impacts performance by roughly 25%.
+Fortunately, on other architectures the impact is less severe, or absent.
+
+We also noticed on some systems a drop in STREAM performance with intel 18.0
+and later compilers.
+We don't know with certainty what the cause is, but it is consistent, and we
+suspect it might have had something to do with the way arrays are indexed.
+
+The clock speed of the processor will also impact STREAM performance by a small
+amount.
+Faster processor clocks allow the processor to generate new memory requests
+more rapidly.
+And because the compute nodes in each of the clusters are not perfectly
+identical, this means some nodes will have faster STREAM throughput than
+others.
+
+Our STREAM measurements are laid out in the following table.
+
+| Cluster | Family | Processor | Ch./Node | DIMM MT/s | Bandwidth | STREAM Static (MB/s) | Eff. | STREAM Dynamic (MB/s) |
+| :-----: | :----: | :-------: | :------: | :-------: | --------: | ----------: | ---: | ------: |
+| Mutrino | Haswell | E5-2698 v3 | 8 | 2133 | 136.5 GB/s | 119361.4101 | 87.4% | 90583.2 |
+| Eclipse | Broadwell | E5-2695 v4 | 8 | 2400 | 153.6 GB/s | 130660.2840 | 85.1% | 131046.4 |
+| Attaway | Skylake | Gold 6140 | 12 | 2666 | 256.0 GB/s | 186956.2034 | 73.0% | 185199.4 |
+| Manzano | Cascade Lake | Platinum 8268 | 12 | 2933 | 281.6 GB/s | 221318.2715 | 78.6% | 221242.9 |
+| cxsr | Sapphire Rapids | Sample | 16 | 4800 | 614.4 GB/s | 373103.3951 | 60.7% | 367872.5 |
+
+Worthy of note here is the low memory efficiency of the cxsr system.
+The explanation for this is simple -- it is that these processors are
+early pre-production samples, and not fully performance enabled.
+The understanding is that these systems are only able to achieve about
+80% of the bandwidth.
+Taking that into account the memory efficiency would rise to almost 76%,
+which is within the expected range.
+
+OpenMP has environment variables that allow you to control how threads are
+distributed around the node.
+Two such environment variables are ```OMP_PROC_BIND``` and ```OMP_PLACES```.
+Their allowed values are:
+
+| Variable | Value | Meaning |
+| :--- | :--- | :--- |
+| ```OMP_PROC_BIND``` | true | threads should not be moved |
+| | false | threads may be moved |
+| | spread | threads are distributed across partitions (i.e., NUMA domains) |
+| | close | threads are kept close to the master thread |
+| ```OMP_PLACES``` | sockets | OpenMP threads are placed on successive sockets |
+| | cores | OpenMP threads are placed on successive hardware cores |
+| | threads | OpenMP threads are placed on successive hardware threads (i.e., virtual cores) |
+
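+A quick way to see what a given combination of these settings does on a node
+is a small OpenMP probe. The sketch below is not part of the benchmark and is
+illustrative only; it uses the standard OpenMP 4.5 placement query routines:
+```C
+#include <stdio.h>
+#include <omp.h>
+
+int main(void)
+{
+    /* Build with OpenMP enabled, then run under different
+       OMP_PROC_BIND / OMP_PLACES settings and compare where
+       each thread lands. */
+    #pragma omp parallel
+    {
+        printf("thread %3d of %3d -> place %3d of %3d\n",
+               omp_get_thread_num(), omp_get_num_threads(),
+               omp_get_place_num(), omp_get_num_places());
+    }
+    return 0;
+}
+```
+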
+ +A complete table of the STREAM results for various settings for Mutrino +using the intel/19.1.3 icc compiler is laid out in the following table. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+**Haswell** -- columns give the ```OMP_PROC_BIND``` setting; each cell shows ```xroads-stream.c``` / ```stream_d_omp.c``` throughput in MB/s.
+
+| OMP_PLACES | Compiler | undefined | close | false | spread | true |
+| :--- | :--- | ---: | ---: | ---: | ---: | ---: |
+| undefined | intel/19.0.4 | 52020.2 / 72461.6026 | 90637.8 / 118811.0970 | 53463.0 / 76455.0476 | 90631.3 / 118860.3734 | 90583.2 / 118852.3040 |
+| undefined | intel/19.1.3 | 53153.9 / 73036.5731 | 90596.2 / 119063.8735 | 54403.5 / 72259.5228 | 90579.9 / 119138.5591 | 90587.3 / 119028.3251 |
+| sockets | intel/19.0.4 | 90610.1 / 118807.4161 | 90668.9 / 118843.3588 | 90576.7 / 118814.0770 | 90575.9 / 118831.7846 | 90644.4 / 118886.8698 |
+| sockets | intel/19.1.3 | 90616.6 / 118996.6661 | 90607.7 / 119004.9310 | 90620.7 / 119029.9084 | 90583.2 / 119013.0211 | 90641.9 / 118989.4573 |
+| cores | intel/19.0.4 | 90620.7 / 118831.7846 | 90647.6 / 118850.9008 | 90613.4 / 118808.9936 | 90696.6 / 118866.1630 | 90589.7 / 118855.2861 |
+| cores | intel/19.1.3 | 90627.2 / 119081.4796 | 90620.7 / 119058.5927 | 90620.7 / 119089.0518 | 90511.5 / 119087.4669 | 90645.2 / 119097.1534 |
+| threads | intel/19.0.4 | 90593.0 / 118792.6946 | 45169.7 / 57789.0339 | 45171.3 / 57786.6287 | 90600.3 / 118754.5059 | 90627.2 / 118831.7846 |
+| threads | intel/19.1.3 | 90641.1 / 119050.6723 | 45160.3 / 58005.8595 | 45168.0 / 58011.8348 | 90576.7 / 119040.9934 | 90597.1 / 118928.1295 |
+
+ +As can be easily seen from the table, certain combinations yield +significantly lower performance results than others. +For example, ```OMP_PROC_BIND``` set to ```false``` combined with +```OMP_PLACES``` set to ```threads``` or undefined causes the +memory throughput to drop significantly. +Furthermore, setting ```OMP_PROC_BIND=close``` and ```OMP_PLACES=threads``` +or leaving both ```OMP_PROC_BIND``` and ```OMP_PLACES``` undefined also +severely impacts performance. + +Notice that setting ```OMP_PROC_BIND=false``` or ```OMP_PROC_BIND=close``` +combined with ```OMP_PLACES=threads``` causes OpenMP threads to be allocated +all on the same NUMA domain, which cuts the memory throughput by half. +Setting ```OMP_PLACES``` as undefined while also leaving ```OMP_PROC_BIND``` +undefined or setting it to ```false``` causes a different problem. +It allows threads to migrate between NUMA domains, which causes some +references to be non-local, slowing down the performance overall. +Also note that these effects are specific to the Intel compiler in this +specific environment. +This behavior, especially the behavior when these variables are undefined, +can vary depending on the compiler vendor (e.g., GNU or Intel) and environment +(e.g., Cray or TOSS). + +It is also clear from the table that there is a significant performance difference, +about 25% to 30%, between ```xroads-stream.c``` and ```stream_d_omp.c```. +The only significant difference in the source code between these two versions is +that the memory is allocated dynamically in the ```xroads-stream.c``` version, +and allocated statically in ```stream_d_omp.c```. +This illustrates how minor changes to code generation, in this case how the +memory is addressed, can result in significant changes in performance. + +It also illustrates that this is a problem with the architecture, though, and +not necessarrily an indication that the code generation is done incorrectly. +It is a common requirement that data references be generated from addresses +that are dynamically specified. +Two examples are, when the memory is dynamically allocated, as is the case +here, and when the address is passed into a subroutine or function. +In both cases, building the code using a static base address is simply not +an option. +Thus it is up to the architecture to minimize the cost if it can. +Haswell imposes a penalty for referencing an address generated from +a base address in a register, while later architectures +(e.g., Intel Broadwell, Skylake, and Canon Lake) may not. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+**Canon Lake** -- columns give the ```OMP_PROC_BIND``` setting; each cell shows ```xroads-stream.c``` / ```stream_d_omp.c``` throughput in MB/s.
+
+| OMP_PLACES | Compiler | undefined | close | false | spread | true |
+| :--- | :--- | ---: | ---: | ---: | ---: | ---: |
+| undefined | intel/16.0 | 184007.8 / 220570.2428 | 219376.9 / 220631.2772 | 219018.9 / 205437.9183 | 219439.1 / 220537.6245 | 219281.3 / 220634.2996 |
+| undefined | intel/21.3.0 | 163099.4 / 195830.6060 | 163276.6 / 207675.2236 | 148348.4 / 196058.5001 | 163276.6 / 207625.9637 | 163311.0 / 207601.3426 |
+| sockets | intel/16.0 | 219314.8 / 220580.5136 | 219539.6 / 220717.7499 | 189887.8 / 186992.2348 | 219381.7 / 220570.8470 | 219314.8 / 220587.7641 |
+| sockets | intel/21.3.0 | 163266.0 / 207473.5135 | 163231.6 / 207417.4041 | 163189.3 / 207229.5250 | 163255.4 / 207397.1053 | 163199.8 / 207325.5570 |
+| cores | intel/16.0 | 219276.6 / 220613.7490 | 219415.2 / 220588.3683 | 219415.2 / 204518.6163 | 219357.8 / 220626.4415 | 219314.8 / 220643.9717 |
+| cores | intel/21.3.0 | 163242.2 / 207684.3285 | 163276.6 / 207621.1461 | 163244.8 / 207610.4410 | 163244.8 / 207711.1123 | 163287.2 / 207711.1123 |
+| threads | intel/16.0 | 219458.2 / 220666.9465 | 107706.2 / 108841.0698 | 219095.2 / 220471.8131 | 219357.8 / 220613.7490 | 219520.4 / 220717.7499 |
+| threads | intel/21.3.0 | 163252.8 / 207404.0492 | 80412.3 / 100545.1554 | 80415.5 / 100557.8360 | 163231.6 / 207397.1053 | 163242.2 / 207363.4608 |
+
+ +It is interesting to note that (generally) the Intel 16.0 icc compiler +outperforms the 21.3.0 compiler, even though the 21.3.0 compiler has +had more development time to mature and improve. +It would be easy to expect that later is better, but in this case it +appears otherwise. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+**Sapphire Rapids** -- columns give the ```OMP_PROC_BIND``` setting; each cell shows ```xroads-stream.c``` / ```stream_d_omp.c``` throughput in MB/s.
+
+| OMP_PLACES | Compiler | undefined | close | false | spread | true |
+| :--- | :--- | ---: | ---: | ---: | ---: | ---: |
+| undefined | icc | 216079.9 / 274053.5539 | 371114.9 / 371880.1053 | 215905.0 / 214983.4133 | 372093.0 / 371588.3942 | 373540.9 / 372396.0083 |
+| undefined | icx | 218043.1 / 219178.0822 | 371229.7 / 370799.5365 | 218678.8 / 215439.8564 | 373715.4 / 371157.9354 | 373192.3 / 370942.8130 |
+| sockets | icc | 365352.4 / 368156.8840 | 366524.1 / 370221.7580 | 246305.4 / 277893.0840 | 368154.6 / 368595.0055 | 368154.6 / 369813.7252 |
+| sockets | icx | 367928.9 / 367393.8002 | 367309.5 / 368451.3529 | 215265.9 / 219729.9153 | 364686.2 / 366902.3505 | 368154.6 / 368451.3529 |
+| cores | icc | 372266.2 / 373536.0490 | 372902.4 / 372447.6774 | 218878.2 / 208423.4091 | 372497.3 / 372740.7396 | 372960.4 / 371245.7902 |
+| cores | icx | 372555.1 / 369017.8743 | 371689.6 / 371014.4928 | 213808.5 / 217539.0891 | 370828.2 / 369728.4806 | 371920.0 / 372670.8075 |
+| threads | icc | 372612.9 / 372533.8243 | 186234.2 / 186898.0616 | 218142.2 / 214736.9122 | 372381.7 / 372309.9251 | 371747.2 / 372447.6774 |
+| threads | icx | 372381.7 / 370013.4901 | 186538.2 / 186552.6623 | 218479.7 / 214765.1007 | 372381.7 / 370441.8291 | 372497.3 / 370584.8292 |
+
+ +Size also plays a role in determining STREAM performance. +As with any program, the act of measuring the performance subtly alters the +behavior, and performance, of that program. +Especially on small microbenchmarks like STREAM, setting up timers and loops +takes time away from the activity being measured. +That set-up time needs to be amortized across the activity, and amortization +is more effective when the time spent in set-up is very small relative to the +activity being measured. +This all suggests that larger array sizes will show better performance than +smaller sizes. +But it is also the case that very large memory may require the system to use +more expensive measures to fetch the data, such as deeper page table walks. +So, tuning the benchmark for size, large enough to amortize the cost of timers +and loops, but not so large as to slow access down, needs to be done. +For the version that uses statically allocated memory (```stream_d_omp.c```), +the ideal memory size is the maximum the compiler allows, or 80,000,000 elements. +For the version that uses dynamically allocated memory (```xroads-stream.c```), +we saw improvements in performance up to around 100,000,000 elements. +After that, it took more time to run the benchmark but did not significantly +improve our results. + +It must also be noted that going with *very* small memory also gives faster +results, but doing so violates the benchmark run rules. +The STREAM run rules require that the memory size be at least 4x the size of +cache, or the results measure cache performance instead of memory performance, +which is not what is wanted here. +The actual size of STREAM memory is calculated as +``` +memory size in bytes = 3 arrays x 8 bytes / element x number of elements / array +``` +This value must be at least 4 times larger than the total available cache. +On our Broadwell processors there is 45 MiB per processor, or 90 MiB per node. +That implies we need a size of at least 15 million (15 x 2**20) elements. +``` +4 x (45 MiB cache / processor) x (2 processors) / (3 arrays) / (8 bytes / element) = 15 Mi elements +``` + +And finally, STREAM results are very sensitive to other activities going on +within the system, so STREAM needs to be run multiple times for an accurate +picture to emerge. +Even when the system is otherwise "quiet", the operating system services +interrupts and schedules maintenance tasks that steal minor amounts of +memory throughput from user programs. +Furthermore, this activity may force user threads to migrate from one +core to another, or from one NUMA domain to another, which may also affect +performance. +Running STREAM multiple times is the only way to see what the memory +throughput really is -- a statistical population of values rather than a +single value. 
diff --git a/microbenchmarks/stream/img/broadwell-cache-structure.png b/microbenchmarks/stream/img/broadwell-cache-structure.png
new file mode 100644
index 0000000000000000000000000000000000000000..a723047075541361cbd17d42adc43dc3c13f2f02
GIT binary patch
literal 52882
zqjK9rPN?ifwgbMzNv?SFulB=5TOE9=`t_|+gO!&#rkdv7rVoRcH&kgad^#D*$xQdl zyke|=7UBbkm88fGVq|yg0{>S`*df?=Hnv&31#} z9|*N{U2Q1nxXb`w_s9jq!FblW$P)cwE0=8gl}wKH&oPIC8O5b}`0<3s3VE`4f#n8;cAG`%hAdb>e5v_ds>q&jKv- z)h8Lq-&tldG*!-zkCt*q=^H`y8&yD3)T+u#M}WYY&XTd@n%|KH2>g73kjW(J2pHXg zIlmu?%~aOZ)izuP)r;M8F7j)HRa5=D5!ImOlFSsgV*LH=yz`LB*FB3+SV!6Z@GD4v z3gP$nyCM`T(dFT~S%XC9*8;U$O2^%tU(?jddmX}h%X&B~$&pXcw zm{f40EQt@BNCadMb>zfuy(@Z|JzEDzmhj2ZetfbDg7v5ERTd;&$ec!Xdc#`g`7&8% z8cJn?k{}BRn|`l2%TLTu7u!6G%guwYe_bxw^>dL-QjfJofUMWt z`}4@0ylrEzbhs_2+J#R$=g!~~!;r_C9Wr^FOQOasMt|vZKAmK`JNtdCmwa*ak>UB) zB{;&}AO}qj#FO;zcQU$F+8j5bd;4AeeTuni9i8EcX~ORnncRl`Iq&Q+<|-0*|& z?+BtovNun?!q-+6ARz7=BNK0T-6ubv+_Yif`uB zqg*Uf7VF9fK#}zKEj3WjDAZeYK}>{UK-mzF=ue|C^{C%GX)fybx*e?vb{2}fx73BHV?UMDe zZ10||vI-#K3O<4uj{C0>3#t=xM&!9)A)nvRUjE@61a!q{_piRh|7jr=Ht#1nCV|>T zsYNnkb-8p1l2T!txkqyT{QaMW?M;Ro?g#gg@vc9avYm zA9r{3_Vu01%PZ=6y288p^^j1QAe%~M+rNw_70o`JrrpoNcmL`25fokD7c%&_HbaJe zl4B);!o9sWVWMLDN*dtLfI%#2TeDNw!TcWx)!fMsb?<Qd{X<>z zggDG3Rxh7K~@medNoqx6JxEAWF9GU?Tx(Bn9ZpKDQAMVV3qdf#-|UGpG9!E zTiW_4B#U_KTuwH$DY9_NXtygq%Vj{NH;0K!~C8GlI zzS!J@7AovhoklTQ?9;rd9na@eAkp-`mvh#yRc!@O0`2*X+9Pk*n`Z4jwX>h#{7y|G zMz-|E#NE?`<3^-=F<(xFEfm{g@E-}*VN`PpQ!WKGU7|XWsd+g6c;v%SnO<6au72&| z<)vHw9MiqgL|nvL!PF{J5OFUL>yiCOo#cERS%NqyF1mXX!>YCeqrV1KRuS$QK&1eLdWK|#aG_J?Ck z&5ze^o`k2wz*A0*`@4mSYy1kWS6jk5!#>EzayHisggTo>RzJj!JI{*QVdJ)LE3ez> z?tCs9oI1%o6W!3Xl&rT_*NS9KBy&F-9qXNI< z9y0vIc$**J7CLxEJyhn( z$FdFTP4 zwE7Z1a{dH!{#cU24`8DdY~%MWf2sE-jy73wG1pSNxKe*&CX7_xC9Zx{T_v{F-423hq?RaKt@_>XxODFtc*bTHs&l`8v+9RvZ5Z@oR$=1bc$3lV z;JA@A+Hz!BV1nl1OD}e8sk&Cns%`*uuFx{eFW2L*tv7}@GuHC1(2;+cgq3g0WuXBZ z_%N_iDP&cX(4=!gz@PnK#I5DOjUK1oKQR$!5JqrXl(X2gYT4XDF2mPH79y*D_q%aJ z@C=)2n(P%-g#WzYf5UHv9IaVzJ@I@emtf*WHfk|S^+?mr$!mt8Lmx_mTRsf2@_x)> ze0i;>@musjSrW3u0#b+T;VuBFqY&+A8sIWP9k^_rLoWKnE7NQ8+b#f6T51uYUEveYgZDRp8r_d5%bN-mA-!G8cfdWWxcMt6S+tMV0J+L9hH!W`oIWyY#OMOGBO*$qo zM1QYdfZ&1)Nn$E`wQbe!ez{^m+X6iIP0Kgi|MvKo;8qD1i}V9J=YM{;6WlS0$fl%~ zY9%Fl(OL0n#fj&zrC}|TtG4KG*vemfjxu=nP7;3ld4IlzUYLP^U%;nW**_8cwlw3XrkvCttkf<4)!hFZ zv-j&OD_xc21#_)@@Zm4@t>2A8KNQ|XD?UEcYGuXS@8~^~$1A2A_^(Gq2g1M->n^O+ z4Ei-_KcAz&2mE;EBEr+Z4Qz%wbS*ISXMKr(|3x%Eic%YgI6gtck)vsJ0&er<*4dUO z_n(OCmA@|x0i1x}PNL7>mK>C#D74g3z!v zqM5k7F?o^pUqw!-ba|j}63U`~OUYMrBfsQOb>ucA-c(9-KUQ&18;JM zgG%uc_aiiWKDl$Z+-%;~7wH{^A0$!w6GXLi1@b&*Pbbbwq3k&z@BL*Lj48GHo}5?5 z%5?lEj!kuo2A$Oi4gsa~Qo94<+!dLACNR!#Rg3~zH$ml$so z8@}F(U`<%mMgqtNg zF^wBqB6vx_nj3?^!!AK@7wpAQ;q>v%@KwkkOT*m^d#-e0I%-1&YY$h{1p}OWram{j zJ4Nue(+_<%!C-7?&X4Gx2(%M$^;8|41gADjU5SPG8wE}&ZRtljX92BQ0$5OMYj?8D zHMn{{ZVr4ND~&A|@u7$$VP?I%)pjuFvMQCnPGxFv%e$gZP=fgM&G6td!IZb5$}cCi z3}4vtn2`DbYh>ASeKR`0<=k=XVtRXu6YE@f{Yn9mjZ57D`3YAOEtc2NsSiUw2I@wn zUanUQHjF=>H*R;@VIybs%J3-lJlh(9GePc|IxlTiTS?E!iH?^5LF84q1v8sScF`eV ziSZpQxKspnlGGFRSAk|bc$^jzOws-Rxppd(cBa~zEssuox7x@K6}%>!7ytPn@#KS7 z$(97{LJB#NoAru5l3Q07%fG6yZsQw6p93drPQZEJ=j&#lIAF18U zgWSC6zUH|UT-7D&wy$NsR69)^jModxOeA0gC zl+PMneBYpurc1Ivd<-_KNQm4}q9$H*gT}RR;Jt)+`}(KjkDx3>uFhxWvjQ#~GNPb@ z=NLmpC&c>B{78)8Iyh!!mof10@eU!ykDiPEZOpMRD@3nfRF@_2D8Vr%Yu`?*G&#{iNgkUb$#@7(XC z#(;&}8b2&IwMixJmos{giE179wHQ6khOW>DlN$UJh=|X<&-+mnlx# z#J6iA?wdm*7|nRUhfaVNOu~5B@6;MTuTL0!di(shI$nWSx(F{#Mf31PVNd^oA53rs zkduWq?6I4DzDA0$*iJ+Ea>L$X6Sdn_zdg2e_^IcO2zB=RSTIkAKwsE~=(puz-l8VtM7QxF@Up>aJjj${wR+8j{ezt|5GTBlDs~7?>!Qz2*&13w@OAhHCglpY8mWQ z>w@wL>0Vkx_dlh!_S#IrbnZ$NtCzxI1wgD#y6)6XhBhu&^LQ zkJ+3_6u3vUlD_%zD%H<-UHbIeC|IO2;A3^>k85=o8FG~mvreFPK+-6Xs^5;nhW#H! 
zFpNii+R316_z01H9AH-B5&QYHnj_#zR7OR6zNz!yQjMYaqOX_lc_5fleS+>~(Pl}3 z2*4caDEJ-g4w-<8yPKO6kX{E}iM)|0ws1<7k;#{-K&noe@j?p}*G;$5zxtMXUE2G$ zFr9SMmw+-mNCat=b(NBTc3^ZQ*avCmQVGx`Ak@mo9vx!T25EZ>iwD%k$sh4aIebLm zTsPLIf)v8RL1tY>H&&Z9)(!Gypd>3j)xebpSoQM*g*8m%zLhz&diDGevu<>JJdX`59vx0&hG=>9 z#a2SzYs~=3$vTbnJlzd?#L#+n@nl`QyvZ7*C1LGo=`jzPIAuh7(}kM8%d|_*sXuoi zqf=n(xr{Z;cflenS^N5OFnFhpUi~WL7z7VXKjFJRd1WLBvj%+UPvNcv>x)@AQHimg zLC7%mfctD`yTnsnP`<~8I!+}b6b2*s68;#<+)I)0SQA%V17BzK?afM~5AOcq0~=+) zdt;a{co5Kgl`_L8Hxh!bUz|*&#m$?_z)N5B|LiuX0Byg}4w#e*T@G=5t9`|bngEx( z5f!Fg?2(Wff7SZgug#_UNztW`Yka&!b#Uh+8Y$k3NDqGuUY|&Fw*h?-z}gf zsJ3L?1J`pna))0;P?lml5?`>iJaUuynF5=e<>@tq(=A%(Hqd>Aa^N}xh*5c0@7ach zad>>|tJJb1*f6mt##aV5$j-bS+>b@zf*iDGU?m2_H?lGS#3ZhMM{3pXK(M?Z#k*pV zyCu=)M+Y-&3-%UTI1NzAk3DbnX|FY3XGGklroprEB5=I=ZWPSvZ~wHb|E|VDGl#fI zGgy!GFm3nY>ru)8Bj8L6(?ynYA$>C?#|iHn8!}c_tuqNbWQn z>({K<#Vp-+M14&BYWzSjG|?sFZ9JB$Fw`>T(;dc{VquT-3r!36c_KpqWLR^dc|6)Z zz_-p?ZK7qwI7bRgzo1PoYTS4&IbJL{J zKbR_+Icv&t9vCo<8HlZjplAu9KUY@5`tq--n6|mr>9iqkKs>%-;#baUGz~+=Xkuwv zOIGXT7@jQHcv?C{}_L;98HG!G)CDB%7wbmJNetx3^5f5w}Fa^&kq(`;aw*~AEw z?@(ivMZ+Zy2NZ&>4WLd2;-7y8BG(6Eb$KHoe~3h@&h&&|SJ8zcJKlUU?H7LF{PdR-{b?K9nrIJ~H)cA3dOi0OGhF=Hy`Y#&a=dOV=+kxc3B}V5MeUeRimGe)*Xm9}3!1u)B zzd7@O$LIL&KtMcwa%D|Md7xe#{nmMTy9)e~5c>~;+U!@M=xUmf!vuYn+kv~YeQ;IW<{HQlQUo24hJ zGhm)38d=*Po!oo|#@jB+>L4vlhjjc z)lO3@QXk0qerl42?buYb3g6R<;QHQ04>p54IPeH};LKr&3mkfsb1*}WKx4h1Om~DV z`Q_}71m?9Vrj*@w$Ee4WXL|A;Fegt=8TocDjCWt5^l%Eu<-ozvb($hI#gH*|tnW{C z3E&I>{f0l_boA2KGAV)pmn6jlF7A-6;bwZZ!#weZKu)VUJe@O_JNC=Jvb~ovlie35 z{Z5;Xi`cPZs3eZJlPcyUo{Kj9F&V3(IY1!;zYkH!{^Ncj2M|}e6@Vy&(Q0_k|McHB z?gqcl>v+wnzkMiya-r!ZC$0YH_kghJ1%BV!^=G#IEeO#o0DfO3D+BRwDDvnv;NViT z>^t{oQT^!_kgveOwUWyCYaNdNWgRs#HWO7q{jBD|`8q{>j--3DB98piht5H{bma~C zr&_>y_-wTOCBMwk2Zz*u$~`WRkDLxzm$E}oC_vKSfdGwDjXBaB>J$`1(M@H_h(_PHfnUXvbV5thOBsLiwCbe?3JTZx7d%>Q+nKegx3`sN zF|#W5smx+rM$KPHFrhncrW|6BY?2~4ALRyi6-12Is57R|v-UHtzS!et<`=OHl_xf7)QhII0;vh_)Y1$*{-1OG z5>9CTq3Z29CXPLqu=wpRXGQZDU{Z%TZy&V5F;pJx(wsd&MOjK~T$((P_>0g3%R^2N z7En{!NXJQ=+$w%3JKWq#>FN>F(Ov5hh>2IXGf#M;A=U~ z6OvgY?%nW0r(rn@olCG#=nh}tvU|l)`M8<)SSDt)+*DOnX~DB^S2I5GkB$j&V;~d& z+?a=FcLTkLC~-*kB~2Y<02rE(vqsPQ$<|#*A^xU5HEnvnxaLjX8vmJPQg@hvz~>J&?=MtbEu8nU|35_8#VuGVIPBqy}w>V z>u(>jaKgeFwRd9vo8eYM!Y41@*8TOv5(QYR|66wR;hjQ+GyQ^Ow*aSRCOhgDW|MhF zf!*;!4gjduUmOd+UU>#o9IYifA0AHZ`4@ZV;^fUd9#jWa*DS)ZhSiGjxDqb7mO*f} z=6$n>RKgISrhjx%)c{oXGvFR~VSEbyRV}k!e-E*QE*Sb4Z-q{p3y^`qGYxsBElf3Q z-PU4!-(+-~epw;{DHcyas9=+M`~L@T>qqWDpc8`!)W!5NZ$(Gsz%4BVjEEK zfV&Z!UMYcp?zaq?^m0m>559QqOb+1XYas|@n3YexvSa-v_PO zD|V;kIJ9EdBmG-)x1kmECrFab5R>^rp#5`OZ!t}hmBFB0?DU+*TI>mu(2YH?O+2K` z&Ug|6IcCip&w?<>5U@0gu-6OMZn*3W(dv>ZOsZzd@eS+&9Xse(trl}QtK|7#&1=K8 z$~TbULVE;dsKT|Hul0zJTfnuMo_(d?nYhEg)krC8(epC>_pjN*Z<8m1ccGlxQw^LUack$DzdMU${5Zbcdm=RiddEb9MYVINh zDN(o7S%MT+ZSKvdi!#0#Su$iO}c5&!r^jc7Qz~D7~ zDm|{%oZ-1bzkFM_>@~lwN~G};B^y1X-faPHElD%q2-*xZ3wWmTE#N&Lv$C4cAktDB@HjSU;7m{%u=ljp`K~D^j~*~| zclf%`sRUkDd-=HCw~ZyhCGX3Cx3}toPOtUdx?1$$@{4TX!2((B55Rqr`nThffQL4F zZ`@P@szE?I6uTbO?s1>(3p`q?Ix1)5(M?I(>vkkn0sE@=LqtIfs6i)GuTGuCdnSy# z3ei+vFdx`tvlr06tp*Hlvnp}53lo5qva`?Q=`VpNf}d&%2j&X*TfoEefww5HodPPA9Opd$oq@H5o$Hi@y(te z?M`0?<=f>yllPpM??1~#^ZYci*piE&Q$EwyC+Ac7 zbac>a?f=T3+;(T(o;Lq8qNA|@bo@%#;m>}+g!;NA@B$mtVeUOrjFNWqWgaVR0G_b7 z95`I@KJtTBlOu4r6L{pc$=`&m3}S@Wmh z;PO5Dsu(xycwME;2wV$N9aqx`+-9kIcE0`ohkY`@U4EIsojNZ!n;uYXmznhKZuxz? 
zC#IDR3%D zWYKy3eLs?}ox61)aBuR&OyJmcX|w-F{mp_V(HRR{H|=`#_5*N3C@)9D(T~8*yF45X zt_v;X7&k;*wYax3BiSrye%{OH9<^2*=NP^|5_l0hU%fz>wd-4T@LtRCy~&BWz&-5m zcO1JNvgLc)V_WTfzvQrc8{b};7rF7)O6AKAyR(>AReq~F{wPMy;_TGt-|QTpnsLMzOl1IGh?<;aCCn|K{`U>%Q0MQ-wu#^*BF+Ks>t zq!ln5mIlaRyP3D)KmbtE`K1L?XA<748`TLaO@SdK+-~(!hLLHm>IPt!#A>x;6!K30 z#!0|l%+#8O(*6S)z$W_Z2R|>YX?`RC?+OAL$AD*Z@gm@2eZ4 z-;dOxaF`5?ao?tsh+7GeI~$Fmz{6V*1uRlUM%mdKI;Vst080?{#Q*>R literal 0 HcmV?d00001 diff --git a/microbenchmarks/stream/mysecond.c b/microbenchmarks/stream/mysecond.c new file mode 100644 index 00000000..f6778b77 --- /dev/null +++ b/microbenchmarks/stream/mysecond.c @@ -0,0 +1,28 @@ +/* A gettimeofday routine to give access to the wall + * clock timer on most UNIX-like systems. + * + * This version defines two entry points -- with + * and without appended underscores, so it *should* + * automagically link with FORTRAN */ + +#include + +double mysecond() +{ +/* struct timeval { long tv_sec; + * long tv_usec; }; + * + * struct timezone { int tz_minuteswest; + * int tz_dsttime; }; */ + + struct timeval tp; + struct timezone tzp; + int i; + + i = gettimeofday(&tp,&tzp); + return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 ); +} + +double mysecond_() {return mysecond();} + + diff --git a/microbenchmarks/stream/splunk-stream.xml b/microbenchmarks/stream/splunk-stream.xml new file mode 100644 index 00000000..57c5f479 --- /dev/null +++ b/microbenchmarks/stream/splunk-stream.xml @@ -0,0 +1,98 @@ + + + Stream Benchmarks + + + OMP_NUM_THREADS + OMP_NUM_THREADS + * + + index=hpctest sourcetype=pavilion2 sys_name=$machine$ name=stream* omp_num_threads=* | +table omp_num_threads | +dedup omp_num_threads + $field1.earliest$ + $field1.latest$ + + + + + triad + copy + add + triad + + + + node + max(triad) + min(triad) + avg(triad) + stdev(triad) + max(copy) + min(copy) + avg(copy) + stdev(copy) + max(add) + min(add) + avg(add) + stdev(add) + |sort node + + + + |sort node + + + Stream Triad Performance by OMP configuration $machine$ + + index=hpctest sourcetype=pavilion2 sys_name=$machine$ name=stream* omp_num_threads=$OMP_NUM_THREADS$ | +rex field=file "(?<node>\w+\d+)-stream.out" | +chart max($calc$) min($calc$) avg($calc$) stdev($calc$) by node| +sort max($calc$) + $field1.earliest$ + $field1.latest$ + + + + + + + + + Stream Single Node $machine$ $calc$ Rate + + index=hpctest sourcetype=pavilion2 sys_name=$machine$ name=stream* omp_num_threads=$OMP_NUM_THREADS$ | +rex field=file "(?<node>\w+\d+)-stream.out" | +chart max($calc$) min($calc$) avg($calc$) stdev($calc$) by node $streamsort$ + $field1.earliest$ + $field1.latest$ + + + + + + + + + + HPL GFLOP/s Full System + + HPL Full System Performance $machine$ + + index=hpctest sourcetype=pavilion2 sys_name=$machine$ name=hpl-full* gflops!=null| +chart max(gflops) as "MAX GFLOP/s" min(gflops) as "MIN GFLOP/s" avg(gflops) as "AVG GFLOP/s" by valn| +sort max(gflops) + $field1.earliest$ + $field1.latest$ + + + + + + + + + + + + diff --git a/microbenchmarks/stream/stream.c b/microbenchmarks/stream/stream.c new file mode 100644 index 00000000..24edd556 --- /dev/null +++ b/microbenchmarks/stream/stream.c @@ -0,0 +1,586 @@ +/*-----------------------------------------------------------------------*/ +/* Program: STREAM */ +/* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */ +/* Original code developed by John D. McCalpin */ +/* Programmers: John D. McCalpin */ +/* Joe R. 
+/* */
+/* This program measures memory transfer rates in MB/s for simple */
+/* computational kernels coded in C. */
+/*-----------------------------------------------------------------------*/
+/* Copyright 1991-2013: John D. McCalpin */
+/*-----------------------------------------------------------------------*/
+/* License: */
+/* 1. You are free to use this program and/or to redistribute */
+/* this program. */
+/* 2. You are free to modify this program for your own use, */
+/* including commercial use, subject to the publication */
+/* restrictions in item 3. */
+/* 3. You are free to publish results obtained from running this */
+/* program, or from works that you derive from this program, */
+/* with the following limitations: */
+/* 3a. In order to be referred to as "STREAM benchmark results", */
+/* published results must be in conformance to the STREAM */
+/* Run Rules, (briefly reviewed below) published at */
+/* http://www.cs.virginia.edu/stream/ref.html */
+/* and incorporated herein by reference. */
+/* As the copyright holder, John McCalpin retains the */
+/* right to determine conformity with the Run Rules. */
+/* 3b. Results based on modified source code or on runs not in */
+/* accordance with the STREAM Run Rules must be clearly */
+/* labelled whenever they are published. Examples of */
+/* proper labelling include: */
+/* "tuned STREAM benchmark results" */
+/* "based on a variant of the STREAM benchmark code" */
+/* Other comparable, clear, and reasonable labelling is */
+/* acceptable. */
+/* 3c. Submission of results to the STREAM benchmark web site */
+/* is encouraged, but not required. */
+/* 4. Use of this program or creation of derived works based on this */
+/* program constitutes acceptance of these licensing restrictions. */
+/* 5. Absolutely no warranty is expressed or implied. */
+/*-----------------------------------------------------------------------*/
+# include <stdio.h>
+# include <unistd.h>
+# include <math.h>
+# include <float.h>
+# include <limits.h>
+# include <sys/time.h>
+
+/*-----------------------------------------------------------------------
+ * INSTRUCTIONS:
+ *
+ * 1) STREAM requires different amounts of memory to run on different
+ * systems, depending on both the system cache size(s) and the
+ * granularity of the system timer.
+ * You should adjust the value of 'STREAM_ARRAY_SIZE' (below)
+ * to meet *both* of the following criteria:
+ * (a) Each array must be at least 4 times the size of the
+ * available cache memory. I don't worry about the difference
+ * between 10^6 and 2^20, so in practice the minimum array size
+ * is about 3.8 times the cache size.
+ * Example 1: One Xeon E3 with 8 MB L3 cache
+ * STREAM_ARRAY_SIZE should be >= 4 million, giving
+ * an array size of 30.5 MB and a total memory requirement
+ * of 91.5 MB.
+ * Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP)
+ * STREAM_ARRAY_SIZE should be >= 20 million, giving
+ * an array size of 153 MB and a total memory requirement
+ * of 458 MB.
+ * (b) The size should be large enough so that the 'timing calibration'
+ * output by the program is at least 20 clock-ticks.
+ * Example: most versions of Windows have a 10 millisecond timer
+ * granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds.
+ * If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec.
+ * This means that each array must be at least 1 GB, or 128M elements.
+ *
+ * Version 5.10 increases the default array size from 2 million
+ * elements to 10 million elements in response to the increasing
+ * size of L3 caches.
The new default size is large enough for caches + * up to 20 MB. + * Version 5.10 changes the loop index variables from "register int" + * to "ssize_t", which allows array indices >2^32 (4 billion) + * on properly configured 64-bit systems. Additional compiler options + * (such as "-mcmodel=medium") may be required for large memory runs. + * + * Array size can be set at compile time without modifying the source + * code for the (many) compilers that support preprocessor definitions + * on the compile line. E.g., + * gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M + * will override the default size of 10M with a new size of 100M elements + * per array. + */ +#ifndef STREAM_ARRAY_SIZE +# define STREAM_ARRAY_SIZE 10000000 +#endif + +/* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result + * for any iteration after the first, therefore the minimum value + * for NTIMES is 2. + * There are no rules on maximum allowable values for NTIMES, but + * values larger than the default are unlikely to noticeably + * increase the reported performance. + * NTIMES can also be set on the compile line without changing the source + * code using, for example, "-DNTIMES=7". + */ +#ifdef NTIMES +#if NTIMES<=1 +# define NTIMES 10 +#endif +#endif +#ifndef NTIMES +# define NTIMES 10 +#endif + +/* Users are allowed to modify the "OFFSET" variable, which *may* change the + * relative alignment of the arrays (though compilers may change the + * effective offset by making the arrays non-contiguous on some systems). + * Use of non-zero values for OFFSET can be especially helpful if the + * STREAM_ARRAY_SIZE is set to a value close to a large power of 2. + * OFFSET can also be set on the compile line without changing the source + * code using, for example, "-DOFFSET=56". + */ +#ifndef OFFSET +# define OFFSET 0 +#endif + +/* + * 3) Compile the code with optimization. Many compilers generate + * unreasonably bad code before the optimizer tightens things up. + * If the results are unreasonably good, on the other hand, the + * optimizer might be too smart for me! + * + * For a simple single-core version, try compiling with: + * cc -O stream.c -o stream + * This is known to work on many, many systems.... + * + * To use multiple cores, you need to tell the compiler to obey the OpenMP + * directives in the code. This varies by compiler, but a common example is + * gcc -O -fopenmp stream.c -o stream_omp + * The environment variable OMP_NUM_THREADS allows runtime control of the + * number of threads/cores used when the resulting "stream_omp" program + * is executed. + * + * To run with single-precision variables and arithmetic, simply add + * -DSTREAM_TYPE=float + * to the compile line. + * Note that this changes the minimum array sizes required --- see (1) above. + * + * The preprocessor directive "TUNED" does not do much -- it simply causes the + * code to call separate functions to execute each kernel. Trivial versions + * of these functions are provided, but they are *not* tuned -- they just + * provide predefined interfaces to be replaced with tuned code. + * + * + * 4) Optional: Mail the results to mccalpin@cs.virginia.edu + * Be sure to include info that will help me understand: + * a) the computer hardware configuration (e.g., processor model, memory type) + * b) the compiler name/version and compilation flags + * c) any run-time information (such as OMP_NUM_THREADS) + * d) all of the output from the test case. + * + * Thanks! 
+ * + *-----------------------------------------------------------------------*/ + +# define HLINE "-------------------------------------------------------------\n" + +# ifndef MIN +# define MIN(x,y) ((x)<(y)?(x):(y)) +# endif +# ifndef MAX +# define MAX(x,y) ((x)>(y)?(x):(y)) +# endif + +#ifndef STREAM_TYPE +#define STREAM_TYPE double +#endif + +static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET], + b[STREAM_ARRAY_SIZE+OFFSET], + c[STREAM_ARRAY_SIZE+OFFSET]; + +static double avgtime[4] = {0}, maxtime[4] = {0}, + mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX}; + +static char *label[4] = {"Copy: ", "Scale: ", + "Add: ", "Triad: "}; + +static double bytes[4] = { + 2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, + 2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, + 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, + 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE + }; + +extern double mysecond(); +extern void checkSTREAMresults(); +#ifdef TUNED +extern void tuned_STREAM_Copy(); +extern void tuned_STREAM_Scale(STREAM_TYPE scalar); +extern void tuned_STREAM_Add(); +extern void tuned_STREAM_Triad(STREAM_TYPE scalar); +#endif +#ifdef _OPENMP +extern int omp_get_num_threads(); +#endif +int +main() + { + int quantum, checktick(); + int BytesPerWord; + int k; + ssize_t j; + STREAM_TYPE scalar; + double t, times[4][NTIMES]; + + /* --- SETUP --- determine precision and check timing --- */ + + printf(HLINE); + printf("STREAM version $Revision: 5.10 $\n"); + printf(HLINE); + BytesPerWord = sizeof(STREAM_TYPE); + printf("This system uses %d bytes per array element.\n", + BytesPerWord); + + printf(HLINE); +#ifdef N + printf("***** WARNING: ******\n"); + printf(" It appears that you set the preprocessor variable N when compiling this code.\n"); + printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n"); + printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE); + printf("***** WARNING: ******\n"); +#endif + + printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, OFFSET); + printf("Memory per array = %.1f MiB (= %.1f GiB).\n", + BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0), + BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0)); + printf("Total memory required = %.1f MiB (= %.1f GiB).\n", + (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.), + (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.)); + printf("Each kernel will be executed %d times.\n", NTIMES); + printf(" The *best* time for each kernel (excluding the first iteration)\n"); + printf(" will be used to compute the reported bandwidth.\n"); + +#ifdef _OPENMP + printf(HLINE); +#pragma omp parallel + { +#pragma omp master + { + k = omp_get_num_threads(); + printf ("Number of Threads requested = %i\n",k); + } + } +#endif + +#ifdef _OPENMP + k = 0; +#pragma omp parallel +#pragma omp atomic + k++; + printf ("Number of Threads counted = %i\n",k); +#endif + + /* Get initial value for system clock. 
*/
+#pragma omp parallel for
+    for (j=0; j<STREAM_ARRAY_SIZE; j++) {
+        a[j] = 1.0;
+        b[j] = 2.0;
+        c[j] = 0.0;
+    }
+
+    printf(HLINE);
+
+    if  ( (quantum = checktick()) >= 1)
+        printf("Your clock granularity/precision appears to be "
+            "%d microseconds.\n", quantum);
+    else {
+        printf("Your clock granularity appears to be "
+            "less than one microsecond.\n");
+        quantum = 1;
+    }
+
+    t = mysecond();
+#pragma omp parallel for
+    for (j = 0; j < STREAM_ARRAY_SIZE; j++)
+        a[j] = 2.0E0 * a[j];
+    t = 1.0E6 * (mysecond() - t);
+
+    printf("Each test below will take on the order"
+        " of %d microseconds.\n", (int) t );
+    printf(" (= %d clock ticks)\n", (int) (t/quantum) );
+    printf("Increase the size of the arrays if this shows that\n");
+    printf("you are not getting at least 20 clock ticks per test.\n");
+
+    printf(HLINE);
+
+    printf("WARNING -- The above is only a rough guideline.\n");
+    printf("For best results, please be sure you know the\n");
+    printf("precision of your system timer.\n");
+    printf(HLINE);
+
+    /* --- MAIN LOOP --- repeat test cases NTIMES times --- */
+
+    scalar = 3.0;
+    for (k=0; k<NTIMES; k++)
+    {
+        times[0][k] = mysecond();
+#ifdef TUNED
+        tuned_STREAM_Copy();
+#else
+#pragma omp parallel for
+        for (j=0; j<STREAM_ARRAY_SIZE; j++)
+            c[j] = a[j];
+#endif
+        times[0][k] = mysecond() - times[0][k];
+
+        times[1][k] = mysecond();
+#ifdef TUNED
+        tuned_STREAM_Scale(scalar);
+#else
+#pragma omp parallel for
+        for (j=0; j<STREAM_ARRAY_SIZE; j++)
+            b[j] = scalar*c[j];
+#endif
+        times[1][k] = mysecond() - times[1][k];
+
+        times[2][k] = mysecond();
+#ifdef TUNED
+        tuned_STREAM_Add();
+#else
+#pragma omp parallel for
+        for (j=0; j<STREAM_ARRAY_SIZE; j++)
+            c[j] = a[j]+b[j];
+#endif
+        times[2][k] = mysecond() - times[2][k];
+
+        times[3][k] = mysecond();
+#ifdef TUNED
+        tuned_STREAM_Triad(scalar);
+#else
+#pragma omp parallel for
+        for (j=0; j<STREAM_ARRAY_SIZE; j++)
+            a[j] = b[j]+scalar*c[j];
+#endif
+        times[3][k] = mysecond() - times[3][k];
+    }
+
+    /* --- SUMMARY --- */
+
+    for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
+    {
+        for (j=0; j<4; j++)
+        {
+            avgtime[j] = avgtime[j] + times[j][k];
+            mintime[j] = MIN(mintime[j], times[j][k]);
+            maxtime[j] = MAX(maxtime[j], times[j][k]);
+        }
+    }
+
+    printf("Function    Best Rate MB/s  Avg time     Min time     Max time\n");
+    for (j=0; j<4; j++) {
+        avgtime[j] = avgtime[j]/(double)(NTIMES-1);
+
+        printf("%s%12.1f  %11.6f  %11.6f  %11.6f\n", label[j],
+            1.0E-06 * bytes[j]/mintime[j],
+            avgtime[j],
+            mintime[j],
+            maxtime[j]);
+    }
+    printf(HLINE);
+
+    /* --- Check Results --- */
+    checkSTREAMresults();
+    printf(HLINE);
+
+    return 0;
+}
+
+# define M 20
+
+int
+checktick()
+{
+    int i, minDelta, Delta;
+    double t1, t2, timesfound[M];
+
+/*  Collect a sequence of M unique time values from the system. */
+
+    for (i = 0; i < M; i++) {
+        t1 = mysecond();
+        while( ((t2=mysecond()) - t1) < 1.0E-6 )
+            ;
+        timesfound[i] = t1 = t2;
+    }
+
+/*
+ * Determine the minimum difference between these M values.
+ * This result will be an estimate of the granularity.
+ */
+
+    minDelta = 1000000;
+    for (i = 1; i < M; i++) {
+        Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1]));
+        minDelta = MIN(minDelta, MAX(Delta,0));
+    }
+
+    return(minDelta);
+}
+
+
+
+/* A gettimeofday routine to give access to the wall
+   clock timer on most UNIX-like systems. */
+
+#include <sys/time.h>
+
+double mysecond()
+{
+    struct timeval tp;
+    struct timezone tzp;
+    int i;
+
+    i = gettimeofday(&tp,&tzp);
+    return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
+}
+
+#ifndef abs
+#define abs(a) ((a) >= 0 ? (a) : -(a))
+#endif
+void checkSTREAMresults ()
+{
+    STREAM_TYPE aj,bj,cj,scalar;
+    STREAM_TYPE aSumErr,bSumErr,cSumErr;
+    STREAM_TYPE aAvgErr,bAvgErr,cAvgErr;
+    double epsilon;
+    ssize_t j;
+    int k,ierr,err;
+
+    /* reproduce initialization */
+    aj = 1.0;
+    bj = 2.0;
+    cj = 0.0;
+    /* a[] is modified during timing check */
+    aj = 2.0E0 * aj;
+    /* now execute timing loop */
+    scalar = 3.0;
+    for (k=0; k<NTIMES; k++)
+    {
+        cj = aj;
+        bj = scalar*cj;
+        cj = aj+bj;
+        aj = bj+scalar*cj;
+    }
+
+    /* accumulate deltas between observed and expected results */
+    aSumErr = 0.0;
+    bSumErr = 0.0;
+    cSumErr = 0.0;
+    for (j=0; j<STREAM_ARRAY_SIZE; j++) {
+        aSumErr += abs(a[j] - aj);
+        bSumErr += abs(b[j] - bj);
+        cSumErr += abs(c[j] - cj);
+    }
+    aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
+    bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
+    cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
+
+    if (sizeof(STREAM_TYPE) == 4) {
+        epsilon = 1.e-6;
+    }
+    else if (sizeof(STREAM_TYPE) == 8) {
+        epsilon = 1.e-13;
+    }
+    else {
+        printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n",sizeof(STREAM_TYPE));
+        epsilon = 1.e-6;
+    }
+
+    err = 0;
+    if (abs(aAvgErr/aj) > epsilon) {
+        err++;
+        printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
+        printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj);
+        ierr = 0;
+        for (j=0; j<STREAM_ARRAY_SIZE; j++) {
+            if (abs(a[j]/aj-1.0) > epsilon) {
+                ierr++;
+#ifdef VERBOSE
+                if (ierr < 10) {
+                    printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n",
+                        j,aj,a[j],abs((aj-a[j])/aAvgErr));
+                }
+#endif
+            }
+        }
+        printf(" For array a[], %d errors were found.\n",ierr);
+    }
+    if (abs(bAvgErr/bj) > epsilon) {
+        err++;
+        printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
+        printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj);
+        printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
+        ierr = 0;
+        for (j=0; j<STREAM_ARRAY_SIZE; j++) {
+            if (abs(b[j]/bj-1.0) > epsilon) {
+                ierr++;
+#ifdef VERBOSE
+                if (ierr < 10) {
+                    printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n",
+                        j,bj,b[j],abs((bj-b[j])/bAvgErr));
+                }
+#endif
+            }
+        }
+        printf(" For array b[], %d errors were found.\n",ierr);
+    }
+    if (abs(cAvgErr/cj) > epsilon) {
+        err++;
+        printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
+        printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj);
+        printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
+        ierr = 0;
+        for (j=0; j<STREAM_ARRAY_SIZE; j++) {
+            if (abs(c[j]/cj-1.0) > epsilon) {
+                ierr++;
+#ifdef VERBOSE
+                if (ierr < 10) {
+                    printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n",
+                        j,cj,c[j],abs((cj-c[j])/cAvgErr));
+                }
+#endif
+            }
+        }
+        printf(" For array c[], %d errors were found.\n",ierr);
+    }
+    if (err == 0) {
+        printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon);
+    }
+#ifdef VERBOSE
+    printf ("Results Validation Verbose Results: \n");
+    printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj);
+    printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]);
+    printf (" Rel Errors on a, b, c:
%e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj)); +#endif +} + +#ifdef TUNED +/* stubs for "tuned" versions of the kernels */ +void tuned_STREAM_Copy() +{ + ssize_t j; +#pragma omp parallel for + for (j=0; j +# include +# include +# include +# include +# include +# include +# include +# include "mpi.h" + +/*----------------------------------------------------------------------- + * INSTRUCTIONS: + * + * 1) STREAM requires different amounts of memory to run on different + * systems, depending on both the system cache size(s) and the + * granularity of the system timer. + * You should adjust the value of 'STREAM_ARRAY_SIZE' (below) + * to meet *both* of the following criteria: + * (a) Each array must be at least 4 times the size of the + * available cache memory. I don't worry about the difference + * between 10^6 and 2^20, so in practice the minimum array size + * is about 3.8 times the cache size. + * Example 1: One Xeon E3 with 8 MB L3 cache + * STREAM_ARRAY_SIZE should be >= 4 million, giving + * an array size of 30.5 MB and a total memory requirement + * of 91.5 MB. + * Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP) + * STREAM_ARRAY_SIZE should be >= 20 million, giving + * an array size of 153 MB and a total memory requirement + * of 458 MB. + * (b) The size should be large enough so that the 'timing calibration' + * output by the program is at least 20 clock-ticks. + * Example: most versions of Windows have a 10 millisecond timer + * granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds. + * If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec. + * This means the each array must be at least 1 GB, or 128M elements. + * + * Version 5.10 increases the default array size from 2 million + * elements to 10 million elements in response to the increasing + * size of L3 caches. The new default size is large enough for caches + * up to 20 MB. + * Version 5.10 changes the loop index variables from "register int" + * to "ssize_t", which allows array indices >2^32 (4 billion) + * on properly configured 64-bit systems. Additional compiler options + * (such as "-mcmodel=medium") may be required for large memory runs. + * + * Array size can be set at compile time without modifying the source + * code for the (many) compilers that support preprocessor definitions + * on the compile line. E.g., + * gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M + * will override the default size of 10M with a new size of 100M elements + * per array. + */ + +// ----------------------- !!! NOTE CHANGE IN DEFINITION !!! ------------------ +// For the MPI version of STREAM, the three arrays with this many elements +// each will be *distributed* across the MPI ranks. +// +// Be careful when computing the array size needed for a particular target +// system to meet the minimum size requirement to ensure overflowing the caches. +// +// Example: +// Assume 4 nodes with two Intel Xeon E5-2680 processors (20 MiB L3) each. +// The *total* L3 cache size is 4*2*20 = 160 MiB, so each array must be +// at least 640 MiB, or at least 80 million 8 Byte elements. +// Note that it does not matter whether you use one MPI rank per node or +// 16 MPI ranks per node -- only the total array size and the total +// cache size matter. +// +#ifndef STREAM_ARRAY_SIZE +# define STREAM_ARRAY_SIZE 10000000 +#endif + +/* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result + * for any iteration after the first, therefore the minimum value + * for NTIMES is 2. 
+ * There are no rules on maximum allowable values for NTIMES, but
+ * values larger than the default are unlikely to noticeably
+ * increase the reported performance.
+ * NTIMES can also be set on the compile line without changing the source
+ * code using, for example, "-DNTIMES=7".
+ */
+#ifdef NTIMES
+#if NTIMES<=1
+# define NTIMES 10
+#endif
+#endif
+#ifndef NTIMES
+# define NTIMES 10
+#endif
+
+// Make the scalar coefficient modifiable at compile time.
+// The old value of 3.0 caused floating-point overflows after a relatively small
+// number of iterations. The new default of 0.42 allows over 2000 iterations for
+// 32-bit IEEE arithmetic and over 18000 iterations for 64-bit IEEE arithmetic.
+// The growth in the solution can be eliminated (almost) completely by setting
+// the scalar value to 0.41421445, but this also means that the error checking
+// code no longer triggers an error if the code does not actually execute the
+// correct number of iterations!
+#ifndef SCALAR
+#define SCALAR 0.42
+#endif
+
+
+// ----------------------- !!! NOTE CHANGE IN DEFINITION !!! ------------------
+// The OFFSET preprocessor variable is not used in this version of the benchmark.
+// The user must change the code at or after the "posix_memalign" array allocations
+// to change the relative alignment of the pointers.
+// ----------------------- !!! NOTE CHANGE IN DEFINITION !!! ------------------
+#ifndef OFFSET
+# define OFFSET 0
+#endif
+
+
+/*
+ * 3) Compile the code with optimization. Many compilers generate
+ * unreasonably bad code before the optimizer tightens things up.
+ * If the results are unreasonably good, on the other hand, the
+ * optimizer might be too smart for me!
+ *
+ * For a simple single-core version, try compiling with:
+ * cc -O stream.c -o stream
+ * This is known to work on many, many systems....
+ *
+ * To use multiple cores, you need to tell the compiler to obey the OpenMP
+ * directives in the code. This varies by compiler, but a common example is
+ * gcc -O -fopenmp stream.c -o stream_omp
+ * The environment variable OMP_NUM_THREADS allows runtime control of the
+ * number of threads/cores used when the resulting "stream_omp" program
+ * is executed.
+ *
+ * To run with single-precision variables and arithmetic, simply add
+ * -DSTREAM_TYPE=float
+ * to the compile line.
+ * Note that this changes the minimum array sizes required --- see (1) above.
+ *
+ * The preprocessor directive "TUNED" does not do much -- it simply causes the
+ * code to call separate functions to execute each kernel. Trivial versions
+ * of these functions are provided, but they are *not* tuned -- they just
+ * provide predefined interfaces to be replaced with tuned code.
+ *
+ *
+ * 4) Optional: Mail the results to mccalpin@cs.virginia.edu
+ * Be sure to include info that will help me understand:
+ * a) the computer hardware configuration (e.g., processor model, memory type)
+ * b) the compiler name/version and compilation flags
+ * c) any run-time information (such as OMP_NUM_THREADS)
+ * d) all of the output from the test case.
+ *
+ * Thanks!
+ * + *-----------------------------------------------------------------------*/ + +# define HLINE "-------------------------------------------------------------\n" + +# ifndef MIN +# define MIN(x,y) ((x)<(y)?(x):(y)) +# endif +# ifndef MAX +# define MAX(x,y) ((x)>(y)?(x):(y)) +# endif + +#ifndef STREAM_TYPE +#define STREAM_TYPE double +#endif + +//static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET], +// b[STREAM_ARRAY_SIZE+OFFSET], +// c[STREAM_ARRAY_SIZE+OFFSET]; + +// Some compilers require an extra keyword to recognize the "restrict" qualifier. +double * restrict a, * restrict b, * restrict c; + +size_t array_elements, array_bytes, array_alignment; +static double avgtime[4] = {0}, maxtime[4] = {0}, + mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX}; + +static char *label[4] = {"Copy: ", "Scale: ", + "Add: ", "Triad: "}; + +static double bytes[4] = { + 2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, + 2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, + 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, + 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE + }; + +extern void checkSTREAMresults(STREAM_TYPE *AvgErrByRank, int numranks); +extern void computeSTREAMerrors(STREAM_TYPE *aAvgErr, STREAM_TYPE *bAvgErr, STREAM_TYPE *cAvgErr); +#ifdef TUNED +extern void tuned_STREAM_Copy(); +extern void tuned_STREAM_Scale(STREAM_TYPE scalar); +extern void tuned_STREAM_Add(); +extern void tuned_STREAM_Triad(STREAM_TYPE scalar); +#endif +#ifdef _OPENMP +extern int omp_get_num_threads(); +#endif +int +main() + { + int quantum, checktick(); + int BytesPerWord; + int i,k; + ssize_t j; + STREAM_TYPE scalar; + double t, times[4][NTIMES]; + double *TimesByRank; + double t0,t1,tmin; + int rc, numranks, myrank; + STREAM_TYPE AvgError[3] = {0.0,0.0,0.0}; + STREAM_TYPE *AvgErrByRank; + + /* --- SETUP --- call MPI_Init() before anything else! --- */ + + rc = MPI_Init(NULL, NULL); + t0 = MPI_Wtime(); + if (rc != MPI_SUCCESS) { + printf("ERROR: MPI Initialization failed with return code %d\n",rc); + exit(1); + } + // if either of these fail there is something really screwed up! + MPI_Comm_size(MPI_COMM_WORLD, &numranks); + MPI_Comm_rank(MPI_COMM_WORLD, &myrank); + + /* --- NEW FEATURE --- distribute requested storage across MPI ranks --- */ + array_elements = STREAM_ARRAY_SIZE / numranks; // don't worry about rounding vs truncation + array_alignment = 64; // Can be modified -- provides partial support for adjusting relative alignment + + // Dynamically allocate the three arrays using "posix_memalign()" + // NOTE that the OFFSET parameter is not used in this version of the code! 
+ array_bytes = array_elements * sizeof(STREAM_TYPE); + k = posix_memalign((void **)&a, array_alignment, array_bytes); + if (k != 0) { + printf("Rank %d: Allocation of array a failed, return code is %d\n",myrank,k); + MPI_Abort(MPI_COMM_WORLD, 2); + exit(1); + } + k = posix_memalign((void **)&b, array_alignment, array_bytes); + if (k != 0) { + printf("Rank %d: Allocation of array b failed, return code is %d\n",myrank,k); + MPI_Abort(MPI_COMM_WORLD, 2); + exit(1); + } + k = posix_memalign((void **)&c, array_alignment, array_bytes); + if (k != 0) { + printf("Rank %d: Allocation of array c failed, return code is %d\n",myrank,k); + MPI_Abort(MPI_COMM_WORLD, 2); + exit(1); + } + + // Initial informational printouts -- rank 0 handles all the output + if (myrank == 0) { + printf(HLINE); + printf("STREAM version $Revision: 1.8 $\n"); + printf(HLINE); + BytesPerWord = sizeof(STREAM_TYPE); + printf("This system uses %d bytes per array element.\n", + BytesPerWord); + + printf(HLINE); +#ifdef N + printf("***** WARNING: ******\n"); + printf(" It appears that you set the preprocessor variable N when compiling this code.\n"); + printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n"); + printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE); + printf("***** WARNING: ******\n"); +#endif + if (OFFSET != 0) { + printf("***** WARNING: ******\n"); + printf(" This version ignores the OFFSET parameter.\n"); + printf("***** WARNING: ******\n"); + } + + printf("Total Aggregate Array size = %llu (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE); + printf("Total Aggregate Memory per array = %.1f MiB (= %.1f GiB).\n", + BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0), + BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0)); + printf("Total Aggregate memory required = %.1f MiB (= %.1f GiB).\n", + (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.), + (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.)); + printf("Data is distributed across %d MPI ranks\n",numranks); + printf(" Array size per MPI rank = %llu (elements)\n" , (unsigned long long) array_elements); + printf(" Memory per array per MPI rank = %.1f MiB (= %.1f GiB).\n", + BytesPerWord * ( (double) array_elements / 1024.0/1024.0), + BytesPerWord * ( (double) array_elements / 1024.0/1024.0/1024.0)); + printf(" Total memory per MPI rank = %.1f MiB (= %.1f GiB).\n", + (3.0 * BytesPerWord) * ( (double) array_elements / 1024.0/1024.), + (3.0 * BytesPerWord) * ( (double) array_elements / 1024.0/1024./1024.)); + + printf(HLINE); + printf("Each kernel will be executed %d times.\n", NTIMES); + printf(" The *best* time for each kernel (excluding the first iteration)\n"); + printf(" will be used to compute the reported bandwidth.\n"); + printf("The SCALAR value used for this run is %f\n",SCALAR); + +#ifdef _OPENMP + printf(HLINE); +#pragma omp parallel + { +#pragma omp master + { + k = omp_get_num_threads(); + printf ("Number of Threads requested for each MPI rank = %i\n",k); + } + } +#endif + +#ifdef _OPENMP + k = 0; +#pragma omp parallel +#pragma omp atomic + k++; + printf ("Number of Threads counted for rank 0 = %i\n",k); +#endif + + } + + /* --- SETUP --- initialize arrays and estimate precision of timer --- */ + +#pragma omp parallel for + for (j=0; j= 1) + printf("Your timer granularity/precision appears to be " + "%d microseconds.\n", quantum); + else { + printf("Your timer granularity 
appears to be " + "less than one microsecond.\n"); + quantum = 1; + } + } + + /* Get initial timing estimate to compare to timer granularity. */ + /* All ranks need to run this code since it changes the values in array a */ + t = MPI_Wtime(); +#pragma omp parallel for + for (j = 0; j < array_elements; j++) + a[j] = 2.0E0 * a[j]; + t = 1.0E6 * (MPI_Wtime() - t); + + if (myrank == 0) { + printf("Each test below will take on the order" + " of %d microseconds.\n", (int) t ); + printf(" (= %d timer ticks)\n", (int) (t/quantum) ); + printf("Increase the size of the arrays if this shows that\n"); + printf("you are not getting at least 20 timer ticks per test.\n"); + + printf(HLINE); + + printf("WARNING -- The above is only a rough guideline.\n"); + printf("For best results, please be sure you know the\n"); + printf("precision of your system timer.\n"); + printf(HLINE); +#ifdef VERBOSE + t1 = MPI_Wtime(); + printf("VERBOSE: total setup time for rank 0 = %f seconds\n",t1-t0); + printf(HLINE); +#endif + } + + /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ + + // This code has more barriers and timing calls than are actually needed, but + // this should not cause a problem for arrays that are large enough to satisfy + // the STREAM run rules. + // MAJOR FIX!!! Version 1.7 had the start timer for each loop *after* the + // MPI_Barrier(), when it should have been *before* the MPI_Barrier(). + // + + scalar = SCALAR; + for (k=0; k= 0 ? (a) : -(a)) +#endif +void computeSTREAMerrors(STREAM_TYPE *aAvgErr, STREAM_TYPE *bAvgErr, STREAM_TYPE *cAvgErr) +{ + STREAM_TYPE aj,bj,cj,scalar; + STREAM_TYPE aSumErr,bSumErr,cSumErr; + ssize_t j; + int k; + + /* reproduce initialization */ + aj = 1.0; + bj = 2.0; + cj = 0.0; + /* a[] is modified during timing check */ + aj = 2.0E0 * aj; + /* now execute timing loop */ + scalar = SCALAR; + for (k=0; k epsilon) { + err++; + printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon); + printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj); + ierr = 0; + for (j=0; j epsilon) { + ierr++; +#ifdef VERBOSE + if (ierr < 10) { + printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n", + j,aj,a[j],abs((aj-a[j])/aAvgErr)); + } +#endif + } + } + printf(" For array a[], %d errors were found.\n",ierr); + } + if (abs(bAvgErr/bj) > epsilon) { + err++; + printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon); + printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj); + printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon); + ierr = 0; + for (j=0; j epsilon) { + ierr++; +#ifdef VERBOSE + if (ierr < 10) { + printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n", + j,bj,b[j],abs((bj-b[j])/bAvgErr)); + } +#endif + } + } + printf(" For array b[], %d errors were found.\n",ierr); + } + if (abs(cAvgErr/cj) > epsilon) { + err++; + printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon); + printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj); + printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon); + ierr = 0; + for (j=0; j epsilon) { + ierr++; +#ifdef VERBOSE + if (ierr < 10) { + printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n", + j,cj,c[j],abs((cj-c[j])/cAvgErr)); + } +#endif + } + } + printf(" For array c[], %d errors were found.\n",ierr); + } + if (err == 0) { + printf ("Solution Validates: avg error less than %e on all three 
arrays\n",epsilon); + } +#ifdef VERBOSE + printf ("Results Validation Verbose Results: \n"); + printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj); + printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]); + printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj)); +#endif +} + +#ifdef TUNED +/* stubs for "tuned" versions of the kernels */ +void tuned_STREAM_Copy() +{ + ssize_t j; +#pragma omp parallel for + for (j=0; j +#include +#include +#include +#include +#include +#include + +#ifdef _OPENMP +#include +#endif + +#ifndef STREAM_ARRAY_SIZE +#define STREAM_ARRAY_SIZE 100000000 +#endif + +#define NTIMES 10 +#define OFFSET 0 + +# define HLINE "-------------------------------------------------------------\n" + +#ifndef MIN +#define MIN(x,y) ((x)<(y)?(x):(y)) +#endif +#ifndef MAX +#define MAX(x,y) ((x)>(y)?(x):(y)) +#endif + +#ifndef STREAM_TYPE +#define STREAM_TYPE double +#endif + +#define STREAM_RESTRICT __restrict__ + +static STREAM_TYPE* STREAM_RESTRICT a; +static STREAM_TYPE* STREAM_RESTRICT b; +static STREAM_TYPE* STREAM_RESTRICT c; + +static double avgtime[4] = {0}, maxtime[4] = {0}, + mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX}; + +static char *label[4] = {"Copy: ", "Scale: ", + "Add: ", "Triad: "}; + +static double bytes[4] = { + 2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, + 2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, + 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, + 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE + }; + +double mysecond(); +void checkSTREAMresults(); + + +int main() { + int quantum, checktick(); + int BytesPerWord; + int k; + ssize_t j; + STREAM_TYPE scalar; + double t, times[4][NTIMES]; + + /* --- SETUP --- determine precision and check timing --- */ + + printf(HLINE); + printf("STREAM Crossroads Memory Bandwidth\n"); + printf("(Based on the original STREAM benchmark by John D. McCalpin)\n"); + printf(HLINE); + BytesPerWord = sizeof(STREAM_TYPE); + printf("This system uses %d bytes per array element.\n", + BytesPerWord); + + printf(HLINE); + + const unsigned long long int array_size = (sizeof(STREAM_TYPE) * (STREAM_ARRAY_SIZE + OFFSET)); + + printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) array_size, OFFSET); + printf("Memory per array = %.1f MiB (= %.1f GiB).\n", + BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0), + BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0)); + printf("Total memory required = %.1f MiB (= %.1f GiB).\n", + (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.), + (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.)); + printf("Each kernel will be executed %d times.\n", NTIMES); + printf(" The *best* time for each kernel (excluding the first iteration)\n"); + printf(" will be used to compute the reported bandwidth.\n"); + + printf("Allocating arrays ...\n"); + + /////////////////////////////////////////////////////////////////////////////// + // VENDOR NOTIFICATION + // + // Memory allocation routines should be changed to reflect allocation in memory + // hierarchy level. 
All modifications should be reported in benchmark response + // + /////////////////////////////////////////////////////////////////////////////// + + posix_memalign((void**) &a, 64, array_size ); + posix_memalign((void**) &b, 64, array_size ); + posix_memalign((void**) &c, 64, array_size ); + + /////////////////////////////////////////////////////////////////////////////// + // END VENDOR NOTIFICATION + /////////////////////////////////////////////////////////////////////////////// + +#ifdef _OPENMP + k = 0; +#pragma omp parallel +#pragma omp atomic + k++; + printf ("Number of Threads counted = %i\n",k); +#endif + + printf("Populating values and performing first touch ... \n"); + /* Get initial value for system clock. */ +#pragma omp parallel for + for (j=0; j= 1) + printf("Your clock granularity/precision appears to be " + "%d microseconds.\n", quantum); + else { + printf("Your clock granularity appears to be " + "less than one microsecond.\n"); + quantum = 1; + } + + t = mysecond(); +#pragma omp parallel for + for (j = 0; j < STREAM_ARRAY_SIZE; j++) + a[j] = 2.0E0 * a[j]; + t = 1.0E6 * (mysecond() - t); + + printf("Each test below will take on the order" + " of %d microseconds.\n", (int) t ); + printf(" (= %d clock ticks)\n", (int) (t/quantum) ); + printf("Increase the size of the arrays if this shows that\n"); + printf("you are not getting at least 20 clock ticks per test.\n"); + + printf(HLINE); + + printf("WARNING -- The above is only a rough guideline.\n"); + printf("For best results, please be sure you know the\n"); + printf("precision of your system timer.\n"); + printf(HLINE); + + /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ + + scalar = 3.0; + for (k=0; k < NTIMES; k++) + { + times[0][k] = mysecond(); + #pragma omp parallel for + for (j=0; j + +double mysecond() +{ + struct timeval tp; + struct timezone tzp; + int i; + + i = gettimeofday(&tp,&tzp); + return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 ); +} + +#ifndef abs +#define abs(a) ((a) >= 0 ? 
(a) : -(a)) +#endif +void checkSTREAMresults () +{ + STREAM_TYPE aj,bj,cj,scalar; + STREAM_TYPE aSumErr,bSumErr,cSumErr; + STREAM_TYPE aAvgErr,bAvgErr,cAvgErr; + double epsilon; + ssize_t j; + int k,ierr,err; + + /* reproduce initialization */ + aj = 1.0; + bj = 2.0; + cj = 0.0; + /* a[] is modified during timing check */ + aj = 2.0E0 * aj; + /* now execute timing loop */ + scalar = 3.0; + for (k=0; k epsilon) { + err++; + printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon); + printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj); + ierr = 0; + for (j=0; j epsilon) { + ierr++; +#ifdef VERBOSE + if (ierr < 10) { + printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n", + j,aj,a[j],abs((aj-a[j])/aAvgErr)); + } +#endif + } + } + printf(" For array a[], %d errors were found.\n",ierr); + } + if (abs(bAvgErr/bj) > epsilon) { + err++; + printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon); + printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj); + printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon); + ierr = 0; + for (j=0; j epsilon) { + ierr++; +#ifdef VERBOSE + if (ierr < 10) { + printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n", + j,bj,b[j],abs((bj-b[j])/bAvgErr)); + } +#endif + } + } + printf(" For array b[], %d errors were found.\n",ierr); + } + if (abs(cAvgErr/cj) > epsilon) { + err++; + printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon); + printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj); + printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon); + ierr = 0; + for (j=0; j epsilon) { + ierr++; +#ifdef VERBOSE + if (ierr < 10) { + printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n", + j,cj,c[j],abs((cj-c[j])/cAvgErr)); + } +#endif + } + } + printf(" For array c[], %d errors were found.\n",ierr); + } + if (err == 0) { + printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon); + } +#ifdef VERBOSE + printf ("Results Validation Verbose Results: \n"); + printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj); + printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]); + printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj)); +#endif +} diff --git a/utils/pav_config/test_src/dgemm b/utils/pav_config/test_src/dgemm new file mode 120000 index 00000000..8c5d18b9 --- /dev/null +++ b/utils/pav_config/test_src/dgemm @@ -0,0 +1 @@ +../../../microbenchmarks/dgemm \ No newline at end of file diff --git a/utils/pav_config/test_src/ior b/utils/pav_config/test_src/ior new file mode 120000 index 00000000..06a5443f --- /dev/null +++ b/utils/pav_config/test_src/ior @@ -0,0 +1 @@ +../../../microbenchmarks/ior \ No newline at end of file diff --git a/utils/pav_config/test_src/mdtest b/utils/pav_config/test_src/mdtest new file mode 120000 index 00000000..e7d2e746 --- /dev/null +++ b/utils/pav_config/test_src/mdtest @@ -0,0 +1 @@ +../../../microbenchmarks/mdtest \ No newline at end of file diff --git a/utils/pav_config/test_src/osumb b/utils/pav_config/test_src/osumb new file mode 120000 index 00000000..1315ac3b --- /dev/null +++ b/utils/pav_config/test_src/osumb @@ -0,0 +1 @@ +../../../microbenchmarks/osumb \ No newline at end of file diff --git a/utils/pav_config/test_src/stream b/utils/pav_config/test_src/stream new file mode 120000 index 
00000000..55789187 --- /dev/null +++ b/utils/pav_config/test_src/stream @@ -0,0 +1 @@ +../../../microbenchmarks/stream \ No newline at end of file diff --git a/utils/pav_config/tests/stream.yaml b/utils/pav_config/tests/stream.yaml index ba2c09d0..ed62f732 100644 --- a/utils/pav_config/tests/stream.yaml +++ b/utils/pav_config/tests/stream.yaml @@ -132,14 +132,14 @@ _base: - 'fi' - 'export PAV_CC PAV_FC PAV_CFLAGS PAV_FFLAGS' - 'make clean' - - 'make {{target}} || exit 1' + - 'make all || exit 1' - '[ -x {{target}} ] || exit 1' run: env: CC: '{{compilers.cc}}' OMP_NUM_THREADS: '{{omp_num_threads}}' - GOMP_CPU_AFFINITY: '0-{{omp_num_threads-1}}' +# GOMP_CPU_AFFINITY: '0-{{omp_num_threads-1}}' preamble: - 'module load friendly-testing' - 'module load {{compilers.name}}/{{compilers.version}}' @@ -147,7 +147,7 @@ _base: cmds: - 'NTIMES={{ntimes}}' - 'N={{stream_array_size}}000000' - - 'echo "GOMP_CPU_AFFINITY: $GOMP_CPU_AFFINITY"' + # - 'echo "GOMP_CPU_AFFINITY: $GOMP_CPU_AFFINITY"' - 'echo "OMP_NUM_THREADS: $OMP_NUM_THREADS"' - 'echo "NTIMES=$NTIMES"' - 'echo "N=${N}"' @@ -363,16 +363,13 @@ cts1_ats5: numnodes: '1' omp_num_threads: '1' stream_array_size: '40' + target: "stream_mpi.exe" schedule: nodes: "{{numnodes}}" share_allocation: true tasks_per_node: "{{tpn}}" - run: - env: - GOMP_CPU_AFFINITY: '' - result_parse: regex: triad_once: @@ -399,9 +396,9 @@ xrds_ats5: variables: tpn: [8, 32, 56, 88, 112] arch: "spr" - target: "xrds-stream.exe" + target: "xrds_stream.exe" stream_array_size: '40' - #ntimes: 20 + ntimes: 20 #omp_places: [cores, sockets] #omp_proc_bind: [true] numnodes: '1' @@ -410,7 +407,6 @@ xrds_ats5: chunk: '{{chunk_ids.0}}' schedule: - partition: 'hbm' nodes: "{{numnodes}}" share_allocation: true tasks_per_node: "{{tpn}}" @@ -433,9 +429,6 @@ xrds_ats5: - 'module load {{compilers.name}}/{{compilers.version}}' - 'module load {{mpis.name}}/{{mpis.version}}' - env: - GOMP_CPU_AFFINITY: '' - result_parse: regex: triad_once: @@ -447,3 +440,9 @@ xrds_ats5: result_evaluate: per_proc_bw: 'sum(triad_once)/len(triad_once)' total_bw: 'sum(triad_once)' + +roci_ats5: + inherits_from: xrds_ats5 + + schedule: + partition: 'hbm'
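
The closing hunk defines roci_ats5 entirely by inheritance: Pavilion's inherits_from makes a child test pick up every key from its parent and restate only what it overrides, which is why moving partition: 'hbm' out of xrds_ats5 and into the new entry is the whole change. The sketch below is a hypothetical resolved view of roci_ats5 under the assumption that Pavilion deep-merges the child's keys over the parent's; every value shown is copied from the xrds_ats5 entry above (only a subset of its keys is shown), and only the last line is roci_ats5's own:

    # Hypothetical resolved view of roci_ats5 (illustrative only, not part of the patch)
    roci_ats5:
      variables:
        tpn: [8, 32, 56, 88, 112]     # inherited from xrds_ats5
        arch: "spr"                   # inherited
        target: "xrds_stream.exe"     # inherited
        stream_array_size: '40'       # inherited
        ntimes: 20                    # inherited
        numnodes: '1'                 # inherited
      schedule:
        nodes: "{{numnodes}}"         # inherited
        share_allocation: true        # inherited
        tasks_per_node: "{{tpn}}"     # inherited
        partition: 'hbm'              # the one key roci_ats5 sets itself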