From a3f29a382338e59818b4f6e918992b2153cc8ccb Mon Sep 17 00:00:00 2001
From: wyy566 <531938832@qq.com>
Date: Thu, 9 Jun 2022 14:44:52 +0800
Subject: [PATCH] adapt to Spark 3.1.1

---
 README.md | 16 +-
 ml-accelerator/pom.xml | 6 +-
 .../optimize/FirstOrderMinimizerX.scala | 291 -
 .../main/scala/breeze/optimize/LBFGSX.scala | 110 -
 .../main/scala/breeze/optimize/OWLQNX.scala | 97 -
 .../DecisionTreeClassifier.scala | 160 -
 .../ml/classification/GBTClassifier.scala | 423 -
 .../spark/ml/classification/LinearSVC.scala | 304 -
 .../classification/LogisticRegression.scala | 703 --
 .../RandomForestClassifier.scala | 356 -
 .../org/apache/spark/ml/feature/IDF.scala | 195 -
 .../org/apache/spark/ml/fpm/PrefixSpan.scala | 168 +
 .../DifferentiableLossAggregatorX.scala | 98 -
 .../optim/aggregator/HingeAggregatorX.scala | 112 -
 .../optim/aggregator/HuberAggregatorX.scala | 163 -
 .../aggregator/LeastSquaresAggregatorX.scala | 226 -
 .../aggregator/LogisticAggregatorX.scala | 379 -
 .../ml/optim/loss/RDDLossFunctionX.scala | 77 -
 .../apache/spark/ml/recommendation/ALS.scala | 174 +-
 .../ml/regression/DecisionTreeRegressor.scala | 184 -
 .../spark/ml/regression/GBTRegressor.scala | 354 -
 .../ml/regression/LinearRegression.scala | 565 --
 .../ml/regression/RandomForestRegressor.scala | 312 -
 .../apache/spark/ml/stat/Correlation.scala | 93 -
 .../spark/ml/tree/impl/DecisionForest.scala | 1275 ---
 .../ml/tree/impl/GradientBoostedTrees.scala | 663 --
 .../spark/ml/tree/impl/NodeIdCache.scala | 360 -
 .../spark/ml/tree/impl/RandomForest.scala | 1231 ---
 .../ml/tree/impl/RandomForest4GBDTX.scala | 621 --
 .../spark/ml/tree/impl/RandomForestRaw.scala | 1157 ---
 .../org/apache/spark/ml/tree/treeParams.scala | 611 --
 .../spark/mllib/clustering/KMACCm.scala | 124 -
 .../spark/mllib/clustering/KMeans.scala | 616 --
 .../apache/spark/mllib/clustering/LDA.scala | 16 +-
 .../spark/mllib/clustering/LDAOptimizer.scala | 70 +-
 .../spark/mllib/clustering/LocalKMeansX.scala | 156 -
 .../org/apache/spark/mllib/feature/IDF.scala | 231 -
 .../apache/spark/mllib/fpm/PrefixSpan.scala | 18 +-
 .../linalg/EigenValueDecomposition.scala | 285 -
 .../mllib/linalg/distributed/RowMatrix.scala | 910 --
 .../mllib/stat/correlation/Correlation.scala | 103 -
 .../stat/correlation/PearsonCorrelation.scala | 119 -
 .../correlation/SpearmanCorrelation.scala | 65 -
 .../spark/mllib/tree/DecisionTree.scala | 291 -
 ml-core/pom.xml | 4 +-
 .../main/scala/breeze/numerics/DigammaX.scala | 6 -
 .../scala/org/apache/spark/ml/tree/Node.scala | 635 --
 .../org/apache/spark/ml/tree/Split.scala | 274 -
 .../spark/ml/tree/impl/BaggedPoint.scala | 135 -
 .../tree/impl/DTFeatureStatsAggregator.scala | 111 -
 .../ml/tree/impl/DTStatsAggregator.scala | 187 -
 .../tree/impl/GradientBoostedTreesCore.scala | 256 -
 .../spark/ml/tree/impl/TreePointX.scala | 6 -
 .../spark/ml/tree/impl/TreePointY.scala | 6 -
 .../spark/mllib/clustering/LDAUtilsX.scala | 6 -
 .../clustering/OnlineLDAOptimizerXObj.scala | 45 +-
 .../spark/mllib/fpm/LocalPrefixSpan.scala | 6 -
 .../spark/mllib/fpm/PrefixSpanBase.scala | 6 -
 .../spark/mllib/tree/impurity/Entropy.scala | 171 -
 .../spark/mllib/tree/impurity/Gini.scala | 168 -
 .../mllib/tree/impurity/Impurities.scala | 38 -
 .../spark/mllib/tree/impurity/Impurity.scala | 204 -
 .../spark/mllib/tree/impurity/Variance.scala | 148 -
 ml-kernel-client-core/pom.xml | 2 +-
 ml-kernel-client/pom.xml | 4 +-
 .../scala/breeze/linalg/blas/YTYUtils.scala | 5 +-
 .../src/main/scala/breeze/optimize/ACC.scala | 47 -
 .../main/scala/breeze/optimize/LBFGSL.scala | 84 -
 .../main/scala/breeze/optimize/OWLQNL.scala | 78 -
 .../spark/ml/recommendation/ALSUtils.scala | 5 +-
 .../tree/impl/GradientBoostedTreesUtil.scala | 77 -
 .../spark/mllib.clustering/KmeansUtil.scala | 38 -
 .../spark/mllib.clustering/LDAUtilsXOpt.scala | 3 +-
 ml-xgboost/.clang-tidy | 21 -
 ml-xgboost/.editorconfig | 11 -
 ml-xgboost/.gitmodules | 3 -
 ml-xgboost/.travis.yml | 79 -
 ml-xgboost/CITATION | 18 -
 ml-xgboost/CMakeLists.txt | 310 -
 ml-xgboost/CONTRIBUTORS.md | 104 -
 ml-xgboost/Jenkinsfile | 435 -
 ml-xgboost/Jenkinsfile-win64 | 151 -
 ml-xgboost/LICENSE | 201 -
 ml-xgboost/Makefile | 147 -
 ml-xgboost/NEWS.md | 1100 ---
 ml-xgboost/R-package/.Rbuildignore | 6 -
 ml-xgboost/R-package/CMakeLists.txt | 38 -
 ml-xgboost/R-package/DESCRIPTION | 67 -
 ml-xgboost/R-package/LICENSE | 13 -
 ml-xgboost/R-package/NAMESPACE | 91 -
 ml-xgboost/R-package/R/callbacks.R | 831 --
 ml-xgboost/R-package/R/utils.R | 352 -
 ml-xgboost/R-package/R/xgb.Booster.R | 711 --
 ml-xgboost/R-package/R/xgb.DMatrix.R | 380 -
 ml-xgboost/R-package/R/xgb.DMatrix.save.R | 24 -
 ml-xgboost/R-package/R/xgb.create.features.R | 87 -
 ml-xgboost/R-package/R/xgb.cv.R | 319 -
 ml-xgboost/R-package/R/xgb.dump.R | 72 -
 ml-xgboost/R-package/R/xgb.ggplot.R | 135 -
 ml-xgboost/R-package/R/xgb.importance.R | 139 -
 ml-xgboost/R-package/R/xgb.load.R | 47 -
 ml-xgboost/R-package/R/xgb.load.raw.R | 14 -
 ml-xgboost/R-package/R/xgb.model.dt.tree.R | 159 -
 ml-xgboost/R-package/R/xgb.plot.deepness.R | 150 -
 ml-xgboost/R-package/R/xgb.plot.importance.R | 125 -
 ml-xgboost/R-package/R/xgb.plot.multi.trees.R | 148 -
 ml-xgboost/R-package/R/xgb.plot.shap.R | 218 -
 ml-xgboost/R-package/R/xgb.plot.tree.R | 138 -
 ml-xgboost/R-package/R/xgb.save.R | 43 -
 ml-xgboost/R-package/R/xgb.save.raw.R | 23 -
 ml-xgboost/R-package/R/xgb.serialize.R | 21 -
 ml-xgboost/R-package/R/xgb.train.R | 377 -
 ml-xgboost/R-package/R/xgb.unserialize.R | 12 -
 ml-xgboost/R-package/R/xgboost.R | 113 -
 ml-xgboost/R-package/README.md | 33 -
 ml-xgboost/R-package/cleanup | 4 -
 ml-xgboost/R-package/configure | 3891 --------
 ml-xgboost/R-package/configure.ac | 55 -
 ml-xgboost/R-package/configure.win | 0
 ml-xgboost/R-package/demo/00Index | 15 -
 ml-xgboost/R-package/demo/README.md | 20 -
 ml-xgboost/R-package/demo/basic_walkthrough.R | 112 -
 .../R-package/demo/boost_from_prediction.R | 26 -
 ml-xgboost/R-package/demo/caret_wrapper.R | 35 -
 .../R-package/demo/create_sparse_matrix.R | 89 -
 ml-xgboost/R-package/demo/cross_validation.R | 51 -
 ml-xgboost/R-package/demo/custom_objective.R | 65 -
 ml-xgboost/R-package/demo/early_stopping.R | 40 -
 .../R-package/demo/generalized_linear_model.R | 34 -
 ml-xgboost/R-package/demo/gpu_accelerated.R | 45 -
 .../R-package/demo/interaction_constraints.R | 105 -
 .../R-package/demo/poisson_regression.R | 7 -
 .../R-package/demo/predict_first_ntree.R | 23 -
 .../R-package/demo/predict_leaf_indices.R | 53 -
 ml-xgboost/R-package/demo/runall.R | 14 -
 .../R-package/demo/tweedie_regression.R | 49 -
 ml-xgboost/R-package/man/agaricus.test.Rd | 33 -
 ml-xgboost/R-package/man/agaricus.train.Rd | 33 -
 ml-xgboost/R-package/man/callbacks.Rd | 37 -
 ml-xgboost/R-package/man/cb.cv.predict.Rd | 43 -
 ml-xgboost/R-package/man/cb.early.stop.Rd | 66 -
 ml-xgboost/R-package/man/cb.evaluation.log.Rd | 31 -
 .../R-package/man/cb.gblinear.history.Rd | 95 -
 .../R-package/man/cb.print.evaluation.Rd | 29 -
 .../R-package/man/cb.reset.parameters.Rd | 36 -
 ml-xgboost/R-package/man/cb.save.model.Rd | 33 -
 ml-xgboost/R-package/man/dim.xgb.DMatrix.Rd | 28 -
 .../R-package/man/dimnames.xgb.DMatrix.Rd | 35 -
 ml-xgboost/R-package/man/getinfo.Rd | 45 -
 .../R-package/man/predict.xgb.Booster.Rd | 202 -
 ml-xgboost/R-package/man/print.xgb.Booster.Rd | 29 -
 ml-xgboost/R-package/man/print.xgb.DMatrix.Rd | 28 -
 ml-xgboost/R-package/man/print.xgb.cv.Rd | 31 -
 ml-xgboost/R-package/man/setinfo.Rd | 43 -
 ml-xgboost/R-package/man/slice.xgb.DMatrix.Rd | 40 -
 .../R-package/man/xgb.Booster.complete.Rd | 50 -
 ml-xgboost/R-package/man/xgb.DMatrix.Rd | 35 -
 ml-xgboost/R-package/man/xgb.DMatrix.save.Rd | 24 -
 ml-xgboost/R-package/man/xgb.attr.Rd | 86 -
 ml-xgboost/R-package/man/xgb.config.Rd | 28 -
 .../R-package/man/xgb.create.features.Rd | 92 -
 ml-xgboost/R-package/man/xgb.cv.Rd | 164 -
 ml-xgboost/R-package/man/xgb.dump.Rd | 62 -
 .../R-package/man/xgb.gblinear.history.Rd | 29 -
 ml-xgboost/R-package/man/xgb.importance.Rd | 101 -
 ml-xgboost/R-package/man/xgb.load.Rd | 41 -
 ml-xgboost/R-package/man/xgb.load.raw.Rd | 14 -
 ml-xgboost/R-package/man/xgb.model.dt.tree.Rd | 83 -
 ml-xgboost/R-package/man/xgb.parameters.Rd | 31 -
 ml-xgboost/R-package/man/xgb.plot.deepness.Rd | 80 -
 .../R-package/man/xgb.plot.importance.Rd | 94 -
 .../R-package/man/xgb.plot.multi.trees.Rd | 82 -
 ml-xgboost/R-package/man/xgb.plot.shap.Rd | 158 -
 ml-xgboost/R-package/man/xgb.plot.tree.Rd | 91 -
 ml-xgboost/R-package/man/xgb.save.Rd | 41 -
 ml-xgboost/R-package/man/xgb.save.raw.Rd | 27 -
 ml-xgboost/R-package/man/xgb.serialize.Rd | 29 -
 ml-xgboost/R-package/man/xgb.train.Rd | 299 -
 ml-xgboost/R-package/man/xgb.unserialize.Rd | 14 -
 .../R-package/man/xgboost-deprecated.Rd | 16 -
 .../remove_warning_suppression_pragma.sh | 14 -
 ml-xgboost/R-package/src/Makevars.in | 24 -
 ml-xgboost/R-package/src/Makevars.win | 38 -
 ml-xgboost/R-package/src/init.c | 85 -
 ml-xgboost/R-package/src/xgboost_R.cc | 491 -
 ml-xgboost/R-package/src/xgboost_R.h | 247 -
 ml-xgboost/R-package/src/xgboost_assert.c | 26 -
 ml-xgboost/R-package/src/xgboost_custom.cc | 69 -
 ml-xgboost/R-package/tests/testthat.R | 4 -
 .../R-package/tests/testthat/test_basic.R | 384 -
 .../R-package/tests/testthat/test_callbacks.R | 330 -
 .../tests/testthat/test_custom_objective.R | 77 -
 .../R-package/tests/testthat/test_dmatrix.R | 117 -
 .../R-package/tests/testthat/test_gc_safety.R | 15 -
 .../R-package/tests/testthat/test_glm.R | 48 -
 .../R-package/tests/testthat/test_helpers.R | 376 -
 .../testthat/test_interaction_constraints.R | 55 -
 .../tests/testthat/test_interactions.R | 141 -
 .../R-package/tests/testthat/test_lint.R | 27 -
 .../R-package/tests/testthat/test_monotone.R | 24 -
 .../tests/testthat/test_parameter_exposure.R | 30 -
 .../tests/testthat/test_poisson_regression.R | 14 -
 .../R-package/tests/testthat/test_update.R | 107 -
 .../R-package/vignettes/discoverYourData.Rmd | 338 -
 ml-xgboost/R-package/vignettes/vignette.css | 225 -
 ml-xgboost/R-package/vignettes/xgboost.Rnw | 222 -
 ml-xgboost/R-package/vignettes/xgboost.bib | 30 -
 .../vignettes/xgboostPresentation.Rmd | 428 -
 .../R-package/vignettes/xgboostfromJSON.Rmd | 189 -
 ml-xgboost/README.md | 67 -
 ml-xgboost/amalgamation/dmlc-minimum0.cc | 16 -
 ml-xgboost/amalgamation/xgboost-all0.cc | 83 -
 ml-xgboost/appveyor.yml | 133 -
 ml-xgboost/cmake/Doc.cmake | 16 -
 ml-xgboost/cmake/FindPrefetchIntrinsics.cmake | 22 -
 ml-xgboost/cmake/Python_version.in | 1 -
 ml-xgboost/cmake/Sanitizer.cmake | 63 -
 ml-xgboost/cmake/Utils.cmake | 143 -
 ml-xgboost/cmake/Version.cmake | 9 -
 ml-xgboost/cmake/modules/FindASan.cmake | 13 -
 ml-xgboost/cmake/modules/FindLSan.cmake | 13 -
 ml-xgboost/cmake/modules/FindLibR.cmake | 183 -
 ml-xgboost/cmake/modules/FindNVML.cmake | 23 -
 ml-xgboost/cmake/modules/FindNccl.cmake | 65 -
 ml-xgboost/cmake/modules/FindTSan.cmake | 13 -
 ml-xgboost/cmake/modules/FindUBSan.cmake | 13 -
 ml-xgboost/cmake/version_config.h.in | 11 -
 ml-xgboost/cmake/xgboost-config.cmake.in | 5 -
 ml-xgboost/cub/.cproject | 1211 ---
 ml-xgboost/cub/CHANGE_LOG.TXT | 346 -
 ml-xgboost/cub/LICENSE.TXT | 24 -
 ml-xgboost/cub/README.md | 128 -
 ml-xgboost/cub/common.mk | 233 -
 ml-xgboost/cub/cub/agent/agent_histogram.cuh | 783 --
 .../cub/agent/agent_radix_sort_downsweep.cuh | 753 --
 .../cub/agent/agent_radix_sort_upsweep.cuh | 449 -
 ml-xgboost/cub/cub/agent/agent_reduce.cuh | 475 -
 .../cub/cub/agent/agent_reduce_by_key.cuh | 549 --
 ml-xgboost/cub/cub/agent/agent_rle.cuh | 830 --
 ml-xgboost/cub/cub/agent/agent_scan.cuh | 471 -
 .../cub/cub/agent/agent_segment_fixup.cuh | 375 -
 ml-xgboost/cub/cub/agent/agent_select_if.cuh | 703 --
 ml-xgboost/cub/cub/agent/agent_spmv_csrt.cuh | 638 --
 ml-xgboost/cub/cub/agent/agent_spmv_orig.cuh | 924 --
 .../cub/cub/agent/agent_spmv_row_based.cuh | 470 -
 .../cub/agent/single_pass_scan_operators.cuh | 792 --
 .../cub/block/block_adjacent_difference.cuh | 596 --
 .../cub/cub/block/block_discontinuity.cuh | 1148 ---
 ml-xgboost/cub/cub/block/block_exchange.cuh | 1248 ---
 ml-xgboost/cub/cub/block/block_histogram.cuh | 415 -
 ml-xgboost/cub/cub/block/block_load.cuh | 1268 ---
 ml-xgboost/cub/cub/block/block_radix_rank.cuh | 432 -
 ml-xgboost/cub/cub/block/block_radix_sort.cuh | 865 --
 .../cub/cub/block/block_raking_layout.cuh | 153 -
 ml-xgboost/cub/cub/block/block_reduce.cuh | 607 --
 ml-xgboost/cub/cub/block/block_scan.cuh | 2126 -----
 ml-xgboost/cub/cub/block/block_shuffle.cuh | 305 -
 ml-xgboost/cub/cub/block/block_store.cuh | 1000 --
 .../block_histogram_atomic.cuh | 82 -
 .../specializations/block_histogram_sort.cuh | 226 -
 .../specializations/block_reduce_raking.cuh | 222 -
 .../block_reduce_raking_commutative_only.cuh | 202 -
 .../block_reduce_warp_reductions.cuh | 222 -
 .../specializations/block_scan_raking.cuh | 666 --
 .../specializations/block_scan_warp_scans.cuh | 392 -
 .../block_scan_warp_scans2.cuh | 436 -
 .../block_scan_warp_scans3.cuh | 412 -
 ml-xgboost/cub/cub/cub.cuh | 96 -
 .../cub/cub/device/device_histogram.cuh | 866 --
 .../cub/cub/device/device_partition.cuh | 273 -
 .../cub/cub/device/device_radix_sort.cuh | 796 --
 ml-xgboost/cub/cub/device/device_reduce.cuh | 699 --
 .../cub/device/device_run_length_encode.cuh | 278 -
 ml-xgboost/cub/cub/device/device_scan.cuh | 423 -
 .../device/device_segmented_radix_sort.cuh | 855 --
 .../cub/device/device_segmented_reduce.cuh | 607 --
 ml-xgboost/cub/cub/device/device_select.cuh | 369 -
 ml-xgboost/cub/cub/device/device_spmv.cuh | 174 -
 .../device/dispatch/dispatch_histogram.cuh | 1085 ---
 .../device/dispatch/dispatch_radix_sort.cuh | 1572 ----
 .../cub/device/dispatch/dispatch_reduce.cuh | 928 --
 .../dispatch/dispatch_reduce_by_key.cuh | 554 --
 .../cub/cub/device/dispatch/dispatch_rle.cuh | 538 --
 .../cub/cub/device/dispatch/dispatch_scan.cuh | 563 --
 .../device/dispatch/dispatch_select_if.cuh | 542 --
 .../device/dispatch/dispatch_spmv_csrt.cuh | 477 -
 .../device/dispatch/dispatch_spmv_orig.cuh | 850 --
 .../dispatch/dispatch_spmv_row_based.cuh | 877 --
 ml-xgboost/cub/cub/grid/grid_barrier.cuh | 211 -
 ml-xgboost/cub/cub/grid/grid_even_share.cuh | 185 -
 ml-xgboost/cub/cub/grid/grid_mapping.cuh | 95 -
 ml-xgboost/cub/cub/grid/grid_queue.cuh | 220 -
 ml-xgboost/cub/cub/host/mutex.cuh | 167 -
 .../cub/iterator/arg_index_input_iterator.cuh | 259 -
 .../cache_modified_input_iterator.cuh | 240 -
 .../cache_modified_output_iterator.cuh | 254 -
 .../cub/iterator/constant_input_iterator.cuh | 235 -
 .../cub/iterator/counting_input_iterator.cuh | 228 -
 .../cub/iterator/discard_output_iterator.cuh | 222 -
 .../cub/iterator/tex_obj_input_iterator.cuh | 310 -
 .../cub/iterator/tex_ref_input_iterator.cuh | 374 -
 .../cub/iterator/transform_input_iterator.cuh | 252 -
 ml-xgboost/cub/cub/thread/thread_load.cuh | 438 -
 .../cub/cub/thread/thread_operators.cuh | 317 -
 ml-xgboost/cub/cub/thread/thread_reduce.cuh | 169 -
 ml-xgboost/cub/cub/thread/thread_scan.cuh | 283 -
 ml-xgboost/cub/cub/thread/thread_search.cuh | 154 -
 ml-xgboost/cub/cub/thread/thread_store.cuh | 422 -
 ml-xgboost/cub/cub/util_allocator.cuh | 708 --
 ml-xgboost/cub/cub/util_arch.cuh | 144 -
 ml-xgboost/cub/cub/util_debug.cuh | 145 -
 ml-xgboost/cub/cub/util_device.cuh | 347 -
 ml-xgboost/cub/cub/util_macro.cuh | 103 -
 ml-xgboost/cub/cub/util_namespace.cuh | 46 -
 ml-xgboost/cub/cub/util_ptx.cuh | 673 --
 ml-xgboost/cub/cub/util_type.cuh | 1141 ---
 .../warp/specializations/warp_reduce_shfl.cuh | 549 --
 .../warp/specializations/warp_reduce_smem.cuh | 373 -
 .../warp/specializations/warp_scan_shfl.cuh | 650 --
 .../warp/specializations/warp_scan_smem.cuh | 395 -
 ml-xgboost/cub/cub/warp/warp_reduce.cuh | 612 -
 ml-xgboost/cub/cub/warp/warp_scan.cuh | 936 --
 ml-xgboost/cub/eclipse code style profile.xml | 155 -
 ml-xgboost/cub/examples/block/.gitignore | 7 -
 ml-xgboost/cub/examples/block/Makefile | 128 -
 .../block/example_block_radix_sort.cu | 323 -
 .../examples/block/example_block_reduce.cu | 290 -
 .../cub/examples/block/example_block_scan.cu | 334 -
 .../cub/examples/block/reduce_by_key.cu | 57 -
 ml-xgboost/cub/examples/device/.gitignore | 8 -
 ml-xgboost/cub/examples/device/Makefile | 197 -
 .../example_device_partition_flagged.cu | 233 -
 .../device/example_device_partition_if.cu | 244 -
 .../device/example_device_radix_sort.cu | 226 -
 .../examples/device/example_device_reduce.cu | 180 -
 .../examples/device/example_device_scan.cu | 186 -
 .../device/example_device_select_flagged.cu | 233 -
 .../device/example_device_select_if.cu | 242 -
 .../device/example_device_select_unique.cu | 221 -
 ...ample_device_sort_find_non_trivial_runs.cu | 384 -
 ml-xgboost/cub/experimental/.gitignore | 1 -
 ml-xgboost/cub/experimental/Makefile | 125 -
 .../experimental/defunct/example_coo_spmv.cu | 1070 ---
 .../defunct/test_device_seg_reduce.cu | 2142 -----
 .../experimental/histogram/histogram_cub.h | 109 -
 .../histogram/histogram_gmem_atomics.h | 185 -
 .../histogram/histogram_smem_atomics.h | 195 -
 .../cub/experimental/histogram_compare.cu | 635 --
 ml-xgboost/cub/experimental/sparse_matrix.h | 1244 ---
 ml-xgboost/cub/experimental/spmv_compare.cu | 917 --
 ml-xgboost/cub/experimental/spmv_script.sh | 30 -
 ml-xgboost/cub/test/.gitignore | 2 -
 ml-xgboost/cub/test/Makefile | 453 -
 ml-xgboost/cub/test/link_a.cu | 11 -
 ml-xgboost/cub/test/link_b.cu | 11 -
 ml-xgboost/cub/test/link_main.cpp | 10 -
 ml-xgboost/cub/test/mersenne.h | 160 -
 ml-xgboost/cub/test/test_allocator.cu | 459 -
 ml-xgboost/cub/test/test_block_histogram.cu | 310 -
 ml-xgboost/cub/test/test_block_load_store.cu | 549 --
 ml-xgboost/cub/test/test_block_radix_sort.cu | 717 --
 ml-xgboost/cub/test/test_block_reduce.cu | 822 --
 ml-xgboost/cub/test/test_block_scan.cu | 929 --
 ml-xgboost/cub/test/test_device_histogram.cu | 1564 ----
 ml-xgboost/cub/test/test_device_radix_sort.cu | 1273 ---
 ml-xgboost/cub/test/test_device_reduce.cu | 1307 ---
 .../cub/test/test_device_reduce_by_key.cu | 853 --
 .../cub/test/test_device_run_length_encode.cu | 890 --
 ml-xgboost/cub/test/test_device_scan.cu | 1015 --
 ml-xgboost/cub/test/test_device_select_if.cu | 1039 ---
 .../cub/test/test_device_select_unique.cu | 648 --
 ml-xgboost/cub/test/test_grid_barrier.cu | 152 -
 ml-xgboost/cub/test/test_iterator.cu | 805 --
 ml-xgboost/cub/test/test_util.h | 1600 ----
 ml-xgboost/cub/test/test_warp_reduce.cu | 837 --
 ml-xgboost/cub/test/test_warp_scan.cu | 630 --
 ml-xgboost/cub/tune/.gitignore | 1 -
 ml-xgboost/cub/tune/Makefile | 192 -
 ml-xgboost/cub/tune/tune_device_reduce.cu | 763 --
 ml-xgboost/demo/.gitignore | 2 -
 ml-xgboost/demo/README.md | 147 -
 .../demo/aft_survival/aft_survival_demo.py | 54 -
 .../aft_survival_demo_with_optuna.py | 78 -
 .../aft_survival/aft_survival_viz_demo.py | 97 -
 .../demo/binary_classification/README.md | 164 -
 .../agaricus-lepiota.data | 8124 -----------------
 .../agaricus-lepiota.fmap | 32 -
 .../agaricus-lepiota.names | 148 -
 .../demo/binary_classification/mapfeat.py | 47 -
 .../demo/binary_classification/mknfold.py | 29 -
 .../demo/binary_classification/mushroom.conf | 29 -
 .../demo/binary_classification/runexp.sh | 15 -
 ml-xgboost/demo/c-api/CMakeLists.txt | 4 -
 ml-xgboost/demo/c-api/Makefile | 19 -
 ml-xgboost/demo/c-api/README.md | 30 -
 ml-xgboost/demo/c-api/c-api-demo.c | 88 -
 ml-xgboost/demo/dask/README.md | 6 -
 ml-xgboost/demo/dask/cpu_training.py | 41 -
 ml-xgboost/demo/dask/gpu_training.py | 45 -
 ml-xgboost/demo/dask/sklearn_cpu_training.py | 39 -
 ml-xgboost/demo/dask/sklearn_gpu_training.py | 42 -
 ml-xgboost/demo/data/README.md | 2 -
 ml-xgboost/demo/data/agaricus.txt.test | 1611 ----
 ml-xgboost/demo/data/agaricus.txt.train | 6513 -------------
 ml-xgboost/demo/data/dermatology.data.test | 110 -
 ml-xgboost/demo/data/dermatology.data.train | 256 -
 ml-xgboost/demo/data/dermatology_process.py | 32 -
 ml-xgboost/demo/data/featmap.txt | 126 -
 ml-xgboost/demo/data/gen_autoclaims.R | 18 -
 ml-xgboost/demo/data/veterans_lung_cancer.csv | 138 -
 .../demo/distributed-training/README.md | 27 -
 .../distributed-training/mushroom.aws.conf | 27 -
 .../distributed-training/plot_model.ipynb | 107 -
 .../demo/distributed-training/run_aws.sh | 11 -
 ml-xgboost/demo/gpu_acceleration/README.md | 5 -
 .../demo/gpu_acceleration/cover_type.py | 40 -
 ml-xgboost/demo/gpu_acceleration/memory.py | 51 -
 ml-xgboost/demo/guide-python/README.md | 16 -
 .../demo/guide-python/basic_walkthrough.py | 92 -
 .../guide-python/boost_from_prediction.py | 25 -
 .../demo/guide-python/cross_validation.py | 62 -
 .../demo/guide-python/custom_objective.py | 49 -
 ml-xgboost/demo/guide-python/custom_rmsle.py | 197 -
 .../demo/guide-python/custom_softmax.py | 148 -
 ml-xgboost/demo/guide-python/evals_result.py | 30 -
 .../demo/guide-python/external_memory.py | 25 -
 .../demo/guide-python/gamma_regression.py | 25 -
 .../guide-python/generalized_linear_model.py | 30 -
 .../demo/guide-python/predict_first_ntree.py | 20 -
 .../demo/guide-python/predict_leaf_indices.py | 19 -
 ml-xgboost/demo/guide-python/runall.sh | 13 -
 .../demo/guide-python/sklearn_evals_result.py | 43 -
 .../demo/guide-python/sklearn_examples.py | 76 -
 .../demo/guide-python/sklearn_parallel.py | 35 -
 ml-xgboost/demo/json-model/README.md | 3 -
 ml-xgboost/demo/json-model/json_parser.py | 180 -
 ml-xgboost/demo/kaggle-higgs/README.md | 31 -
 ml-xgboost/demo/kaggle-higgs/higgs-cv.py | 37 -
 ml-xgboost/demo/kaggle-higgs/higgs-numpy.py | 54 -
 ml-xgboost/demo/kaggle-higgs/higgs-pred.R | 24 -
 ml-xgboost/demo/kaggle-higgs/higgs-pred.py | 50 -
 ml-xgboost/demo/kaggle-higgs/higgs-train.R | 33 -
 ml-xgboost/demo/kaggle-higgs/run.sh | 14 -
 ml-xgboost/demo/kaggle-higgs/speedtest.R | 71 -
 ml-xgboost/demo/kaggle-higgs/speedtest.py | 63 -
 ml-xgboost/demo/kaggle-otto/README.MD | 22 -
 ml-xgboost/demo/kaggle-otto/otto_train_pred.R | 43 -
 .../kaggle-otto/understandingXGBoostModel.Rmd | 231 -
 .../demo/multiclass_classification/README.md | 10 -
 .../demo/multiclass_classification/runexp.sh | 9 -
 .../demo/multiclass_classification/train.R | 64 -
 .../demo/multiclass_classification/train.py | 51 -
 ml-xgboost/demo/rank/README.md | 41 -
 ml-xgboost/demo/rank/mq2008.conf | 28 -
 ml-xgboost/demo/rank/rank.py | 41 -
 ml-xgboost/demo/rank/rank_sklearn.py | 34 -
 ml-xgboost/demo/rank/runexp.sh | 5 -
 ml-xgboost/demo/rank/trans_data.py | 41 -
 ml-xgboost/demo/rank/wgetdata.sh | 10 -
 ml-xgboost/demo/regression/README.md | 17 -
 ml-xgboost/demo/regression/machine.conf | 30 -
 ml-xgboost/demo/regression/machine.data | 209 -
 ml-xgboost/demo/regression/machine.names | 72 -
 ml-xgboost/demo/regression/mapfeat.py | 31 -
 ml-xgboost/demo/regression/mknfold.py | 29 -
 ml-xgboost/demo/regression/runexp.sh | 16 -
 ml-xgboost/demo/yearpredMSD/README.md | 9 -
 ml-xgboost/demo/yearpredMSD/csv2libsvm.py | 14 -
 ml-xgboost/demo/yearpredMSD/runexp.sh | 17 -
 ml-xgboost/demo/yearpredMSD/yearpredMSD.conf | 29 -
 ml-xgboost/dev/query_contributors.py | 63 -
 ml-xgboost/dmlc-core/.editorconfig | 7 -
 ml-xgboost/dmlc-core/.gitignore | 48 -
 ml-xgboost/dmlc-core/CMakeLists.txt | 258 -
 ml-xgboost/dmlc-core/LICENSE | 201 -
 ml-xgboost/dmlc-core/Makefile | 108 -
 ml-xgboost/dmlc-core/README.md | 45 -
 ml-xgboost/dmlc-core/appveyor.yml | 134 -
 .../dmlc-core/cmake/Modules/FindASan.cmake | 13 -
 .../dmlc-core/cmake/Modules/FindHDFS.cmake | 72 -
 .../dmlc-core/cmake/Modules/FindLSan.cmake | 13 -
 .../dmlc-core/cmake/Modules/FindTSan.cmake | 13 -
 .../dmlc-core/cmake/Modules/FindUBSan.cmake | 13 -
 ml-xgboost/dmlc-core/cmake/Sanitizer.cmake | 63 -
 ml-xgboost/dmlc-core/cmake/Utils.cmake | 381 -
 ml-xgboost/dmlc-core/cmake/build_config.h.in | 27 -
 .../dmlc-core/cmake/dmlc-config.cmake.in | 5 -
 ml-xgboost/dmlc-core/cmake/gtest_cmake.in | 15 -
 ml-xgboost/dmlc-core/cmake/lint.cmake | 21 -
 ml-xgboost/dmlc-core/doc/.gitignore | 3 -
 ml-xgboost/dmlc-core/doc/Doxyfile | 2406 -----
 ml-xgboost/dmlc-core/doc/Makefile | 192 -
 ml-xgboost/dmlc-core/doc/README | 7 -
 ml-xgboost/dmlc-core/doc/build.md | 16 -
 ml-xgboost/dmlc-core/doc/conf.py | 165 -
 ml-xgboost/dmlc-core/doc/index.md | 18 -
 ml-xgboost/dmlc-core/doc/parameter.md | 198 -
 ml-xgboost/dmlc-core/doc/sphinx_util.py | 16 -
 ml-xgboost/dmlc-core/example/dmlc_example.mk | 9 -
 ml-xgboost/dmlc-core/example/parameter.cc | 64 -
 ml-xgboost/dmlc-core/include/dmlc/any.h | 427 -
 .../dmlc-core/include/dmlc/array_view.h | 128 -
 ml-xgboost/dmlc-core/include/dmlc/base.h | 303 -
 .../include/dmlc/blockingconcurrentqueue.h | 991 --
 .../include/dmlc/build_config_default.h | 39 -
 ml-xgboost/dmlc-core/include/dmlc/common.h | 91 -
 .../dmlc-core/include/dmlc/concurrency.h | 263 -
 .../dmlc-core/include/dmlc/concurrentqueue.h | 3719 --------
 ml-xgboost/dmlc-core/include/dmlc/config.h | 186 -
 ml-xgboost/dmlc-core/include/dmlc/data.h | 397 -
 ml-xgboost/dmlc-core/include/dmlc/endian.h | 63 -
 .../dmlc-core/include/dmlc/filesystem.h | 158 -
 .../include/dmlc/input_split_shuffle.h | 168 -
 ml-xgboost/dmlc-core/include/dmlc/io.h | 635 --
 ml-xgboost/dmlc-core/include/dmlc/json.h | 983 --
 ml-xgboost/dmlc-core/include/dmlc/logging.h | 470 -
 ml-xgboost/dmlc-core/include/dmlc/lua.h | 739 --
 ml-xgboost/dmlc-core/include/dmlc/memory.h | 263 -
 ml-xgboost/dmlc-core/include/dmlc/memory_io.h | 105 -
 ml-xgboost/dmlc-core/include/dmlc/omp.h | 50 -
 ml-xgboost/dmlc-core/include/dmlc/optional.h | 271 -
 ml-xgboost/dmlc-core/include/dmlc/parameter.h | 1181 ---
 ml-xgboost/dmlc-core/include/dmlc/recordio.h | 196 -
 ml-xgboost/dmlc-core/include/dmlc/registry.h | 310 -
 .../dmlc-core/include/dmlc/serializer.h | 410 -
 ml-xgboost/dmlc-core/include/dmlc/strtonum.h | 737 --
 .../dmlc-core/include/dmlc/thread_group.h | 808 --
 .../dmlc-core/include/dmlc/thread_local.h | 85 -
 .../dmlc-core/include/dmlc/threadediter.h | 512 --
 ml-xgboost/dmlc-core/include/dmlc/timer.h | 49 -
 .../dmlc-core/include/dmlc/type_traits.h | 192 -
 ml-xgboost/dmlc-core/make/dmlc.mk | 95 -
 ml-xgboost/dmlc-core/scripts/lint.py | 208 -
 ml-xgboost/dmlc-core/scripts/packages.mk | 19 -
 ml-xgboost/dmlc-core/scripts/s390x/Dockerfile | 28 -
 .../scripts/s390x/build_via_cmake.sh | 9 -
 .../dmlc-core/scripts/s390x/ci_build.sh | 86 -
 .../dmlc-core/scripts/s390x/entrypoint.sh | 43 -
 ml-xgboost/dmlc-core/scripts/test_script.sh | 65 -
 ml-xgboost/dmlc-core/src/config.cc | 279 -
 ml-xgboost/dmlc-core/src/data.cc | 258 -
 .../dmlc-core/src/data/basic_row_iter.h | 85 -
 ml-xgboost/dmlc-core/src/data/csv_parser.h | 150 -
 ml-xgboost/dmlc-core/src/data/disk_row_iter.h | 145 -
 ml-xgboost/dmlc-core/src/data/libfm_parser.h | 148 -
 ml-xgboost/dmlc-core/src/data/libsvm_parser.h | 173 -
 ml-xgboost/dmlc-core/src/data/parser.h | 130 -
 ml-xgboost/dmlc-core/src/data/row_block.h | 218 -
 ml-xgboost/dmlc-core/src/data/text_parser.h | 150 -
 ml-xgboost/dmlc-core/src/io.cc | 145 -
 ml-xgboost/dmlc-core/src/io/azure_filesys.cc | 92 -
 ml-xgboost/dmlc-core/src/io/azure_filesys.h | 57 -
 .../dmlc-core/src/io/cached_input_split.h | 193 -
 ml-xgboost/dmlc-core/src/io/filesys.cc | 61 -
 ml-xgboost/dmlc-core/src/io/hdfs_filesys.cc | 193 -
 ml-xgboost/dmlc-core/src/io/hdfs_filesys.h | 82 -
 .../src/io/indexed_recordio_split.cc | 235 -
 .../dmlc-core/src/io/indexed_recordio_split.h | 87 -
 .../dmlc-core/src/io/input_split_base.cc | 308 -
 .../dmlc-core/src/io/input_split_base.h | 197 -
 ml-xgboost/dmlc-core/src/io/line_split.cc | 58 -
 ml-xgboost/dmlc-core/src/io/line_split.h | 41 -
 ml-xgboost/dmlc-core/src/io/local_filesys.cc | 217 -
 ml-xgboost/dmlc-core/src/io/local_filesys.h | 64 -
 ml-xgboost/dmlc-core/src/io/recordio_split.cc | 84 -
 ml-xgboost/dmlc-core/src/io/recordio_split.h | 44 -
 ml-xgboost/dmlc-core/src/io/s3_filesys.cc | 1309 ---
 ml-xgboost/dmlc-core/src/io/s3_filesys.h | 103 -
 .../dmlc-core/src/io/single_file_split.h | 182 -
 .../src/io/single_threaded_input_split.h | 87 -
 .../dmlc-core/src/io/threaded_input_split.h | 105 -
 ml-xgboost/dmlc-core/src/io/uri_spec.h | 79 -
 ml-xgboost/dmlc-core/src/recordio.cc | 157 -
 ml-xgboost/dmlc-core/test/.gitignore | 2 -
 ml-xgboost/dmlc-core/test/README.md | 32 -
 ml-xgboost/dmlc-core/test/csv_parser_test.cc | 61 -
 ml-xgboost/dmlc-core/test/dataiter_test.cc | 31 -
 ml-xgboost/dmlc-core/test/dmlc_test.mk | 30 -
 ml-xgboost/dmlc-core/test/filesys_test.cc | 59 -
 ml-xgboost/dmlc-core/test/iostream_test.cc | 25 -
 .../dmlc-core/test/libfm_parser_test.cc | 36 -
 .../dmlc-core/test/libsvm_parser_test.cc | 36 -
 ml-xgboost/dmlc-core/test/logging_test.cc | 14 -
 ml-xgboost/dmlc-core/test/parameter_test.cc | 81 -
 ml-xgboost/dmlc-core/test/recordio_test.cc | 117 -
 ml-xgboost/dmlc-core/test/registry_test.cc | 60 -
 ml-xgboost/dmlc-core/test/split_read_test.cc | 37 -
 .../dmlc-core/test/split_repeat_read_test.cc | 58 -
 ml-xgboost/dmlc-core/test/split_test.cc | 25 -
 ml-xgboost/dmlc-core/test/stream_read_test.cc | 48 -
 ml-xgboost/dmlc-core/test/strtonum_test.cc | 39 -
 ml-xgboost/dmlc-core/test/unittest/.gitignore | 2 -
 .../dmlc-core/test/unittest/CMakeLists.txt | 68 -
 .../dmlc-core/test/unittest/build_config.h.in | 1 -
 .../dmlc-core/test/unittest/dmlc_unittest.mk | 13 -
 .../dmlc-core/test/unittest/unittest_any.cc | 78 -
 .../test/unittest/unittest_array_view.cc | 20 -
 .../test/unittest/unittest_config.cc | 115 -
 .../dmlc-core/test/unittest/unittest_env.cc | 36 -
 .../test/unittest/unittest_inputsplit.cc | 194 -
 .../dmlc-core/test/unittest/unittest_json.cc | 130 -
 .../test/unittest/unittest_lockfree.cc | 138 -
 .../test/unittest/unittest_logging.cc | 19 -
 .../dmlc-core/test/unittest/unittest_main.cc | 8 -
 .../test/unittest/unittest_optional.cc | 204 -
 .../dmlc-core/test/unittest/unittest_param.cc | 189 -
 .../test/unittest/unittest_parser.cc | 533 --
 .../test/unittest/unittest_serializer.cc | 126 -
 .../test/unittest/unittest_tempdir.cc | 77 -
 .../test/unittest/unittest_thread_group.cc | 240 -
 .../test/unittest/unittest_threaditer.cc | 75 -
 .../unittest_threaditer_exc_handling.cc | 175 -
 ml-xgboost/dmlc-core/tracker/README.md | 42 -
 ml-xgboost/dmlc-core/tracker/dmlc-submit | 9 -
 .../tracker/dmlc_tracker/__init__.py | 2 -
 .../tracker/dmlc_tracker/kubernetes.py | 143 -
 .../tracker/dmlc_tracker/launcher.py | 85 -
 .../dmlc-core/tracker/dmlc_tracker/local.py | 77 -
 .../dmlc-core/tracker/dmlc_tracker/mesos.py | 104 -
 .../dmlc-core/tracker/dmlc_tracker/mpi.py | 82 -
 .../dmlc-core/tracker/dmlc_tracker/opts.py | 180 -
 .../dmlc-core/tracker/dmlc_tracker/sge.py | 48 -
 .../dmlc-core/tracker/dmlc_tracker/slurm.py | 65 -
 .../dmlc-core/tracker/dmlc_tracker/ssh.py | 99 -
 .../dmlc-core/tracker/dmlc_tracker/submit.py | 56 -
 .../dmlc-core/tracker/dmlc_tracker/tracker.py | 487 -
 .../dmlc-core/tracker/dmlc_tracker/util.py | 13 -
 .../dmlc-core/tracker/dmlc_tracker/yarn.py | 131 -
 ml-xgboost/dmlc-core/tracker/yarn/.gitignore | 4 -
 ml-xgboost/dmlc-core/tracker/yarn/README.md | 5 -
 ml-xgboost/dmlc-core/tracker/yarn/build.bat | 5 -
 ml-xgboost/dmlc-core/tracker/yarn/build.sh | 8 -
 ml-xgboost/dmlc-core/tracker/yarn/pom.xml | 168 -
 .../hadoop/yarn/dmlc/ApplicationMaster.java | 689 --
 .../org/apache/hadoop/yarn/dmlc/Client.java | 350 -
 .../apache/hadoop/yarn/dmlc/TaskRecord.java | 27 -
 ml-xgboost/dmlc-core/windows/.gitignore | 11 -
 ml-xgboost/dmlc-core/windows/README.md | 5 -
 ml-xgboost/dmlc-core/windows/dmlc.sln | 54 -
 .../dmlc-core/windows/dmlc/dmlc.vcxproj | 165 -
 ml-xgboost/doc/.gitignore | 8 -
 ml-xgboost/doc/Doxyfile.in | 2353 -----
 ml-xgboost/doc/Makefile | 192 -
 ml-xgboost/doc/R-package/.gitignore | 1 -
 ml-xgboost/doc/R-package/Makefile | 15 -
 ml-xgboost/doc/R-package/discoverYourData.md | 471 -
 ml-xgboost/doc/R-package/index.rst | 28 -
 .../doc/R-package/xgboostPresentation.md | 589 --
 ml-xgboost/doc/README | 5 -
 ml-xgboost/doc/build.rst | 466 -
 ml-xgboost/doc/c++.rst | 12 -
 ml-xgboost/doc/c.rst | 12 -
 ml-xgboost/doc/cli.rst | 5 -
 ml-xgboost/doc/conf.py | 236 -
 ml-xgboost/doc/contrib/coding_guide.rst | 136 -
 ml-xgboost/doc/contrib/community.rst | 35 -
 ml-xgboost/doc/contrib/docs.rst | 30 -
 ml-xgboost/doc/contrib/donate.rst | 44 -
 ml-xgboost/doc/contrib/git_guide.rst | 76 -
 ml-xgboost/doc/contrib/index.rst | 29 -
 ml-xgboost/doc/contrib/release.rst | 13 -
 ml-xgboost/doc/contrib/unit_tests.rst | 179 -
 ml-xgboost/doc/dump.schema | 55 -
 ml-xgboost/doc/faq.rst | 77 -
 ml-xgboost/doc/get_started.rst | 94 -
 ml-xgboost/doc/gpu/index.rst | 245 -
 ml-xgboost/doc/index.rst | 33 -
 ml-xgboost/doc/julia.rst | 5 -
 ml-xgboost/doc/jvm/index.rst | 218 -
 ml-xgboost/doc/jvm/java_intro.rst | 158 -
 ml-xgboost/doc/jvm/javadocs/index.rst | 3 -
 .../jvm/scaladocs/xgboost4j-flink/index.rst | 3 -
 .../jvm/scaladocs/xgboost4j-spark/index.rst | 3 -
 .../doc/jvm/scaladocs/xgboost4j/index.rst | 3 -
 .../doc/jvm/xgboost4j_spark_tutorial.rst | 559 --
 ml-xgboost/doc/model.schema | 446 -
 ml-xgboost/doc/parameter.rst | 461 -
 ml-xgboost/doc/python/convert_090to100.py | 79 -
 ml-xgboost/doc/python/index.rst | 14 -
 ml-xgboost/doc/python/python_api.rst | 94 -
 ml-xgboost/doc/python/python_intro.rst | 228 -
 ml-xgboost/doc/requirements.txt | 8 -
 ml-xgboost/doc/sphinx_util.py | 16 -
 .../doc/tutorials/aft_survival_analysis.rst | 168 -
 ml-xgboost/doc/tutorials/aws_yarn.rst | 8 -
 .../doc/tutorials/custom_metric_obj.rst | 147 -
 ml-xgboost/doc/tutorials/dart.rst | 111 -
 ml-xgboost/doc/tutorials/dask.rst | 144 -
 ml-xgboost/doc/tutorials/external_memory.rst | 72 -
 .../feature_interaction_constraint.rst | 277 -
 ml-xgboost/doc/tutorials/index.rst | 26 -
 ml-xgboost/doc/tutorials/input_format.rst | 112 -
 ml-xgboost/doc/tutorials/kubernetes.rst | 34 -
 ml-xgboost/doc/tutorials/model.rst | 268 -
 ml-xgboost/doc/tutorials/monotonic.rst | 95 -
 ml-xgboost/doc/tutorials/param_tuning.rst | 60 -
 ml-xgboost/doc/tutorials/rf.rst | 106 -
 ml-xgboost/doc/tutorials/saving_model.rst | 231 -
 ml-xgboost/include/xgboost/base.h | 269 -
 ml-xgboost/include/xgboost/c_api.h | 687 --
 ml-xgboost/include/xgboost/data.h | 575 --
 ml-xgboost/include/xgboost/feature_map.h | 94 -
 ml-xgboost/include/xgboost/gbm.h | 229 -
 .../include/xgboost/generic_parameters.h | 92 -
 .../include/xgboost/host_device_vector.h | 141 -
 ml-xgboost/include/xgboost/json.h | 567 --
 ml-xgboost/include/xgboost/json_io.h | 166 -
 ml-xgboost/include/xgboost/learner.h | 264 -
 ml-xgboost/include/xgboost/linear_updater.h | 79 -
 ml-xgboost/include/xgboost/logging.h | 165 -
 ml-xgboost/include/xgboost/metric.h | 106 -
 ml-xgboost/include/xgboost/model.h | 46 -
 ml-xgboost/include/xgboost/objective.h | 108 -
 ml-xgboost/include/xgboost/parameter.h | 131 -
 ml-xgboost/include/xgboost/predictor.h | 242 -
 ml-xgboost/include/xgboost/span.h | 661 --
 ml-xgboost/include/xgboost/tree_model.h | 658 --
 ml-xgboost/include/xgboost/tree_updater.h | 141 -
 ml-xgboost/include/xgboost/version_config.h | 11 -
 ml-xgboost/jvm-packages/.gitignore | 3 -
 ml-xgboost/jvm-packages/CMakeLists.txt | 26 -
 ml-xgboost/jvm-packages/README.md | 120 -
 .../boostkit-xgboost4j-example/LICENSE | 15 -
 .../boostkit-xgboost4j-example/README.md | 29 -
 .../boostkit-xgboost4j-example/pom.xml | 69 -
 .../java/example/BasicWalkThrough.java | 131 -
 .../java/example/BoostFromPrediction.java | 62 -
 .../java/example/CrossValidation.java | 55 -
 .../java/example/CustomObjective.java | 168 -
 .../java/example/ExternalMemory.java | 61 -
 .../java/example/GeneralizedLinearModel.java | 70 -
 .../java/example/PredictFirstNtree.java | 66 -
 .../java/example/PredictLeafIndices.java | 66 -
 .../java/example/util/CustomEval.java | 61 -
 .../java/example/util/DataLoader.java | 123 -
 .../scala/example/BasicWalkThrough.scala | 103 -
 .../scala/example/BoostFromPrediction.scala | 53 -
 .../scala/example/CrossValidation.scala | 46 -
 .../scala/example/CustomObjective.scala | 158 -
 .../scala/example/ExternalMemory.scala | 59 -
 .../example/GeneralizedLinearModel.scala | 60 -
 .../scala/example/PredictFirstNTree.scala | 53 -
 .../scala/example/PredictLeafIndices.scala | 56 -
 .../example/flink/DistTrainWithFlink.scala | 41 -
 .../example/spark/SparkMLlibPipeline.scala | 135 -
 .../scala/example/spark/SparkTraining.scala | 80 -
 .../scala/example/util/CustomEval.scala | 60 -
 .../boostkit-xgboost4j-flink/pom.xml | 79 -
 .../dmlc/xgboost4j/scala/flink/XGBoost.scala | 99 -
 .../xgboost4j/scala/flink/XGBoostModel.scala | 67 -
 .../boostkit-xgboost4j-spark-client/pom.xml | 90 -
 .../dmlc/xgboost4j/scala/spark/XGBoost.scala | 240 -
 .../spark/params/LearningTaskParams.scala | 127 -
 .../pom.xml | 95 -
 .../xgboost4j/scala/spark/XGBoostUtil.scala | 39 -
 .../checkstyle-suppressions.xml | 32 -
 .../boostkit-xgboost4j-spark/checkstyle.xml | 162 -
 .../boostkit-xgboost4j-spark/pom.xml | 83 -
 .../scalastyle-config.xml | 276 -
 .../xgboost4j/scala/spark/DataUtils.scala | 179 -
 .../dmlc/xgboost4j/scala/spark/XGBoost.scala | 1000 --
 .../scala/spark/XGBoostClassifier.scala | 548 --
 .../scala/spark/XGBoostEstimatorCommon.scala | 37 -
 .../scala/spark/XGBoostRegressor.scala | 477 -
 .../scala/spark/XGBoostTrainingSummary.scala | 41 -
 .../dmlc/xgboost4j/scala/spark/package.scala | 48 -
 .../scala/spark/params/BoosterParams.scala | 303 -
 .../scala/spark/params/CustomParams.scala | 86 -
 .../params/DefaultXGBoostParamsReader.scala | 149 -
 .../params/DefaultXGBoostParamsWriter.scala | 89 -
 .../scala/spark/params/GeneralParams.scala | 314 -
 .../scala/spark/params/InferenceParams.scala | 32 -
 .../spark/params/LearningTaskParams.scala | 120 -
 .../spark/params/NonParamVariables.scala | 36 -
 .../scala/spark/params/RabitParams.scala | 40 -
 .../xgboost4j/scala/spark/params/Utils.scala | 33 -
 .../spark/SparkParallelismTracker.scala | 128 -
 .../jvm-packages/boostkit-xgboost4j/LICENSE | 15 -
 .../jvm-packages/boostkit-xgboost4j/pom.xml | 141 -
 .../java/ml/dmlc/xgboost4j/java/Booster.java | 792 --
 .../java/ml/dmlc/xgboost4j/java/DMatrix.java | 347 -
 .../ml/dmlc/xgboost4j/java/DataBatch.java | 115 -
 .../java/ExternalCheckpointManager.java | 117 -
 .../ml/dmlc/xgboost4j/java/IEvaluation.java | 41 -
 .../ml/dmlc/xgboost4j/java/IObjective.java | 35 -
 .../ml/dmlc/xgboost4j/java/IRabitTracker.java | 44 -
 .../dmlc/xgboost4j/java/NativeLibLoader.java | 143 -
 .../java/ml/dmlc/xgboost4j/java/Rabit.java | 154 -
 .../ml/dmlc/xgboost4j/java/RabitTracker.java | 206 -
 .../xgboost4j/java/TrackerProperties.java | 56 -
 .../java/ml/dmlc/xgboost4j/java/XGBoost.java | 571 --
 .../ml/dmlc/xgboost4j/java/XGBoostError.java | 31 -
 .../ml/dmlc/xgboost4j/java/XGBoostJNI.java | 145 -
 .../xgboost4j/java/util/BigDenseMatrix.java | 76 -
 .../dmlc/xgboost4j/java/util/UtilUnsafe.java | 46 -
 .../resources/xgboost4j-version.properties | 1 -
 .../ml/dmlc/xgboost4j/LabeledPoint.scala | 48 -
 .../ml/dmlc/xgboost4j/scala/Booster.scala | 337 -
 .../ml/dmlc/xgboost4j/scala/DMatrix.scala | 227 -
 .../ml/dmlc/xgboost4j/scala/EvalTrait.scala | 45 -
 .../scala/ExternalCheckpointManager.scala | 37 -
 .../dmlc/xgboost4j/scala/ObjectiveTrait.scala | 38 -
 .../ml/dmlc/xgboost4j/scala/XGBoost.scala | 197 -
 .../xgboost4j/scala/rabit/RabitTracker.scala | 195 -
 .../rabit/handler/RabitTrackerHandler.scala | 361 -
 .../rabit/handler/RabitWorkerHandler.scala | 467 -
 .../xgboost4j/scala/rabit/util/LinkMap.scala | 136 -
 .../rabit/util/RabitTrackerHelpers.scala | 39 -
 .../src/native/xgboost4j.cpp | 979 --
 .../boostkit-xgboost4j/src/native/xgboost4j.h | 347 -
 .../jvm-packages/checkstyle-suppressions.xml | 32 -
 ml-xgboost/jvm-packages/checkstyle.xml | 162 -
 ml-xgboost/jvm-packages/create_jni.py | 130 -
 ml-xgboost/jvm-packages/dev/.gitattributes | 3 -
 ml-xgboost/jvm-packages/dev/.gitignore | 1 -
 ml-xgboost/jvm-packages/dev/Dockerfile | 50 -
 ml-xgboost/jvm-packages/dev/build-linux.cmd | 44 -
 ml-xgboost/jvm-packages/dev/build-linux.sh | 34 -
 ml-xgboost/jvm-packages/dev/change_version.sh | 43 -
 ml-xgboost/jvm-packages/dev/package-linux.sh | 36 -
 ml-xgboost/jvm-packages/pom.xml | 444 -
 ml-xgboost/jvm-packages/scalastyle-config.xml | 276 -
 ml-xgboost/kernel_include/README.md | 3 -
 .../boostkit_xgboost_kernel/bbgen.h | 108 -
 .../rabit_intrinsics.h | 18 -
 .../update_quantile_hist_kernel.h | 58 -
 .../CMakeLists.txt | 51 -
 .../boostkit_xgboost_kernel_client/bbgen.cpp | 59 -
 .../rabit_intrinsics.cpp | 36 -
 .../update_quantile_hist_kernel.cpp | 56 -
 ml-xgboost/plugin/CMakeLists.txt | 9 -
 ml-xgboost/plugin/README.md | 40 -
 .../plugin/dense_parser/dense_libsvm.cc | 87 -
 ml-xgboost/plugin/example/README.md | 19 -
 ml-xgboost/plugin/example/custom_obj.cc | 94 -
 .../plugin/lz4/sparse_page_lz4_format.cc | 333 -
 ml-xgboost/plugin/updater_gpu/README.md | 3 -
 ml-xgboost/python-package/.gitignore | 3 -
 ml-xgboost/python-package/.pylintrc | 26 -
 ml-xgboost/python-package/MANIFEST.in | 11 -
 ml-xgboost/python-package/README.rst | 20 -
 ml-xgboost/python-package/setup.cfg | 2 -
 ml-xgboost/python-package/setup.py | 319 -
 ml-xgboost/python-package/xgboost/VERSION | 1 -
 ml-xgboost/python-package/xgboost/__init__.py | 40 -
 ml-xgboost/python-package/xgboost/callback.py | 255 -
 ml-xgboost/python-package/xgboost/compat.py | 208 -
 ml-xgboost/python-package/xgboost/core.py | 2174 -----
 ml-xgboost/python-package/xgboost/dask.py | 840 --
 ml-xgboost/python-package/xgboost/libpath.py | 60 -
 ml-xgboost/python-package/xgboost/plotting.py | 254 -
 ml-xgboost/python-package/xgboost/rabit.py | 208 -
 ml-xgboost/python-package/xgboost/sklearn.py | 1264 ---
 ml-xgboost/python-package/xgboost/tracker.py | 337 -
 ml-xgboost/python-package/xgboost/training.py | 527 --
 ml-xgboost/rabit/.gitignore | 52 -
 ml-xgboost/rabit/.travis.yml | 90 -
 ml-xgboost/rabit/CMakeLists.txt | 185 -
 ml-xgboost/rabit/LICENSE | 28 -
 ml-xgboost/rabit/Makefile | 102 -
 ml-xgboost/rabit/README.md | 40 -
 ml-xgboost/rabit/cmake/Config.cmake.in | 4 -
 .../rabit/cmake/googletest-download.cmake | 20 -
 ml-xgboost/rabit/cmake/googletest.cmake | 32 -
 ml-xgboost/rabit/doc/.gitignore | 5 -
 ml-xgboost/rabit/doc/Doxyfile | 281 -
 ml-xgboost/rabit/doc/Makefile | 192 -
 ml-xgboost/rabit/doc/conf.py | 184 -
 ml-xgboost/rabit/doc/cpp_api.md | 9 -
 ml-xgboost/rabit/doc/guide.md | 383 -
 ml-xgboost/rabit/doc/index.md | 24 -
 ml-xgboost/rabit/doc/parameters.md | 21 -
 ml-xgboost/rabit/doc/python-requirements.txt | 4 -
 ml-xgboost/rabit/doc/python_api.md | 11 -
 ml-xgboost/rabit/doc/sphinx_util.py | 16 -
 ml-xgboost/rabit/guide/Makefile | 26 -
 ml-xgboost/rabit/guide/README | 1 -
 ml-xgboost/rabit/guide/basic.cc | 35 -
 ml-xgboost/rabit/guide/basic.py | 27 -
 ml-xgboost/rabit/guide/broadcast.cc | 16 -
 ml-xgboost/rabit/guide/broadcast.py | 23 -
 ml-xgboost/rabit/guide/lazy_allreduce.cc | 34 -
 ml-xgboost/rabit/guide/lazy_allreduce.py | 31 -
 ml-xgboost/rabit/include/rabit/c_api.h | 196 -
 .../rabit/include/rabit/internal/engine.h | 346 -
 ml-xgboost/rabit/include/rabit/internal/io.h | 114 -
 .../rabit/include/rabit/internal/rabit-inl.h | 401 -
 .../rabit/include/rabit/internal/socket.h | 536 --
 .../include/rabit/internal/thread_local.h | 87 -
 .../rabit/include/rabit/internal/timer.h | 41 -
 .../rabit/include/rabit/internal/utils.h | 231 -
 ml-xgboost/rabit/include/rabit/rabit.h | 460 -
 ml-xgboost/rabit/include/rabit/serializable.h | 26 -
 ml-xgboost/rabit/python/rabit.py | 364 -
 ml-xgboost/rabit/scripts/mpi_build.sh | 27 -
 ml-xgboost/rabit/scripts/travis_runtest.sh | 10 -
 ml-xgboost/rabit/scripts/travis_script.sh | 36 -
 ml-xgboost/rabit/scripts/travis_setup.sh | 35 -
 ml-xgboost/rabit/src/CMakeLists.txt | 31 -
 ml-xgboost/rabit/src/README.md | 6 -
 ml-xgboost/rabit/src/allreduce_base.cc | 965 --
 ml-xgboost/rabit/src/allreduce_base.h | 587 --
 ml-xgboost/rabit/src/allreduce_mock.h | 206 -
 ml-xgboost/rabit/src/allreduce_robust-inl.h | 169 -
 ml-xgboost/rabit/src/allreduce_robust.cc | 1589 ----
 ml-xgboost/rabit/src/allreduce_robust.h | 672 --
 ml-xgboost/rabit/src/c_api.cc | 344 -
 ml-xgboost/rabit/src/engine.cc | 145 -
 ml-xgboost/rabit/src/engine_base.cc | 15 -
 ml-xgboost/rabit/src/engine_empty.cc | 149 -
 ml-xgboost/rabit/src/engine_mock.cc | 16 -
 ml-xgboost/rabit/src/engine_mpi.cc | 252 -
 ml-xgboost/rabit/test/.gitignore | 3 -
 ml-xgboost/rabit/test/Makefile | 77 -
 ml-xgboost/rabit/test/README.md | 18 -
 ml-xgboost/rabit/test/cpp/CMakeLists.txt | 31 -
 ml-xgboost/rabit/test/cpp/README.md | 1 -
 .../rabit/test/cpp/allreduce_base_test.cc | 66 -
 .../rabit/test/cpp/allreduce_base_test.cpp | 66 -
 .../rabit/test/cpp/allreduce_mock_test.cc | 36 -
 .../rabit/test/cpp/allreduce_mock_test.cpp | 51 -
 .../rabit/test/cpp/allreduce_robust_test.cc | 233 -
 ml-xgboost/rabit/test/cpp/test_io.cc | 18 -
 ml-xgboost/rabit/test/cpp/test_main.cpp | 8 -
 ml-xgboost/rabit/test/lazy_recover.cc | 125 -
 ml-xgboost/rabit/test/local_recover.cc | 137 -
 ml-xgboost/rabit/test/local_recover.py | 32 -
 ml-xgboost/rabit/test/model_recover.cc | 157 -
 ml-xgboost/rabit/test/speed_runner.py | 34 -
 ml-xgboost/rabit/test/speed_test.cc | 99 -
 ml-xgboost/rabit/test/test.mk | 37 -
 ml-xgboost/src/CMakeLists.txt | 117 -
 ml-xgboost/src/c_api/c_api.cc | 800 --
 ml-xgboost/src/c_api/c_api.cu | 111 -
 ml-xgboost/src/c_api/c_api_error.cc | 21 -
 ml-xgboost/src/c_api/c_api_error.h | 41 -
 ml-xgboost/src/cli_main.cc | 386 -
 ml-xgboost/src/common/base64.h | 272 -
 ml-xgboost/src/common/bitfield.h | 252 -
 ml-xgboost/src/common/column_matrix.h | 336 -
 ml-xgboost/src/common/common.cc | 33 -
 ml-xgboost/src/common/common.cu | 22 -
 ml-xgboost/src/common/common.h | 159 -
 ml-xgboost/src/common/compressed_iterator.h | 223 -
 ml-xgboost/src/common/config.h | 168 -
 ml-xgboost/src/common/device_helpers.cu | 95 -
 ml-xgboost/src/common/device_helpers.cuh | 916 --
 ml-xgboost/src/common/group_data.h | 130 -
 ml-xgboost/src/common/hist_util.cc | 1336 ---
 ml-xgboost/src/common/hist_util.cu | 476 -
 ml-xgboost/src/common/hist_util.h | 703 --
 ml-xgboost/src/common/host_device_vector.cc | 183 -
 ml-xgboost/src/common/host_device_vector.cu | 390 -
 ml-xgboost/src/common/io.cc | 148 -
 ml-xgboost/src/common/io.h | 92 -
 ml-xgboost/src/common/json.cc | 720 --
 ml-xgboost/src/common/math.h | 196 -
 ml-xgboost/src/common/observer.h | 125 -
 .../src/common/probability_distribution.cc | 107 -
 .../src/common/probability_distribution.h | 95 -
 ml-xgboost/src/common/quantile.h | 855 --
 ml-xgboost/src/common/random.h | 201 -
 ml-xgboost/src/common/row_set.h | 246 -
 ml-xgboost/src/common/survival_util.cc | 264 -
 ml-xgboost/src/common/survival_util.h | 86 -
 ml-xgboost/src/common/threading_utils.h | 134 -
 ml-xgboost/src/common/timer.cc | 128 -
 ml-xgboost/src/common/timer.cu | 38 -
 ml-xgboost/src/common/timer.h | 89 -
 ml-xgboost/src/common/transform.h | 211 -
 ml-xgboost/src/common/version.cc | 91 -
 ml-xgboost/src/common/version.h | 35 -
 ml-xgboost/src/data/adapter.h | 604 --
 ml-xgboost/src/data/array_interface.h | 321 -
 ml-xgboost/src/data/data.cc | 747 --
 ml-xgboost/src/data/data.cu | 87 -
 ml-xgboost/src/data/device_adapter.cuh | 217 -
 ml-xgboost/src/data/device_dmatrix.cu | 239 -
 ml-xgboost/src/data/device_dmatrix.h | 64 -
 ml-xgboost/src/data/ellpack_page.cc | 37 -
 ml-xgboost/src/data/ellpack_page.cu | 309 -
 ml-xgboost/src/data/ellpack_page.cuh | 234 -
 .../src/data/ellpack_page_raw_format.cu | 59 -
 ml-xgboost/src/data/ellpack_page_source.cc | 24 -
 ml-xgboost/src/data/ellpack_page_source.cu | 102 -
 ml-xgboost/src/data/ellpack_page_source.h | 65 -
 ml-xgboost/src/data/simple_batch_iterator.h | 33 -
 ml-xgboost/src/data/simple_dmatrix.cc | 201 -
 ml-xgboost/src/data/simple_dmatrix.cu | 138 -
 ml-xgboost/src/data/simple_dmatrix.h | 63 -
 ml-xgboost/src/data/sparse_page_dmatrix.cc | 59 -
 ml-xgboost/src/data/sparse_page_dmatrix.h | 72 -
 ml-xgboost/src/data/sparse_page_raw_format.cc | 118 -
 ml-xgboost/src/data/sparse_page_source.h | 519 --
 ml-xgboost/src/data/sparse_page_writer.h | 213 -
 ml-xgboost/src/gbm/gblinear.cc | 324 -
 ml-xgboost/src/gbm/gblinear_model.cc | 38 -
 ml-xgboost/src/gbm/gblinear_model.h | 143 -
 ml-xgboost/src/gbm/gbm.cc | 40 -
 ml-xgboost/src/gbm/gbtree.cc | 829 --
 ml-xgboost/src/gbm/gbtree.h | 309 -
 ml-xgboost/src/gbm/gbtree_model.cc | 86 -
 ml-xgboost/src/gbm/gbtree_model.h | 125 -
 ml-xgboost/src/learner.cc | 1091 ---
 ml-xgboost/src/linear/coordinate_common.h | 480 -
 ml-xgboost/src/linear/linear_updater.cc | 37 -
 ml-xgboost/src/linear/param.h | 71 -
 ml-xgboost/src/linear/updater_coordinate.cc | 105 -
 .../src/linear/updater_gpu_coordinate.cu | 261 -
 ml-xgboost/src/linear/updater_shotgun.cc | 104 -
 ml-xgboost/src/logging.cc | 109 -
 ml-xgboost/src/metric/elementwise_metric.cc | 8 -
 ml-xgboost/src/metric/elementwise_metric.cu | 389 -
 ml-xgboost/src/metric/metric.cc | 88 -
 ml-xgboost/src/metric/metric_common.h | 90 -
 ml-xgboost/src/metric/multiclass_metric.cc | 8 -
 ml-xgboost/src/metric/multiclass_metric.cu | 241 -
 ml-xgboost/src/metric/rank_metric.cc | 676 --
 ml-xgboost/src/metric/rank_metric.cu | 709 --
 ml-xgboost/src/metric/survival_metric.cc | 105 -
 ml-xgboost/src/objective/aft_obj.cc | 118 -
 ml-xgboost/src/objective/hinge.cc | 18 -
 ml-xgboost/src/objective/hinge.cu | 98 -
 ml-xgboost/src/objective/multiclass_obj.cc | 18 -
 ml-xgboost/src/objective/multiclass_obj.cu | 202 -
 ml-xgboost/src/objective/objective.cc | 51 -
 ml-xgboost/src/objective/rank_obj.cc | 17 -
 ml-xgboost/src/objective/rank_obj.cu | 954 --
 ml-xgboost/src/objective/regression_loss.h | 142 -
 ml-xgboost/src/objective/regression_obj.cc | 18 -
 ml-xgboost/src/objective/regression_obj.cu | 569 --
 ml-xgboost/src/predictor/cpu_predictor.cc | 487 -
 ml-xgboost/src/predictor/gpu_predictor.cu | 562 --
 ml-xgboost/src/predictor/predictor.cc | 72 -
 ml-xgboost/src/tree/constraints.cc | 103 -
 ml-xgboost/src/tree/constraints.cu | 335 -
 ml-xgboost/src/tree/constraints.cuh | 168 -
 ml-xgboost/src/tree/constraints.h | 65 -
 .../tree/gpu_hist/gradient_based_sampler.cu | 382 -
 .../tree/gpu_hist/gradient_based_sampler.cuh | 151 -
 ml-xgboost/src/tree/gpu_hist/histogram.cu | 201 -
 ml-xgboost/src/tree/gpu_hist/histogram.cuh | 29 -
 .../src/tree/gpu_hist/row_partitioner.cu | 188 -
 .../src/tree/gpu_hist/row_partitioner.cuh | 203 -
 ml-xgboost/src/tree/param.cc | 110 -
 ml-xgboost/src/tree/param.h | 582 --
 ml-xgboost/src/tree/split_evaluator.cc | 279 -
 ml-xgboost/src/tree/split_evaluator.h | 99 -
 ml-xgboost/src/tree/tree_model.cc | 1045 ---
 ml-xgboost/src/tree/tree_updater.cc | 43 -
 ml-xgboost/src/tree/updater_basemaker-inl.h | 484 -
 ml-xgboost/src/tree/updater_colmaker.cc | 633 --
 ml-xgboost/src/tree/updater_gpu_common.cuh | 182 -
 ml-xgboost/src/tree/updater_gpu_hist.cu | 1138 ---
 ml-xgboost/src/tree/updater_histmaker.cc | 764 --
 ml-xgboost/src/tree/updater_prune.cc | 123 -
 ml-xgboost/src/tree/updater_quantile_hist.cc | 1299 ---
 ml-xgboost/src/tree/updater_quantile_hist.h | 397 -
 ml-xgboost/src/tree/updater_refresh.cc | 167 -
 ml-xgboost/src/tree/updater_skmaker.cc | 404 -
 ml-xgboost/src/tree/updater_sync.cc | 67 -
 .../cpp/common/test_partition_builder.cc | 76 -
 pom.xml | 10 +-
 tools/kal-test/README.md | 49 -
 tools/kal-test/bin/graph/betweenness_run.sh | 121 -
 .../bin/graph/betweenness_run_opensource.sh | 129 -
 tools/kal-test/bin/graph/bfs_run.sh | 110 -
 .../kal-test/bin/graph/bfs_run_opensource.sh | 151 -
 tools/kal-test/bin/graph/cc_run.sh | 101 -
 tools/kal-test/bin/graph/cc_run_raw.sh | 92 -
 tools/kal-test/bin/graph/cd_run.sh | 102 -
 tools/kal-test/bin/graph/closeness_run.sh | 118 -
 .../kal-test/bin/graph/closeness_run_hive.sh | 67 -
 .../bin/graph/clusteringcoefficient_run.sh | 116 -
 .../clusteringcoefficient_run_opensource.sh | 129 -
 tools/kal-test/bin/graph/degree_run.sh | 123 -
 tools/kal-test/bin/graph/degree_run_raw.sh | 120 -
 tools/kal-test/bin/graph/incpr_run.sh | 140 -
 tools/kal-test/bin/graph/kcore_run.sh | 98 -
 tools/kal-test/bin/graph/kcore_run_hive.sh | 53 -
 tools/kal-test/bin/graph/kcore_run_raw.sh | 94 -
 tools/kal-test/bin/graph/louvain_run.sh | 107 -
 tools/kal-test/bin/graph/louvain_run_hive.sh | 65 -
 tools/kal-test/bin/graph/lpa_run.sh | 118 -
 tools/kal-test/bin/graph/lpa_run_raw.sh | 87 -
 tools/kal-test/bin/graph/mce_run.sh | 88 -
 tools/kal-test/bin/graph/mce_run_hive.sh | 60 -
 tools/kal-test/bin/graph/modularity_run.sh | 75 -
 tools/kal-test/bin/graph/mssp_run.sh | 101 -
 tools/kal-test/bin/graph/node2vec_run.sh | 113 -
 .../bin/graph/node2vec_run_opensource.sh | 126 -
 tools/kal-test/bin/graph/ppr_run.sh | 167 -
 tools/kal-test/bin/graph/ppr_run_raw.sh | 167 -
 tools/kal-test/bin/graph/pr_run.sh | 109 -
 tools/kal-test/bin/graph/pr_run_hive.sh | 62 -
 tools/kal-test/bin/graph/pr_run_raw.sh | 96 -
 tools/kal-test/bin/graph/scc_run.sh | 104 -
 tools/kal-test/bin/graph/scc_run_raw.sh | 94 -
 tools/kal-test/bin/graph/sgm_run.sh | 182 -
 .../kal-test/bin/graph/sgm_run_opensource.sh | 171 -
 tools/kal-test/bin/graph/tc_run.sh | 113 -
 tools/kal-test/bin/graph/tc_run_raw.sh | 103 -
 tools/kal-test/bin/graph/tr_run.sh | 116 -
 tools/kal-test/bin/graph/wce_run.sh | 76 -
 tools/kal-test/bin/graph/wce_run_hive.sh | 44 -
 tools/kal-test/bin/graph/wpr_run.sh | 124 -
 tools/kal-test/bin/ml/als_run.sh | 104 -
 tools/kal-test/bin/ml/als_run_raw.sh | 102 -
 tools/kal-test/bin/ml/cov_run.sh | 109 -
 tools/kal-test/bin/ml/cov_run_raw.sh | 106 -
 tools/kal-test/bin/ml/dbscan_run.sh | 98 -
 .../kal-test/bin/ml/dbscan_run_opensource.sh | 124 -
 tools/kal-test/bin/ml/dt_run.sh | 125 -
 tools/kal-test/bin/ml/dt_run_raw.sh | 123 -
 tools/kal-test/bin/ml/gbdt_run.sh | 106 -
 tools/kal-test/bin/ml/gbdt_run_raw.sh | 103 -
 tools/kal-test/bin/ml/idf_run.sh | 105 -
 tools/kal-test/bin/ml/idf_run_raw.sh | 101 -
 tools/kal-test/bin/ml/kmeans_run.sh | 106 -
 tools/kal-test/bin/ml/kmeans_run_raw.sh | 106 -
 tools/kal-test/bin/ml/knn_run.sh | 113 -
 tools/kal-test/bin/ml/knn_run_raw.sh | 111 -
 tools/kal-test/bin/ml/lda_run.sh | 103 -
 tools/kal-test/bin/ml/lda_run_raw.sh | 99 -
 tools/kal-test/bin/ml/linR_run.sh | 115 -
 tools/kal-test/bin/ml/linR_run_raw.sh | 113 -
 tools/kal-test/bin/ml/logR_run.sh | 99 -
 tools/kal-test/bin/ml/logR_run_raw.sh | 97 -
 tools/kal-test/bin/ml/pca_run.sh | 107 -
 tools/kal-test/bin/ml/pca_run_kml.sh | 112 -
 tools/kal-test/bin/ml/pca_run_raw.sh | 105 -
 tools/kal-test/bin/ml/pearson_run.sh | 116 -
 tools/kal-test/bin/ml/pearson_run_raw.sh | 115 -
 tools/kal-test/bin/ml/ps_run.sh | 98 -
 tools/kal-test/bin/ml/ps_run_raw.sh | 98 -
 tools/kal-test/bin/ml/rf_run.sh | 126 -
 tools/kal-test/bin/ml/rf_run_raw.sh | 122 -
 tools/kal-test/bin/ml/simrank_run.sh | 120 -
 tools/kal-test/bin/ml/spca_run.sh | 115 -
 tools/kal-test/bin/ml/spca_run_raw.sh | 113 -
 tools/kal-test/bin/ml/spearman_run.sh | 111 -
 tools/kal-test/bin/ml/spearman_run_raw.sh | 110 -
 tools/kal-test/bin/ml/svd_run.sh | 122 -
 tools/kal-test/bin/ml/svd_run_kml.sh | 127 -
 tools/kal-test/bin/ml/svd_run_raw.sh | 119 -
 tools/kal-test/bin/ml/svm_run.sh | 94 -
 tools/kal-test/bin/ml/svm_run_raw.sh | 90 -
 tools/kal-test/bin/ml/xgbt_run.sh | 121 -
 tools/kal-test/bin/ml/xgbt_run_raw.sh | 117 -
 tools/kal-test/bin/workflow.sh | 16 -
 .../conf/graph/betweenness/betweenness.yml | 20 -
 .../betweenness/betweenness_spark.properties | 52 -
 .../betweenness_spark_opensource.properties | 28 -
 tools/kal-test/conf/graph/bfs/bfs.yml | 38 -
 .../conf/graph/bfs/bfs_opensource.properties | 12 -
 .../conf/graph/bfs/bfs_source_id.properties | 6 -
 .../conf/graph/bfs/bfs_spark.properties | 62 -
 .../graph/bfs/bfs_spark_opensource.properties | 32 -
 tools/kal-test/conf/graph/cc/cc.yml | 14 -
 .../conf/graph/cc/cc_spark.properties | 19 -
 .../conf/graph/cc/cc_spark_raw.properties | 11 -
 tools/kal-test/conf/graph/cd/cd.yml | 18 -
 .../conf/graph/cd/cd_spark_aarch64.properties | 18 -
 .../conf/graph/cd/cd_spark_raw.properties | 18 -
 .../conf/graph/cd/cd_spark_x86_64.properties | 18 -
 .../conf/graph/closeness/closeness.yml | 18 -
 .../closeness/closeness_spark.properties | 76 -
 .../clusteringcoefficient.yml | 35 -
 .../clusteringcoefficient_spark.properties | 73 -
 ...ingcoefficient_spark_opensource.properties | 31 -
 tools/kal-test/conf/graph/degree/degree.yml | 26 -
 .../conf/graph/degree/degree_spark.properties | 93 -
 .../graph/degree/degree_spark_raw.properties | 47 -
 .../conf/graph/graph_datasets.properties | 76 -
 tools/kal-test/conf/graph/incpr/incpr.yml | 10 -
 .../conf/graph/incpr/incpr_spark.properties | 21 -
 tools/kal-test/conf/graph/kcore/kcore.yml | 25 -
 .../conf/graph/kcore/kcore_spark.properties | 45 -
 .../graph/kcore/kcore_spark_raw.properties | 24 -
 tools/kal-test/conf/graph/louvain/louvain.yml | 38 -
 .../graph/louvain/louvain_spark.properties | 74 -
 tools/kal-test/conf/graph/lpa/lpa.yml | 12 -
 .../conf/graph/lpa/lpa_spark.properties | 13 -
 .../conf/graph/lpa/lpa_spark_raw.properties | 18 -
 tools/kal-test/conf/graph/mce/mce.yml | 11 -
 .../conf/graph/mce/mce_spark.properties | 9 -
 .../conf/graph/modularity/modularity.yml | 56 -
 .../modularity/modularity_spark.properties | 60 -
 tools/kal-test/conf/graph/mssp/mssp.yml | 38 -
 .../conf/graph/mssp/mssp_spark.properties | 128 -
 .../kal-test/conf/graph/node2vec/node2vec.yml | 44 -
 .../graph/node2vec/node2vec_spark.properties | 13 -
 .../node2vec_spark_opensource.properties | 8 -
 tools/kal-test/conf/graph/ppr/ppr.yml | 23 -
 .../conf/graph/ppr/ppr_source_id.properties | 3 -
 .../conf/graph/ppr/ppr_spark.properties | 111 -
 .../conf/graph/ppr/ppr_spark_raw.properties | 57 -
 tools/kal-test/conf/graph/pr/pr.yml | 23 -
 .../conf/graph/pr/pr_spark.properties | 74 -
 .../conf/graph/pr/pr_spark_raw.properties | 39 -
 tools/kal-test/conf/graph/scc/scc.yml | 5 -
 .../conf/graph/scc/scc_spark.properties | 20 -
 tools/kal-test/conf/graph/scc/scc_x86.yml | 5 -
 tools/kal-test/conf/graph/sgm/sgm.yml | 6 -
 .../conf/graph/sgm/sgm_spark.properties | 354 -
 .../graph/sgm/sgm_spark_opensource.properties | 32 -
 tools/kal-test/conf/graph/tc/tc.yml | 8 -
 .../conf/graph/tc/tc_spark.properties | 11 -
 tools/kal-test/conf/graph/tr/tr.yml | 72 -
 .../conf/graph/tr/tr_spark.properties | 219 -
 tools/kal-test/conf/graph/wce/wce.yml | 26 -
 .../conf/graph/wce/wce_spark.properties | 30 -
 tools/kal-test/conf/graph/wpr/wpr.yml | 22 -
 .../conf/graph/wpr/wpr_spark.properties | 23 -
 tools/kal-test/conf/ml/als/als.yml | 128 -
 tools/kal-test/conf/ml/als/als_raw.yml | 65 -
 .../kal-test/conf/ml/als/als_spark.properties | 15 -
 .../conf/ml/als/als_spark_raw.properties | 9 -
 tools/kal-test/conf/ml/cov/cov.yml | 47 -
 .../kal-test/conf/ml/cov/cov_spark.properties | 19 -
 tools/kal-test/conf/ml/dbscan/dbscan.yml | 20 -
 .../conf/ml/dbscan/dbscan_spark.properties | 16 -
 .../dbscan/dbscan_spark_opensource.properties | 17 -
 tools/kal-test/conf/ml/dt/dt_arm.yml | 177 -
 tools/kal-test/conf/ml/dt/dt_spark.properties | 71 -
 .../conf/ml/dt/dt_spark_raw.properties | 39 -
 tools/kal-test/conf/ml/dt/dt_x86.yml | 177 -
 tools/kal-test/conf/ml/dt/dt_x86_raw.yml | 177 -
 tools/kal-test/conf/ml/gbdt/gbdt.yml | 119 -
 .../conf/ml/gbdt/gbdt_spark.properties | 16 -
 tools/kal-test/conf/ml/idf/idf.yml | 28 -
 .../kal-test/conf/ml/idf/idf_spark.properties | 59 -
 tools/kal-test/conf/ml/kmeans/kmeans.yml | 47 -
 .../conf/ml/kmeans/kmeans_spark.properties | 18 -
 tools/kal-test/conf/ml/knn/knn.yml | 59 -
 tools/kal-test/conf/ml/knn/knn_raw.yml | 30 -
 .../kal-test/conf/ml/knn/knn_spark.properties | 54 -
 .../conf/ml/knn/knn_spark_raw.properties | 29 -
 tools/kal-test/conf/ml/lda/lda.yml | 83 -
 .../kal-test/conf/ml/lda/lda_spark.properties | 16 -
 tools/kal-test/conf/ml/linR/linR.yml | 58 -
 tools/kal-test/conf/ml/linR/linR_raw.yml | 30 -
 .../conf/ml/linR/linR_spark.properties | 19 -
 .../conf/ml/linR/linR_spark_raw.properties | 13 -
 tools/kal-test/conf/ml/logR/logR.yml | 110 -
 .../conf/ml/logR/logR_spark.properties | 16 -
 tools/kal-test/conf/ml/ml_datasets.properties | 62 -
 tools/kal-test/conf/ml/pca/pca.yml | 75 -
 .../kal-test/conf/ml/pca/pca_spark.properties | 55 -
 tools/kal-test/conf/ml/pearson/pearson.yml | 36 -
 .../kal-test/conf/ml/pearson/pearson_raw.yml | 19 -
 .../conf/ml/pearson/pearson_spark.properties | 19 -
 .../ml/pearson/pearson_spark_raw.properties | 11 -
 tools/kal-test/conf/ml/ps/ps.yml | 111 -
 tools/kal-test/conf/ml/ps/ps_spark.properties | 16 -
 tools/kal-test/conf/ml/rf/rf_arm.yml | 233 -
 tools/kal-test/conf/ml/rf/rf_spark.properties | 89 -
 .../conf/ml/rf/rf_spark_raw.properties | 49 -
 tools/kal-test/conf/ml/rf/rf_x86.yml | 233 -
 tools/kal-test/conf/ml/rf/rf_x86_raw.yml | 233 -
 tools/kal-test/conf/ml/simrank/simrank.yml | 86 -
 .../conf/ml/simrank/simrank_spark.properties | 18 -
 tools/kal-test/conf/ml/spca/spca.yml | 42 -
 tools/kal-test/conf/ml/spca/spca_raw.yml | 42 -
 .../conf/ml/spca/spca_spark.properties | 70 -
 .../conf/ml/spca/spca_spark_raw.properties | 37 -
 tools/kal-test/conf/ml/spearman/spearman.yml | 29 -
 .../ml/spearman/spearman_spark.properties | 54 -
 tools/kal-test/conf/ml/svd/svd.yml | 34 -
 tools/kal-test/conf/ml/svd/svd_raw.yml | 34 -
 .../kal-test/conf/ml/svd/svd_spark.properties | 69 -
 .../conf/ml/svd/svd_spark_raw.properties | 37 -
 tools/kal-test/conf/ml/svm/svm.yml | 46 -
 .../kal-test/conf/ml/svm/svm_spark.properties | 18 -
 tools/kal-test/conf/ml/xgbt/xgbt_arm.yml | 66 -
 .../conf/ml/xgbt/xgbt_spark.properties | 77 -
 .../conf/ml/xgbt/xgbt_spark_raw.properties | 40 -
 tools/kal-test/conf/ml/xgbt/xgbt_x86.yml | 122 -
 tools/kal-test/pom.xml | 91 -
 .../scala/com/bigdata/graph/BFSRunner.scala | 124 -
 .../com/bigdata/graph/BetweennessRunner.scala | 118 -
 .../bigdata/graph/ClosenessHiveRunner.scala | 85 -
 .../com/bigdata/graph/ClosenessRunner.scala | 151 -
.../graph/ClusteringCoefficientRunner.scala | 141 -
.../graph/ConnectedComponentsRunner.scala | 98 -
.../CycleDetectionWithConstrainsRunner.scala | 121 -
.../com/bigdata/graph/DegreeRunner.scala | 123 -
.../com/bigdata/graph/IncPageRankRunner.scala | 139 -
.../graph/KCoreDecompositionHiveRunner.scala | 72 -
.../graph/KCoreDecompositionRunner.scala | 95 -
.../scala/com/bigdata/graph/KcoreMain.scala | 152 -
.../graph/LabelPropagationRunner.scala | 109 -
.../com/bigdata/graph/LouvainHiveRunner.scala | 87 -
.../com/bigdata/graph/LouvainRunner.scala | 120 -
.../scala/com/bigdata/graph/MSSPRunner.scala | 132 -
.../MaximalCliqueEnumerationHiveRunner.scala | 84 -
.../MaximalCliqueEnumerationRunner.scala | 89 -
.../com/bigdata/graph/ModularityRunner.scala | 109 -
.../com/bigdata/graph/Node2VecRunner.scala | 113 -
.../bigdata/graph/PageRankHiveRunner.scala | 84 -
.../com/bigdata/graph/PageRankRunner.scala | 138 -
.../graph/PersonalizedPageRankRunner.scala | 138 -
.../StronglyConnectedComponentsRunner.scala | 97 -
.../graph/SubgraphMatchingRunner.scala | 112 -
.../bigdata/graph/TrangleCountRunner.scala | 111 -
.../com/bigdata/graph/TrustRankRunner.scala | 132 -
.../main/scala/com/bigdata/graph/Util.scala | 281 -
.../com/bigdata/graph/WCEHiveRunner.scala | 78 -
.../scala/com/bigdata/graph/WCERunner.scala | 108 -
.../graph/WeightedPageRankRunner.scala | 146 -
.../main/scala/com/bigdata/ml/ALSRunner.scala | 279 -
.../main/scala/com/bigdata/ml/CovRunner.scala | 164 -
.../main/scala/com/bigdata/ml/DTRunner.scala | 403 -
.../scala/com/bigdata/ml/GBDTRunner.scala | 322 -
.../main/scala/com/bigdata/ml/IDFRunner.scala | 97 -
.../scala/com/bigdata/ml/KMeansRunner.scala | 192 -
.../main/scala/com/bigdata/ml/KNNRunner.scala | 249 -
.../main/scala/com/bigdata/ml/LDARunner.scala | 264 -
.../scala/com/bigdata/ml/LinRRunner.scala | 225 -
.../scala/com/bigdata/ml/LogRRunner.scala | 207 -
.../main/scala/com/bigdata/ml/PCARunner.scala | 217 -
.../scala/com/bigdata/ml/PearsonRunner.scala | 207 -
.../com/bigdata/ml/PrefixSpanRunner.scala | 200 -
.../main/scala/com/bigdata/ml/RFRunner.scala | 404 -
.../scala/com/bigdata/ml/SPCARunner.scala | 192 -
.../main/scala/com/bigdata/ml/SVDRunner.scala | 179 -
.../main/scala/com/bigdata/ml/SVMRunner.scala | 151 -
.../scala/com/bigdata/ml/SimRankRunner.scala | 151 -
.../scala/com/bigdata/ml/SpearManRunner.scala | 215 -
.../scala/com/bigdata/ml/XGBTRunner.scala | 206 -
.../main/scala/com/bigdata/utils/Utils.scala | 83 -
.../ml/classification/KNNClassifier.scala | 239 -
.../spark/ml/clustering/DBSCANRunner.scala | 143 -
.../scala/org/apache/spark/ml/knn/KNN.scala | 588 --
.../org/apache/spark/ml/knn/MetricTree.scala | 397 -
.../ml/recommendation/SimRankOpenSource.scala | 151 -
.../spark/ml/regression/KNNRegression.scala | 156 -
.../org/apache/spark/mllib/knn/KNNUtils.scala | 20 -
1338 files changed, 358 insertions(+), 276503 deletions(-)
delete mode 100644 ml-accelerator/src/main/scala/breeze/optimize/FirstOrderMinimizerX.scala
delete mode 100644 ml-accelerator/src/main/scala/breeze/optimize/LBFGSX.scala
delete mode 100644 ml-accelerator/src/main/scala/breeze/optimize/OWLQNX.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/classification/DecisionTreeClassifier.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/classification/GBTClassifier.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/classification/LinearSVC.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/classification/RandomForestClassifier.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/feature/IDF.scala
create mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/fpm/PrefixSpan.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/optim/aggregator/DifferentiableLossAggregatorX.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/optim/aggregator/HingeAggregatorX.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/optim/aggregator/HuberAggregatorX.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/optim/aggregator/LeastSquaresAggregatorX.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/optim/aggregator/LogisticAggregatorX.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/optim/loss/RDDLossFunctionX.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/regression/DecisionTreeRegressor.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/regression/GBTRegressor.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/regression/RandomForestRegressor.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/stat/Correlation.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/DecisionForest.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/GradientBoostedTrees.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/NodeIdCache.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/RandomForest.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/RandomForest4GBDTX.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/RandomForestRaw.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/ml/tree/treeParams.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/mllib/clustering/KMACCm.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/mllib/clustering/LocalKMeansX.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/mllib/feature/IDF.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/mllib/linalg/EigenValueDecomposition.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/mllib/linalg/distributed/RowMatrix.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/mllib/stat/correlation/Correlation.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/mllib/stat/correlation/PearsonCorrelation.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/mllib/stat/correlation/SpearmanCorrelation.scala
delete mode 100644 ml-accelerator/src/main/scala/org/apache/spark/mllib/tree/DecisionTree.scala
delete mode 100644 ml-core/src/main/scala/org/apache/spark/ml/tree/Node.scala
delete mode 100644 ml-core/src/main/scala/org/apache/spark/ml/tree/Split.scala
delete mode 100644 ml-core/src/main/scala/org/apache/spark/ml/tree/impl/BaggedPoint.scala
delete mode 100644 ml-core/src/main/scala/org/apache/spark/ml/tree/impl/DTFeatureStatsAggregator.scala
delete mode 100644 ml-core/src/main/scala/org/apache/spark/ml/tree/impl/DTStatsAggregator.scala
delete mode 100644 ml-core/src/main/scala/org/apache/spark/ml/tree/impl/GradientBoostedTreesCore.scala
delete mode 100644 ml-core/src/main/scala/org/apache/spark/mllib/tree/impurity/Entropy.scala
delete mode 100644 ml-core/src/main/scala/org/apache/spark/mllib/tree/impurity/Gini.scala
delete mode 100644 ml-core/src/main/scala/org/apache/spark/mllib/tree/impurity/Impurities.scala
delete mode 100644 ml-core/src/main/scala/org/apache/spark/mllib/tree/impurity/Impurity.scala
delete mode 100644 ml-core/src/main/scala/org/apache/spark/mllib/tree/impurity/Variance.scala
delete mode 100644 ml-kernel-client/src/main/scala/breeze/optimize/ACC.scala
delete mode 100644 ml-kernel-client/src/main/scala/breeze/optimize/LBFGSL.scala
delete mode 100644 ml-kernel-client/src/main/scala/breeze/optimize/OWLQNL.scala
delete mode 100644 ml-kernel-client/src/main/scala/org/apache/spark/ml/tree/impl/GradientBoostedTreesUtil.scala
delete mode 100644 ml-kernel-client/src/main/scala/org/apache/spark/mllib.clustering/KmeansUtil.scala
delete mode 100644 ml-xgboost/.clang-tidy
delete mode 100644 ml-xgboost/.editorconfig
delete mode 100644 ml-xgboost/.gitmodules
delete mode 100644 ml-xgboost/.travis.yml
delete mode 100644 ml-xgboost/CITATION
delete mode 100644 ml-xgboost/CMakeLists.txt
delete mode 100644 ml-xgboost/CONTRIBUTORS.md
delete mode 100644 ml-xgboost/Jenkinsfile
delete mode 100644 ml-xgboost/Jenkinsfile-win64
delete mode 100644 ml-xgboost/LICENSE
delete mode 100644 ml-xgboost/Makefile
delete mode 100644 ml-xgboost/NEWS.md
delete mode 100644 ml-xgboost/R-package/.Rbuildignore
delete mode 100644 ml-xgboost/R-package/CMakeLists.txt
delete mode 100644 ml-xgboost/R-package/DESCRIPTION
delete mode 100644 ml-xgboost/R-package/LICENSE
delete mode 100644 ml-xgboost/R-package/NAMESPACE
delete mode 100644 ml-xgboost/R-package/R/callbacks.R
delete mode 100644 ml-xgboost/R-package/R/utils.R
delete mode 100644 ml-xgboost/R-package/R/xgb.Booster.R
delete mode 100644 ml-xgboost/R-package/R/xgb.DMatrix.R
delete mode 100644 ml-xgboost/R-package/R/xgb.DMatrix.save.R
delete mode 100644 ml-xgboost/R-package/R/xgb.create.features.R
delete mode 100644 ml-xgboost/R-package/R/xgb.cv.R
delete mode 100644 ml-xgboost/R-package/R/xgb.dump.R
delete mode 100644 ml-xgboost/R-package/R/xgb.ggplot.R
delete mode 100644 ml-xgboost/R-package/R/xgb.importance.R
delete mode 100644 ml-xgboost/R-package/R/xgb.load.R
delete mode 100644 ml-xgboost/R-package/R/xgb.load.raw.R
delete mode 100644 ml-xgboost/R-package/R/xgb.model.dt.tree.R
delete mode 100644 ml-xgboost/R-package/R/xgb.plot.deepness.R
delete mode 100644 ml-xgboost/R-package/R/xgb.plot.importance.R
delete mode 100644 ml-xgboost/R-package/R/xgb.plot.multi.trees.R
delete mode 100644 ml-xgboost/R-package/R/xgb.plot.shap.R
delete mode 100644 ml-xgboost/R-package/R/xgb.plot.tree.R
delete mode 100644 ml-xgboost/R-package/R/xgb.save.R
delete mode 100644 ml-xgboost/R-package/R/xgb.save.raw.R
delete mode 100644 ml-xgboost/R-package/R/xgb.serialize.R
delete mode 100644 ml-xgboost/R-package/R/xgb.train.R
delete mode 100644 ml-xgboost/R-package/R/xgb.unserialize.R
delete mode 100644 ml-xgboost/R-package/R/xgboost.R
delete mode 100644 ml-xgboost/R-package/README.md
delete mode 100644 ml-xgboost/R-package/cleanup
delete mode 100644 ml-xgboost/R-package/configure
delete mode 100644 ml-xgboost/R-package/configure.ac
delete mode 100644 ml-xgboost/R-package/configure.win
delete mode 100644 ml-xgboost/R-package/demo/00Index
delete mode 100644 ml-xgboost/R-package/demo/README.md
delete mode 100644 ml-xgboost/R-package/demo/basic_walkthrough.R
delete mode 100644 ml-xgboost/R-package/demo/boost_from_prediction.R
delete mode 100644 ml-xgboost/R-package/demo/caret_wrapper.R
delete mode 100644 ml-xgboost/R-package/demo/create_sparse_matrix.R
delete mode 100644 ml-xgboost/R-package/demo/cross_validation.R
delete mode 100644 ml-xgboost/R-package/demo/custom_objective.R
delete mode 100644 ml-xgboost/R-package/demo/early_stopping.R
delete mode 100644 ml-xgboost/R-package/demo/generalized_linear_model.R
delete mode 100644 ml-xgboost/R-package/demo/gpu_accelerated.R
delete mode 100644 ml-xgboost/R-package/demo/interaction_constraints.R
delete mode 100644 ml-xgboost/R-package/demo/poisson_regression.R
delete mode 100644 ml-xgboost/R-package/demo/predict_first_ntree.R
delete mode 100644 ml-xgboost/R-package/demo/predict_leaf_indices.R
delete mode 100644 ml-xgboost/R-package/demo/runall.R
delete mode 100644 ml-xgboost/R-package/demo/tweedie_regression.R
delete mode 100644 ml-xgboost/R-package/man/agaricus.test.Rd
delete mode 100644 ml-xgboost/R-package/man/agaricus.train.Rd
delete mode 100644 ml-xgboost/R-package/man/callbacks.Rd
delete mode 100644 ml-xgboost/R-package/man/cb.cv.predict.Rd
delete mode 100644 ml-xgboost/R-package/man/cb.early.stop.Rd
delete mode 100644 ml-xgboost/R-package/man/cb.evaluation.log.Rd
delete mode 100644 ml-xgboost/R-package/man/cb.gblinear.history.Rd
delete mode 100644 ml-xgboost/R-package/man/cb.print.evaluation.Rd
delete mode 100644 ml-xgboost/R-package/man/cb.reset.parameters.Rd
delete mode 100644 ml-xgboost/R-package/man/cb.save.model.Rd
delete mode 100644 ml-xgboost/R-package/man/dim.xgb.DMatrix.Rd
delete mode 100644 ml-xgboost/R-package/man/dimnames.xgb.DMatrix.Rd
delete mode 100644 ml-xgboost/R-package/man/getinfo.Rd
delete mode 100644 ml-xgboost/R-package/man/predict.xgb.Booster.Rd
delete mode 100644 ml-xgboost/R-package/man/print.xgb.Booster.Rd
delete mode 100644 ml-xgboost/R-package/man/print.xgb.DMatrix.Rd
delete mode 100644 ml-xgboost/R-package/man/print.xgb.cv.Rd
delete mode 100644 ml-xgboost/R-package/man/setinfo.Rd
delete mode 100644 ml-xgboost/R-package/man/slice.xgb.DMatrix.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.Booster.complete.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.DMatrix.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.DMatrix.save.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.attr.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.config.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.create.features.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.cv.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.dump.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.gblinear.history.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.importance.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.load.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.load.raw.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.model.dt.tree.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.parameters.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.plot.deepness.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.plot.importance.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.plot.multi.trees.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.plot.shap.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.plot.tree.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.save.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.save.raw.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.serialize.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.train.Rd
delete mode 100644 ml-xgboost/R-package/man/xgb.unserialize.Rd
delete mode 100644 ml-xgboost/R-package/man/xgboost-deprecated.Rd
delete mode 100644 ml-xgboost/R-package/remove_warning_suppression_pragma.sh
delete mode 100644 ml-xgboost/R-package/src/Makevars.in
delete mode 100644 ml-xgboost/R-package/src/Makevars.win
delete mode 100644 ml-xgboost/R-package/src/init.c
delete mode 100644 ml-xgboost/R-package/src/xgboost_R.cc
delete mode 100644 ml-xgboost/R-package/src/xgboost_R.h
delete mode 100644 ml-xgboost/R-package/src/xgboost_assert.c
delete mode 100644 ml-xgboost/R-package/src/xgboost_custom.cc
delete mode 100644 ml-xgboost/R-package/tests/testthat.R
delete mode 100644 ml-xgboost/R-package/tests/testthat/test_basic.R
delete mode 100644 ml-xgboost/R-package/tests/testthat/test_callbacks.R
delete mode 100644 ml-xgboost/R-package/tests/testthat/test_custom_objective.R
delete mode 100644 ml-xgboost/R-package/tests/testthat/test_dmatrix.R
delete mode 100644 ml-xgboost/R-package/tests/testthat/test_gc_safety.R
delete mode 100644 ml-xgboost/R-package/tests/testthat/test_glm.R
delete mode 100644 ml-xgboost/R-package/tests/testthat/test_helpers.R
delete mode 100644 ml-xgboost/R-package/tests/testthat/test_interaction_constraints.R
delete mode 100644 ml-xgboost/R-package/tests/testthat/test_interactions.R
delete mode 100644 ml-xgboost/R-package/tests/testthat/test_lint.R
delete mode 100644 ml-xgboost/R-package/tests/testthat/test_monotone.R
delete mode 100644 ml-xgboost/R-package/tests/testthat/test_parameter_exposure.R
delete mode 100644 ml-xgboost/R-package/tests/testthat/test_poisson_regression.R
delete mode 100644 ml-xgboost/R-package/tests/testthat/test_update.R
delete mode 100644 ml-xgboost/R-package/vignettes/discoverYourData.Rmd
delete mode 100644 ml-xgboost/R-package/vignettes/vignette.css
delete mode 100644 ml-xgboost/R-package/vignettes/xgboost.Rnw
delete mode 100644 ml-xgboost/R-package/vignettes/xgboost.bib
delete mode 100644 ml-xgboost/R-package/vignettes/xgboostPresentation.Rmd
delete mode 100644 ml-xgboost/R-package/vignettes/xgboostfromJSON.Rmd
delete mode 100644 ml-xgboost/README.md
delete mode 100644 ml-xgboost/amalgamation/dmlc-minimum0.cc
delete mode 100644 ml-xgboost/amalgamation/xgboost-all0.cc
delete mode 100644 ml-xgboost/appveyor.yml
delete mode 100644 ml-xgboost/cmake/Doc.cmake
delete mode 100644 ml-xgboost/cmake/FindPrefetchIntrinsics.cmake
delete mode 100644 ml-xgboost/cmake/Python_version.in
delete mode 100644 ml-xgboost/cmake/Sanitizer.cmake
delete mode 100644 ml-xgboost/cmake/Utils.cmake
delete mode 100644 ml-xgboost/cmake/Version.cmake
delete mode 100644 ml-xgboost/cmake/modules/FindASan.cmake
delete mode 100644 ml-xgboost/cmake/modules/FindLSan.cmake
delete mode 100644 ml-xgboost/cmake/modules/FindLibR.cmake
delete mode 100644 ml-xgboost/cmake/modules/FindNVML.cmake
delete mode 100644 ml-xgboost/cmake/modules/FindNccl.cmake
delete mode 100644 ml-xgboost/cmake/modules/FindTSan.cmake
delete mode 100644 ml-xgboost/cmake/modules/FindUBSan.cmake
delete mode 100644 ml-xgboost/cmake/version_config.h.in
delete mode 100644 ml-xgboost/cmake/xgboost-config.cmake.in
delete mode 100644 ml-xgboost/cub/.cproject
delete mode 100644 ml-xgboost/cub/CHANGE_LOG.TXT
delete mode 100644 ml-xgboost/cub/LICENSE.TXT
delete mode 100644 ml-xgboost/cub/README.md
delete mode 100644 ml-xgboost/cub/common.mk
delete mode 100644 ml-xgboost/cub/cub/agent/agent_histogram.cuh
delete mode 100644 ml-xgboost/cub/cub/agent/agent_radix_sort_downsweep.cuh
delete mode 100644 ml-xgboost/cub/cub/agent/agent_radix_sort_upsweep.cuh
delete mode 100644 ml-xgboost/cub/cub/agent/agent_reduce.cuh
delete mode 100644 ml-xgboost/cub/cub/agent/agent_reduce_by_key.cuh
delete mode 100644 ml-xgboost/cub/cub/agent/agent_rle.cuh
delete mode 100644 ml-xgboost/cub/cub/agent/agent_scan.cuh
delete mode 100644 ml-xgboost/cub/cub/agent/agent_segment_fixup.cuh
delete mode 100644 ml-xgboost/cub/cub/agent/agent_select_if.cuh
delete mode 100644 ml-xgboost/cub/cub/agent/agent_spmv_csrt.cuh
delete mode 100644 ml-xgboost/cub/cub/agent/agent_spmv_orig.cuh
delete mode 100644 ml-xgboost/cub/cub/agent/agent_spmv_row_based.cuh
delete mode 100644 ml-xgboost/cub/cub/agent/single_pass_scan_operators.cuh
delete mode 100644 ml-xgboost/cub/cub/block/block_adjacent_difference.cuh
delete mode 100644 ml-xgboost/cub/cub/block/block_discontinuity.cuh
delete mode 100644 ml-xgboost/cub/cub/block/block_exchange.cuh
delete mode 100644 ml-xgboost/cub/cub/block/block_histogram.cuh
delete mode 100644 ml-xgboost/cub/cub/block/block_load.cuh
delete mode 100644 ml-xgboost/cub/cub/block/block_radix_rank.cuh
delete mode 100644 ml-xgboost/cub/cub/block/block_radix_sort.cuh
delete mode 100644 ml-xgboost/cub/cub/block/block_raking_layout.cuh
delete mode 100644 ml-xgboost/cub/cub/block/block_reduce.cuh
delete mode 100644 ml-xgboost/cub/cub/block/block_scan.cuh
delete mode 100644 ml-xgboost/cub/cub/block/block_shuffle.cuh
delete mode 100644 ml-xgboost/cub/cub/block/block_store.cuh
delete mode 100644 ml-xgboost/cub/cub/block/specializations/block_histogram_atomic.cuh
delete mode 100644 ml-xgboost/cub/cub/block/specializations/block_histogram_sort.cuh
delete mode 100644 ml-xgboost/cub/cub/block/specializations/block_reduce_raking.cuh
delete mode 100644 ml-xgboost/cub/cub/block/specializations/block_reduce_raking_commutative_only.cuh
delete mode 100644 ml-xgboost/cub/cub/block/specializations/block_reduce_warp_reductions.cuh
delete mode 100644 ml-xgboost/cub/cub/block/specializations/block_scan_raking.cuh
delete mode 100644 ml-xgboost/cub/cub/block/specializations/block_scan_warp_scans.cuh
delete mode 100644 ml-xgboost/cub/cub/block/specializations/block_scan_warp_scans2.cuh
delete mode 100644 ml-xgboost/cub/cub/block/specializations/block_scan_warp_scans3.cuh
delete mode 100644 ml-xgboost/cub/cub/cub.cuh
delete mode 100644 ml-xgboost/cub/cub/device/device_histogram.cuh
delete mode 100644 ml-xgboost/cub/cub/device/device_partition.cuh
delete mode 100644 ml-xgboost/cub/cub/device/device_radix_sort.cuh
delete mode 100644 ml-xgboost/cub/cub/device/device_reduce.cuh
delete mode 100644 ml-xgboost/cub/cub/device/device_run_length_encode.cuh
delete mode 100644 ml-xgboost/cub/cub/device/device_scan.cuh
delete mode 100644 ml-xgboost/cub/cub/device/device_segmented_radix_sort.cuh
delete mode 100644 ml-xgboost/cub/cub/device/device_segmented_reduce.cuh
delete mode 100644 ml-xgboost/cub/cub/device/device_select.cuh
delete mode 100644 ml-xgboost/cub/cub/device/device_spmv.cuh
delete mode 100644 ml-xgboost/cub/cub/device/dispatch/dispatch_histogram.cuh
delete mode 100644 ml-xgboost/cub/cub/device/dispatch/dispatch_radix_sort.cuh
delete mode 100644 ml-xgboost/cub/cub/device/dispatch/dispatch_reduce.cuh
delete mode 100644 ml-xgboost/cub/cub/device/dispatch/dispatch_reduce_by_key.cuh
delete mode 100644 ml-xgboost/cub/cub/device/dispatch/dispatch_rle.cuh
delete mode 100644 ml-xgboost/cub/cub/device/dispatch/dispatch_scan.cuh
delete mode 100644 ml-xgboost/cub/cub/device/dispatch/dispatch_select_if.cuh
delete mode 100644 ml-xgboost/cub/cub/device/dispatch/dispatch_spmv_csrt.cuh
delete mode 100644 ml-xgboost/cub/cub/device/dispatch/dispatch_spmv_orig.cuh
delete mode 100644 ml-xgboost/cub/cub/device/dispatch/dispatch_spmv_row_based.cuh
delete mode 100644 ml-xgboost/cub/cub/grid/grid_barrier.cuh
delete mode 100644 ml-xgboost/cub/cub/grid/grid_even_share.cuh
delete mode 100644 ml-xgboost/cub/cub/grid/grid_mapping.cuh
delete mode 100644 ml-xgboost/cub/cub/grid/grid_queue.cuh
delete mode 100644 ml-xgboost/cub/cub/host/mutex.cuh
delete mode 100644 ml-xgboost/cub/cub/iterator/arg_index_input_iterator.cuh
delete mode 100644 ml-xgboost/cub/cub/iterator/cache_modified_input_iterator.cuh
delete mode 100644 ml-xgboost/cub/cub/iterator/cache_modified_output_iterator.cuh
delete mode 100644 ml-xgboost/cub/cub/iterator/constant_input_iterator.cuh
delete mode 100644 ml-xgboost/cub/cub/iterator/counting_input_iterator.cuh
delete mode 100644 ml-xgboost/cub/cub/iterator/discard_output_iterator.cuh
delete mode 100644 ml-xgboost/cub/cub/iterator/tex_obj_input_iterator.cuh
delete mode 100644 ml-xgboost/cub/cub/iterator/tex_ref_input_iterator.cuh
delete mode 100644 ml-xgboost/cub/cub/iterator/transform_input_iterator.cuh
delete mode 100644 ml-xgboost/cub/cub/thread/thread_load.cuh
delete mode 100644 ml-xgboost/cub/cub/thread/thread_operators.cuh
delete mode 100644 ml-xgboost/cub/cub/thread/thread_reduce.cuh
delete mode 100644 ml-xgboost/cub/cub/thread/thread_scan.cuh
delete mode 100644 ml-xgboost/cub/cub/thread/thread_search.cuh
delete mode 100644 ml-xgboost/cub/cub/thread/thread_store.cuh
delete mode 100644 ml-xgboost/cub/cub/util_allocator.cuh
delete mode 100644 ml-xgboost/cub/cub/util_arch.cuh
delete mode 100644 ml-xgboost/cub/cub/util_debug.cuh
delete mode 100644 ml-xgboost/cub/cub/util_device.cuh
delete mode 100644 ml-xgboost/cub/cub/util_macro.cuh
delete mode 100644 ml-xgboost/cub/cub/util_namespace.cuh
delete mode 100644 ml-xgboost/cub/cub/util_ptx.cuh
delete mode 100644 ml-xgboost/cub/cub/util_type.cuh
delete mode 100644 ml-xgboost/cub/cub/warp/specializations/warp_reduce_shfl.cuh
delete mode 100644 ml-xgboost/cub/cub/warp/specializations/warp_reduce_smem.cuh
delete mode 100644 ml-xgboost/cub/cub/warp/specializations/warp_scan_shfl.cuh
delete mode 100644 ml-xgboost/cub/cub/warp/specializations/warp_scan_smem.cuh
delete mode 100644 ml-xgboost/cub/cub/warp/warp_reduce.cuh
delete mode 100644 ml-xgboost/cub/cub/warp/warp_scan.cuh
delete mode 100644 ml-xgboost/cub/eclipse code style profile.xml
delete mode 100644 ml-xgboost/cub/examples/block/.gitignore
delete mode 100644 ml-xgboost/cub/examples/block/Makefile
delete mode 100644 ml-xgboost/cub/examples/block/example_block_radix_sort.cu
delete mode 100644 ml-xgboost/cub/examples/block/example_block_reduce.cu
delete mode 100644 ml-xgboost/cub/examples/block/example_block_scan.cu
delete mode 100644 ml-xgboost/cub/examples/block/reduce_by_key.cu
delete mode 100644 ml-xgboost/cub/examples/device/.gitignore
delete mode 100644 ml-xgboost/cub/examples/device/Makefile
delete mode 100644 ml-xgboost/cub/examples/device/example_device_partition_flagged.cu
delete mode 100644 ml-xgboost/cub/examples/device/example_device_partition_if.cu
delete mode 100644 ml-xgboost/cub/examples/device/example_device_radix_sort.cu
delete mode 100644 ml-xgboost/cub/examples/device/example_device_reduce.cu
delete mode 100644 ml-xgboost/cub/examples/device/example_device_scan.cu
delete mode 100644 ml-xgboost/cub/examples/device/example_device_select_flagged.cu
delete mode 100644 ml-xgboost/cub/examples/device/example_device_select_if.cu
delete mode 100644 ml-xgboost/cub/examples/device/example_device_select_unique.cu
delete mode 100644 ml-xgboost/cub/examples/device/example_device_sort_find_non_trivial_runs.cu
delete mode 100644 ml-xgboost/cub/experimental/.gitignore
delete mode 100644 ml-xgboost/cub/experimental/Makefile
delete mode 100644 ml-xgboost/cub/experimental/defunct/example_coo_spmv.cu
delete mode 100644 ml-xgboost/cub/experimental/defunct/test_device_seg_reduce.cu
delete mode 100644 ml-xgboost/cub/experimental/histogram/histogram_cub.h
delete mode 100644 ml-xgboost/cub/experimental/histogram/histogram_gmem_atomics.h
delete mode 100644 ml-xgboost/cub/experimental/histogram/histogram_smem_atomics.h
delete mode 100644 ml-xgboost/cub/experimental/histogram_compare.cu
delete mode 100644 ml-xgboost/cub/experimental/sparse_matrix.h
delete mode 100644 ml-xgboost/cub/experimental/spmv_compare.cu
delete mode 100644 ml-xgboost/cub/experimental/spmv_script.sh
delete mode 100644 ml-xgboost/cub/test/.gitignore
delete mode 100644 ml-xgboost/cub/test/Makefile
delete mode 100644 ml-xgboost/cub/test/link_a.cu
delete mode 100644 ml-xgboost/cub/test/link_b.cu
delete mode 100644 ml-xgboost/cub/test/link_main.cpp
delete mode 100644 ml-xgboost/cub/test/mersenne.h
delete mode 100644 ml-xgboost/cub/test/test_allocator.cu
delete mode 100644 ml-xgboost/cub/test/test_block_histogram.cu
delete mode 100644 ml-xgboost/cub/test/test_block_load_store.cu
delete mode 100644 ml-xgboost/cub/test/test_block_radix_sort.cu
delete mode 100644 ml-xgboost/cub/test/test_block_reduce.cu
delete mode 100644 ml-xgboost/cub/test/test_block_scan.cu
delete mode 100644 ml-xgboost/cub/test/test_device_histogram.cu
delete mode 100644 ml-xgboost/cub/test/test_device_radix_sort.cu
delete mode 100644 ml-xgboost/cub/test/test_device_reduce.cu
delete mode 100644 ml-xgboost/cub/test/test_device_reduce_by_key.cu
delete mode 100644 ml-xgboost/cub/test/test_device_run_length_encode.cu
delete mode 100644 ml-xgboost/cub/test/test_device_scan.cu
delete mode 100644 ml-xgboost/cub/test/test_device_select_if.cu
delete mode 100644 ml-xgboost/cub/test/test_device_select_unique.cu
delete mode 100644 ml-xgboost/cub/test/test_grid_barrier.cu
delete mode 100644 ml-xgboost/cub/test/test_iterator.cu
delete mode 100644 ml-xgboost/cub/test/test_util.h
delete mode 100644 ml-xgboost/cub/test/test_warp_reduce.cu
delete mode 100644 ml-xgboost/cub/test/test_warp_scan.cu
delete mode 100644 ml-xgboost/cub/tune/.gitignore
delete mode 100644 ml-xgboost/cub/tune/Makefile
delete mode 100644 ml-xgboost/cub/tune/tune_device_reduce.cu
delete mode 100644 ml-xgboost/demo/.gitignore
delete mode 100644 ml-xgboost/demo/README.md
delete mode 100644 ml-xgboost/demo/aft_survival/aft_survival_demo.py
delete mode 100644 ml-xgboost/demo/aft_survival/aft_survival_demo_with_optuna.py
delete mode 100644 ml-xgboost/demo/aft_survival/aft_survival_viz_demo.py
delete mode 100644 ml-xgboost/demo/binary_classification/README.md
delete mode 100644 ml-xgboost/demo/binary_classification/agaricus-lepiota.data
delete mode 100644 ml-xgboost/demo/binary_classification/agaricus-lepiota.fmap
delete mode 100644 ml-xgboost/demo/binary_classification/agaricus-lepiota.names
delete mode 100644 ml-xgboost/demo/binary_classification/mapfeat.py
delete mode 100644 ml-xgboost/demo/binary_classification/mknfold.py
delete mode 100644 ml-xgboost/demo/binary_classification/mushroom.conf
delete mode 100644 ml-xgboost/demo/binary_classification/runexp.sh
delete mode 100644 ml-xgboost/demo/c-api/CMakeLists.txt
delete mode 100644 ml-xgboost/demo/c-api/Makefile
delete mode 100644 ml-xgboost/demo/c-api/README.md
delete mode 100644 ml-xgboost/demo/c-api/c-api-demo.c
delete mode 100644 ml-xgboost/demo/dask/README.md
delete mode 100644 ml-xgboost/demo/dask/cpu_training.py
delete mode 100644 ml-xgboost/demo/dask/gpu_training.py
delete mode 100644 ml-xgboost/demo/dask/sklearn_cpu_training.py
delete mode 100644 ml-xgboost/demo/dask/sklearn_gpu_training.py
delete mode 100644 ml-xgboost/demo/data/README.md
delete mode 100644 ml-xgboost/demo/data/agaricus.txt.test
delete mode 100644 ml-xgboost/demo/data/agaricus.txt.train
delete mode 100644 ml-xgboost/demo/data/dermatology.data.test
delete mode 100644 ml-xgboost/demo/data/dermatology.data.train
delete mode 100644 ml-xgboost/demo/data/dermatology_process.py
delete mode 100644 ml-xgboost/demo/data/featmap.txt
delete mode 100644 ml-xgboost/demo/data/gen_autoclaims.R
delete mode 100644 ml-xgboost/demo/data/veterans_lung_cancer.csv
delete mode 100644 ml-xgboost/demo/distributed-training/README.md
delete mode 100644 ml-xgboost/demo/distributed-training/mushroom.aws.conf
delete mode 100644 ml-xgboost/demo/distributed-training/plot_model.ipynb
delete mode 100644 ml-xgboost/demo/distributed-training/run_aws.sh
delete mode 100644 ml-xgboost/demo/gpu_acceleration/README.md
delete mode 100644 ml-xgboost/demo/gpu_acceleration/cover_type.py
delete mode 100644 ml-xgboost/demo/gpu_acceleration/memory.py
delete mode 100644 ml-xgboost/demo/guide-python/README.md
delete mode 100644 ml-xgboost/demo/guide-python/basic_walkthrough.py
delete mode 100644 ml-xgboost/demo/guide-python/boost_from_prediction.py
delete mode 100644 ml-xgboost/demo/guide-python/cross_validation.py
delete mode 100644 ml-xgboost/demo/guide-python/custom_objective.py
delete mode 100644 ml-xgboost/demo/guide-python/custom_rmsle.py
delete mode 100644 ml-xgboost/demo/guide-python/custom_softmax.py
delete mode 100644 ml-xgboost/demo/guide-python/evals_result.py
delete mode 100644 ml-xgboost/demo/guide-python/external_memory.py
delete mode 100644 ml-xgboost/demo/guide-python/gamma_regression.py
delete mode 100644 ml-xgboost/demo/guide-python/generalized_linear_model.py
delete mode 100644 ml-xgboost/demo/guide-python/predict_first_ntree.py
delete mode 100644 ml-xgboost/demo/guide-python/predict_leaf_indices.py
delete mode 100644 ml-xgboost/demo/guide-python/runall.sh
delete mode 100644 ml-xgboost/demo/guide-python/sklearn_evals_result.py
delete mode 100644 ml-xgboost/demo/guide-python/sklearn_examples.py
delete mode 100644 ml-xgboost/demo/guide-python/sklearn_parallel.py
delete mode 100644 ml-xgboost/demo/json-model/README.md
delete mode 100644 ml-xgboost/demo/json-model/json_parser.py
delete mode 100644 ml-xgboost/demo/kaggle-higgs/README.md
delete mode 100644 ml-xgboost/demo/kaggle-higgs/higgs-cv.py
delete mode 100644 ml-xgboost/demo/kaggle-higgs/higgs-numpy.py
delete mode 100644 ml-xgboost/demo/kaggle-higgs/higgs-pred.R
delete mode 100644 ml-xgboost/demo/kaggle-higgs/higgs-pred.py
delete mode 100644 ml-xgboost/demo/kaggle-higgs/higgs-train.R
delete mode 100644 ml-xgboost/demo/kaggle-higgs/run.sh
delete mode 100644 ml-xgboost/demo/kaggle-higgs/speedtest.R
delete mode 100644 ml-xgboost/demo/kaggle-higgs/speedtest.py
delete mode 100644 ml-xgboost/demo/kaggle-otto/README.MD
delete mode 100644 ml-xgboost/demo/kaggle-otto/otto_train_pred.R
delete mode 100644 ml-xgboost/demo/kaggle-otto/understandingXGBoostModel.Rmd
delete mode 100644 ml-xgboost/demo/multiclass_classification/README.md
delete mode 100644 ml-xgboost/demo/multiclass_classification/runexp.sh
delete mode 100644 ml-xgboost/demo/multiclass_classification/train.R
delete mode 100644 ml-xgboost/demo/multiclass_classification/train.py
delete mode 100644 ml-xgboost/demo/rank/README.md
delete mode 100644 ml-xgboost/demo/rank/mq2008.conf
delete mode 100644 ml-xgboost/demo/rank/rank.py
delete mode 100644 ml-xgboost/demo/rank/rank_sklearn.py
delete mode 100644 ml-xgboost/demo/rank/runexp.sh
delete mode 100644 ml-xgboost/demo/rank/trans_data.py
delete mode 100644 ml-xgboost/demo/rank/wgetdata.sh
delete mode 100644 ml-xgboost/demo/regression/README.md
delete mode 100644 ml-xgboost/demo/regression/machine.conf
delete mode 100644 ml-xgboost/demo/regression/machine.data
delete mode 100644 ml-xgboost/demo/regression/machine.names
delete mode 100644 ml-xgboost/demo/regression/mapfeat.py
delete mode 100644 ml-xgboost/demo/regression/mknfold.py
delete mode 100644 ml-xgboost/demo/regression/runexp.sh
delete mode 100644 ml-xgboost/demo/yearpredMSD/README.md
delete mode 100644 ml-xgboost/demo/yearpredMSD/csv2libsvm.py
delete mode 100644 ml-xgboost/demo/yearpredMSD/runexp.sh
delete mode 100644 ml-xgboost/demo/yearpredMSD/yearpredMSD.conf
delete mode 100644 ml-xgboost/dev/query_contributors.py
delete mode 100644 ml-xgboost/dmlc-core/.editorconfig
delete mode 100644 ml-xgboost/dmlc-core/.gitignore
delete mode 100644 ml-xgboost/dmlc-core/CMakeLists.txt
delete mode 100644 ml-xgboost/dmlc-core/LICENSE
delete mode 100644 ml-xgboost/dmlc-core/Makefile
delete mode 100644 ml-xgboost/dmlc-core/README.md
delete mode 100644 ml-xgboost/dmlc-core/appveyor.yml
delete mode 100644 ml-xgboost/dmlc-core/cmake/Modules/FindASan.cmake
delete mode 100644 ml-xgboost/dmlc-core/cmake/Modules/FindHDFS.cmake
delete mode 100644 ml-xgboost/dmlc-core/cmake/Modules/FindLSan.cmake
delete mode 100644 ml-xgboost/dmlc-core/cmake/Modules/FindTSan.cmake
delete mode 100644 ml-xgboost/dmlc-core/cmake/Modules/FindUBSan.cmake
delete mode 100644 ml-xgboost/dmlc-core/cmake/Sanitizer.cmake
delete mode 100644 ml-xgboost/dmlc-core/cmake/Utils.cmake
delete mode 100644 ml-xgboost/dmlc-core/cmake/build_config.h.in
delete mode 100644 ml-xgboost/dmlc-core/cmake/dmlc-config.cmake.in
delete mode 100644 ml-xgboost/dmlc-core/cmake/gtest_cmake.in
delete mode 100644 ml-xgboost/dmlc-core/cmake/lint.cmake
delete mode 100644 ml-xgboost/dmlc-core/doc/.gitignore
delete mode 100644 ml-xgboost/dmlc-core/doc/Doxyfile
delete mode 100644 ml-xgboost/dmlc-core/doc/Makefile
delete mode 100644 ml-xgboost/dmlc-core/doc/README
delete mode 100644 ml-xgboost/dmlc-core/doc/build.md
delete mode 100644 ml-xgboost/dmlc-core/doc/conf.py
delete mode 100644 ml-xgboost/dmlc-core/doc/index.md
delete mode 100644 ml-xgboost/dmlc-core/doc/parameter.md
delete mode 100644 ml-xgboost/dmlc-core/doc/sphinx_util.py
delete mode 100644 ml-xgboost/dmlc-core/example/dmlc_example.mk
delete mode 100644 ml-xgboost/dmlc-core/example/parameter.cc
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/any.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/array_view.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/base.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/blockingconcurrentqueue.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/build_config_default.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/common.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/concurrency.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/concurrentqueue.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/config.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/data.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/endian.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/filesystem.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/input_split_shuffle.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/io.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/json.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/logging.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/lua.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/memory.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/memory_io.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/omp.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/optional.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/parameter.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/recordio.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/registry.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/serializer.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/strtonum.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/thread_group.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/thread_local.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/threadediter.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/timer.h
delete mode 100644 ml-xgboost/dmlc-core/include/dmlc/type_traits.h
delete mode 100644 ml-xgboost/dmlc-core/make/dmlc.mk
delete mode 100644 ml-xgboost/dmlc-core/scripts/lint.py
delete mode 100644 ml-xgboost/dmlc-core/scripts/packages.mk
delete mode 100644 ml-xgboost/dmlc-core/scripts/s390x/Dockerfile
delete mode 100644 ml-xgboost/dmlc-core/scripts/s390x/build_via_cmake.sh
delete mode 100644 ml-xgboost/dmlc-core/scripts/s390x/ci_build.sh
delete mode 100644 ml-xgboost/dmlc-core/scripts/s390x/entrypoint.sh
delete mode 100644 ml-xgboost/dmlc-core/scripts/test_script.sh
delete mode 100644 ml-xgboost/dmlc-core/src/config.cc
delete mode 100644 ml-xgboost/dmlc-core/src/data.cc
delete mode 100644 ml-xgboost/dmlc-core/src/data/basic_row_iter.h
delete mode 100644 ml-xgboost/dmlc-core/src/data/csv_parser.h
delete mode 100644 ml-xgboost/dmlc-core/src/data/disk_row_iter.h
delete mode 100644 ml-xgboost/dmlc-core/src/data/libfm_parser.h
delete mode 100644 ml-xgboost/dmlc-core/src/data/libsvm_parser.h
delete mode 100644 ml-xgboost/dmlc-core/src/data/parser.h
delete mode 100644 ml-xgboost/dmlc-core/src/data/row_block.h
delete mode 100644 ml-xgboost/dmlc-core/src/data/text_parser.h
delete mode 100644 ml-xgboost/dmlc-core/src/io.cc
delete mode 100644 ml-xgboost/dmlc-core/src/io/azure_filesys.cc
delete mode 100644 ml-xgboost/dmlc-core/src/io/azure_filesys.h
delete mode 100644 ml-xgboost/dmlc-core/src/io/cached_input_split.h
delete mode 100644 ml-xgboost/dmlc-core/src/io/filesys.cc
delete mode 100644 ml-xgboost/dmlc-core/src/io/hdfs_filesys.cc
delete mode 100644 ml-xgboost/dmlc-core/src/io/hdfs_filesys.h
delete mode 100644 ml-xgboost/dmlc-core/src/io/indexed_recordio_split.cc
delete mode 100644 ml-xgboost/dmlc-core/src/io/indexed_recordio_split.h
delete mode 100644 ml-xgboost/dmlc-core/src/io/input_split_base.cc
delete mode 100644 ml-xgboost/dmlc-core/src/io/input_split_base.h
delete mode 100644 ml-xgboost/dmlc-core/src/io/line_split.cc
delete mode 100644 ml-xgboost/dmlc-core/src/io/line_split.h
delete mode 100644 ml-xgboost/dmlc-core/src/io/local_filesys.cc
delete mode 100644 ml-xgboost/dmlc-core/src/io/local_filesys.h
delete mode 100644 ml-xgboost/dmlc-core/src/io/recordio_split.cc
delete mode 100644 ml-xgboost/dmlc-core/src/io/recordio_split.h
delete mode 100644 ml-xgboost/dmlc-core/src/io/s3_filesys.cc
delete mode 100644 ml-xgboost/dmlc-core/src/io/s3_filesys.h
delete mode 100644 ml-xgboost/dmlc-core/src/io/single_file_split.h
delete mode 100644 ml-xgboost/dmlc-core/src/io/single_threaded_input_split.h
delete mode 100644 ml-xgboost/dmlc-core/src/io/threaded_input_split.h
delete mode 100644 ml-xgboost/dmlc-core/src/io/uri_spec.h
delete mode 100644 ml-xgboost/dmlc-core/src/recordio.cc
delete mode 100644 ml-xgboost/dmlc-core/test/.gitignore
delete mode 100644 ml-xgboost/dmlc-core/test/README.md
delete mode 100644 ml-xgboost/dmlc-core/test/csv_parser_test.cc
delete mode 100644 ml-xgboost/dmlc-core/test/dataiter_test.cc
delete mode 100644 ml-xgboost/dmlc-core/test/dmlc_test.mk
delete mode 100644 ml-xgboost/dmlc-core/test/filesys_test.cc
delete mode 100644 ml-xgboost/dmlc-core/test/iostream_test.cc
delete mode 100644 ml-xgboost/dmlc-core/test/libfm_parser_test.cc
delete mode 100644 ml-xgboost/dmlc-core/test/libsvm_parser_test.cc
delete mode 100644 ml-xgboost/dmlc-core/test/logging_test.cc
delete mode 100644 ml-xgboost/dmlc-core/test/parameter_test.cc
delete mode 100644 ml-xgboost/dmlc-core/test/recordio_test.cc
delete mode 100644 ml-xgboost/dmlc-core/test/registry_test.cc
delete mode 100644 ml-xgboost/dmlc-core/test/split_read_test.cc
delete mode 100644 ml-xgboost/dmlc-core/test/split_repeat_read_test.cc
delete mode 100644 ml-xgboost/dmlc-core/test/split_test.cc
delete mode 100644 ml-xgboost/dmlc-core/test/stream_read_test.cc
delete mode 100644 ml-xgboost/dmlc-core/test/strtonum_test.cc
delete mode 100644 ml-xgboost/dmlc-core/test/unittest/.gitignore
delete mode 100644 ml-xgboost/dmlc-core/test/unittest/CMakeLists.txt
delete mode 100644 ml-xgboost/dmlc-core/test/unittest/build_config.h.in
delete mode 100644 ml-xgboost/dmlc-core/test/unittest/dmlc_unittest.mk
delete mode 100644 ml-xgboost/dmlc-core/test/unittest/unittest_any.cc
delete mode 100644 ml-xgboost/dmlc-core/test/unittest/unittest_array_view.cc
delete mode 100644 ml-xgboost/dmlc-core/test/unittest/unittest_config.cc
delete mode 100644 ml-xgboost/dmlc-core/test/unittest/unittest_env.cc
delete mode 100644 ml-xgboost/dmlc-core/test/unittest/unittest_inputsplit.cc
delete mode 100644 ml-xgboost/dmlc-core/test/unittest/unittest_json.cc
delete mode 100644 ml-xgboost/dmlc-core/test/unittest/unittest_lockfree.cc
delete mode 100644 ml-xgboost/dmlc-core/test/unittest/unittest_logging.cc
delete mode 100644 ml-xgboost/dmlc-core/test/unittest/unittest_main.cc
delete mode 100644 ml-xgboost/dmlc-core/test/unittest/unittest_optional.cc
delete mode 100644 ml-xgboost/dmlc-core/test/unittest/unittest_param.cc
delete mode 100644 ml-xgboost/dmlc-core/test/unittest/unittest_parser.cc
delete mode 100644 ml-xgboost/dmlc-core/test/unittest/unittest_serializer.cc
delete mode 100644 ml-xgboost/dmlc-core/test/unittest/unittest_tempdir.cc
delete mode 100644 ml-xgboost/dmlc-core/test/unittest/unittest_thread_group.cc
delete mode 100644 ml-xgboost/dmlc-core/test/unittest/unittest_threaditer.cc
delete mode 100644 ml-xgboost/dmlc-core/test/unittest/unittest_threaditer_exc_handling.cc
delete mode 100644 ml-xgboost/dmlc-core/tracker/README.md
delete mode 100644 ml-xgboost/dmlc-core/tracker/dmlc-submit
delete mode 100644 ml-xgboost/dmlc-core/tracker/dmlc_tracker/__init__.py
delete mode 100644 ml-xgboost/dmlc-core/tracker/dmlc_tracker/kubernetes.py
delete mode 100644 ml-xgboost/dmlc-core/tracker/dmlc_tracker/launcher.py
delete mode 100644 ml-xgboost/dmlc-core/tracker/dmlc_tracker/local.py
delete mode 100644 ml-xgboost/dmlc-core/tracker/dmlc_tracker/mesos.py
delete mode 100644 ml-xgboost/dmlc-core/tracker/dmlc_tracker/mpi.py
delete mode 100644 ml-xgboost/dmlc-core/tracker/dmlc_tracker/opts.py
delete mode 100644 ml-xgboost/dmlc-core/tracker/dmlc_tracker/sge.py
delete mode 100644 ml-xgboost/dmlc-core/tracker/dmlc_tracker/slurm.py
delete mode 100644 ml-xgboost/dmlc-core/tracker/dmlc_tracker/ssh.py
delete mode 100644 ml-xgboost/dmlc-core/tracker/dmlc_tracker/submit.py
delete mode 100644 ml-xgboost/dmlc-core/tracker/dmlc_tracker/tracker.py
delete mode 100644 ml-xgboost/dmlc-core/tracker/dmlc_tracker/util.py
delete mode 100644 ml-xgboost/dmlc-core/tracker/dmlc_tracker/yarn.py
delete mode 100644 ml-xgboost/dmlc-core/tracker/yarn/.gitignore
delete mode 100644 ml-xgboost/dmlc-core/tracker/yarn/README.md
delete mode 100644 ml-xgboost/dmlc-core/tracker/yarn/build.bat
delete mode 100644 ml-xgboost/dmlc-core/tracker/yarn/build.sh
delete mode 100644 ml-xgboost/dmlc-core/tracker/yarn/pom.xml
delete mode 100644 ml-xgboost/dmlc-core/tracker/yarn/src/main/java/org/apache/hadoop/yarn/dmlc/ApplicationMaster.java
delete mode 100644 ml-xgboost/dmlc-core/tracker/yarn/src/main/java/org/apache/hadoop/yarn/dmlc/Client.java
delete mode 100644 ml-xgboost/dmlc-core/tracker/yarn/src/main/java/org/apache/hadoop/yarn/dmlc/TaskRecord.java
delete mode 100644 ml-xgboost/dmlc-core/windows/.gitignore
delete mode 100644 ml-xgboost/dmlc-core/windows/README.md
delete mode 100644 ml-xgboost/dmlc-core/windows/dmlc.sln
delete mode 100644 ml-xgboost/dmlc-core/windows/dmlc/dmlc.vcxproj
delete mode 100644 ml-xgboost/doc/.gitignore
delete mode 100644 ml-xgboost/doc/Doxyfile.in
delete mode 100644 ml-xgboost/doc/Makefile
delete mode 100644 ml-xgboost/doc/R-package/.gitignore
delete mode 100644 ml-xgboost/doc/R-package/Makefile
delete mode 100644 ml-xgboost/doc/R-package/discoverYourData.md
delete mode 100644 ml-xgboost/doc/R-package/index.rst
delete mode 100644 ml-xgboost/doc/R-package/xgboostPresentation.md
delete mode 100644 ml-xgboost/doc/README
delete mode 100644 ml-xgboost/doc/build.rst
delete mode 100644 ml-xgboost/doc/c++.rst
delete mode 100644 ml-xgboost/doc/c.rst
delete mode 100644 ml-xgboost/doc/cli.rst
delete mode 100644 ml-xgboost/doc/conf.py
delete mode 100644 ml-xgboost/doc/contrib/coding_guide.rst
delete mode 100644 ml-xgboost/doc/contrib/community.rst
delete mode 100644 ml-xgboost/doc/contrib/docs.rst
delete mode 100644 ml-xgboost/doc/contrib/donate.rst
delete mode 100644 ml-xgboost/doc/contrib/git_guide.rst
delete mode 100644 ml-xgboost/doc/contrib/index.rst
delete mode 100644 ml-xgboost/doc/contrib/release.rst
delete mode 100644 ml-xgboost/doc/contrib/unit_tests.rst
delete mode 100644 ml-xgboost/doc/dump.schema
delete mode 100644 ml-xgboost/doc/faq.rst
delete mode 100644 ml-xgboost/doc/get_started.rst
delete mode 100644 ml-xgboost/doc/gpu/index.rst
delete mode 100644 ml-xgboost/doc/index.rst
delete mode 100644 ml-xgboost/doc/julia.rst
delete mode 100644 ml-xgboost/doc/jvm/index.rst
delete mode 100644 ml-xgboost/doc/jvm/java_intro.rst
delete mode 100644 ml-xgboost/doc/jvm/javadocs/index.rst
delete mode 100644 ml-xgboost/doc/jvm/scaladocs/xgboost4j-flink/index.rst
delete mode 100644 ml-xgboost/doc/jvm/scaladocs/xgboost4j-spark/index.rst
delete mode 100644 ml-xgboost/doc/jvm/scaladocs/xgboost4j/index.rst
delete mode 100644 ml-xgboost/doc/jvm/xgboost4j_spark_tutorial.rst
delete mode 100644 ml-xgboost/doc/model.schema
delete mode 100644 ml-xgboost/doc/parameter.rst
delete mode 100644 ml-xgboost/doc/python/convert_090to100.py
delete mode 100644 ml-xgboost/doc/python/index.rst
delete mode 100644 ml-xgboost/doc/python/python_api.rst
delete mode 100644 ml-xgboost/doc/python/python_intro.rst
delete mode 100644 ml-xgboost/doc/requirements.txt
delete mode 100644 ml-xgboost/doc/sphinx_util.py
delete mode 100644 ml-xgboost/doc/tutorials/aft_survival_analysis.rst
delete mode 100644 ml-xgboost/doc/tutorials/aws_yarn.rst
delete mode 100644 ml-xgboost/doc/tutorials/custom_metric_obj.rst
delete mode 100644 ml-xgboost/doc/tutorials/dart.rst
delete mode 100644 ml-xgboost/doc/tutorials/dask.rst
delete mode 100644 ml-xgboost/doc/tutorials/external_memory.rst
delete mode 100644 ml-xgboost/doc/tutorials/feature_interaction_constraint.rst
delete mode 100644 ml-xgboost/doc/tutorials/index.rst
delete mode 100644 ml-xgboost/doc/tutorials/input_format.rst
delete mode 100644 ml-xgboost/doc/tutorials/kubernetes.rst
delete mode 100644 ml-xgboost/doc/tutorials/model.rst
delete mode 100644 ml-xgboost/doc/tutorials/monotonic.rst
delete mode 100644 ml-xgboost/doc/tutorials/param_tuning.rst
delete mode 100644 ml-xgboost/doc/tutorials/rf.rst
delete mode 100644 ml-xgboost/doc/tutorials/saving_model.rst
delete mode 100644 ml-xgboost/include/xgboost/base.h
delete mode 100644 ml-xgboost/include/xgboost/c_api.h
delete mode 100644 ml-xgboost/include/xgboost/data.h
delete mode 100644 ml-xgboost/include/xgboost/feature_map.h
delete mode 100644 ml-xgboost/include/xgboost/gbm.h
delete mode 100644 ml-xgboost/include/xgboost/generic_parameters.h
delete mode 100644 ml-xgboost/include/xgboost/host_device_vector.h
delete mode 100644 ml-xgboost/include/xgboost/json.h
delete mode 100644 ml-xgboost/include/xgboost/json_io.h
delete mode 100644 ml-xgboost/include/xgboost/learner.h
delete mode 100644 ml-xgboost/include/xgboost/linear_updater.h
delete mode 100644 ml-xgboost/include/xgboost/logging.h
delete mode 100644 ml-xgboost/include/xgboost/metric.h
delete mode 100644 ml-xgboost/include/xgboost/model.h
delete mode 100644 ml-xgboost/include/xgboost/objective.h
delete mode 100644 ml-xgboost/include/xgboost/parameter.h
delete mode 100644 ml-xgboost/include/xgboost/predictor.h
delete mode 100644 ml-xgboost/include/xgboost/span.h
delete mode 100644 ml-xgboost/include/xgboost/tree_model.h
delete mode 100644 ml-xgboost/include/xgboost/tree_updater.h
delete mode 100644 ml-xgboost/include/xgboost/version_config.h
delete mode 100644 ml-xgboost/jvm-packages/.gitignore
delete mode 100644 ml-xgboost/jvm-packages/CMakeLists.txt
delete mode 100644 ml-xgboost/jvm-packages/README.md
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/LICENSE
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/README.md
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/pom.xml
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/src/main/java/ml/dmlc/xgboost4j/java/example/BasicWalkThrough.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/src/main/java/ml/dmlc/xgboost4j/java/example/BoostFromPrediction.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/src/main/java/ml/dmlc/xgboost4j/java/example/CrossValidation.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/src/main/java/ml/dmlc/xgboost4j/java/example/CustomObjective.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/src/main/java/ml/dmlc/xgboost4j/java/example/ExternalMemory.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/src/main/java/ml/dmlc/xgboost4j/java/example/GeneralizedLinearModel.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/src/main/java/ml/dmlc/xgboost4j/java/example/PredictFirstNtree.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/src/main/java/ml/dmlc/xgboost4j/java/example/PredictLeafIndices.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/src/main/java/ml/dmlc/xgboost4j/java/example/util/CustomEval.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/src/main/java/ml/dmlc/xgboost4j/java/example/util/DataLoader.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/src/main/scala/ml/dmlc/xgboost4j/scala/example/BasicWalkThrough.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/src/main/scala/ml/dmlc/xgboost4j/scala/example/BoostFromPrediction.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/src/main/scala/ml/dmlc/xgboost4j/scala/example/CrossValidation.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/src/main/scala/ml/dmlc/xgboost4j/scala/example/CustomObjective.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/src/main/scala/ml/dmlc/xgboost4j/scala/example/ExternalMemory.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/src/main/scala/ml/dmlc/xgboost4j/scala/example/GeneralizedLinearModel.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/src/main/scala/ml/dmlc/xgboost4j/scala/example/PredictFirstNTree.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/src/main/scala/ml/dmlc/xgboost4j/scala/example/PredictLeafIndices.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/src/main/scala/ml/dmlc/xgboost4j/scala/example/flink/DistTrainWithFlink.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/src/main/scala/ml/dmlc/xgboost4j/scala/example/spark/SparkMLlibPipeline.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/src/main/scala/ml/dmlc/xgboost4j/scala/example/spark/SparkTraining.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-example/src/main/scala/ml/dmlc/xgboost4j/scala/example/util/CustomEval.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-flink/pom.xml
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-flink/src/main/scala/ml/dmlc/xgboost4j/scala/flink/XGBoost.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-flink/src/main/scala/ml/dmlc/xgboost4j/scala/flink/XGBoostModel.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark-client/pom.xml
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark-client/src/main/scala/ml/dmlc/xgboost4j/scala/spark/XGBoost.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark-client/src/main/scala/ml/dmlc/xgboost4j/scala/spark/params/LearningTaskParams.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark-kernel-client/pom.xml
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark-kernel-client/src/main/scala/ml/dmlc/xgboost4j/scala/spark/XGBoostUtil.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark/checkstyle-suppressions.xml
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark/checkstyle.xml
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark/pom.xml
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark/scalastyle-config.xml
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark/src/main/scala/ml/dmlc/xgboost4j/scala/spark/DataUtils.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark/src/main/scala/ml/dmlc/xgboost4j/scala/spark/XGBoost.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark/src/main/scala/ml/dmlc/xgboost4j/scala/spark/XGBoostClassifier.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark/src/main/scala/ml/dmlc/xgboost4j/scala/spark/XGBoostEstimatorCommon.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark/src/main/scala/ml/dmlc/xgboost4j/scala/spark/XGBoostRegressor.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark/src/main/scala/ml/dmlc/xgboost4j/scala/spark/XGBoostTrainingSummary.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark/src/main/scala/ml/dmlc/xgboost4j/scala/spark/package.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark/src/main/scala/ml/dmlc/xgboost4j/scala/spark/params/BoosterParams.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark/src/main/scala/ml/dmlc/xgboost4j/scala/spark/params/CustomParams.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark/src/main/scala/ml/dmlc/xgboost4j/scala/spark/params/DefaultXGBoostParamsReader.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark/src/main/scala/ml/dmlc/xgboost4j/scala/spark/params/DefaultXGBoostParamsWriter.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark/src/main/scala/ml/dmlc/xgboost4j/scala/spark/params/GeneralParams.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark/src/main/scala/ml/dmlc/xgboost4j/scala/spark/params/InferenceParams.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark/src/main/scala/ml/dmlc/xgboost4j/scala/spark/params/LearningTaskParams.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark/src/main/scala/ml/dmlc/xgboost4j/scala/spark/params/NonParamVariables.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark/src/main/scala/ml/dmlc/xgboost4j/scala/spark/params/RabitParams.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark/src/main/scala/ml/dmlc/xgboost4j/scala/spark/params/Utils.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j-spark/src/main/scala/org/apache/spark/SparkParallelismTracker.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/LICENSE
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/pom.xml
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/java/ml/dmlc/xgboost4j/java/Booster.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/java/ml/dmlc/xgboost4j/java/DMatrix.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/java/ml/dmlc/xgboost4j/java/DataBatch.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/java/ml/dmlc/xgboost4j/java/ExternalCheckpointManager.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/java/ml/dmlc/xgboost4j/java/IEvaluation.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/java/ml/dmlc/xgboost4j/java/IObjective.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/java/ml/dmlc/xgboost4j/java/IRabitTracker.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/java/ml/dmlc/xgboost4j/java/NativeLibLoader.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/java/ml/dmlc/xgboost4j/java/Rabit.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/java/ml/dmlc/xgboost4j/java/RabitTracker.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/java/ml/dmlc/xgboost4j/java/TrackerProperties.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/java/ml/dmlc/xgboost4j/java/XGBoost.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/java/ml/dmlc/xgboost4j/java/XGBoostError.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/java/ml/dmlc/xgboost4j/java/XGBoostJNI.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/java/ml/dmlc/xgboost4j/java/util/BigDenseMatrix.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/java/ml/dmlc/xgboost4j/java/util/UtilUnsafe.java
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/resources/xgboost4j-version.properties
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/scala/ml/dmlc/xgboost4j/LabeledPoint.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/scala/ml/dmlc/xgboost4j/scala/Booster.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/scala/ml/dmlc/xgboost4j/scala/DMatrix.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/scala/ml/dmlc/xgboost4j/scala/EvalTrait.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/scala/ml/dmlc/xgboost4j/scala/ExternalCheckpointManager.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/scala/ml/dmlc/xgboost4j/scala/ObjectiveTrait.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/scala/ml/dmlc/xgboost4j/scala/XGBoost.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/scala/ml/dmlc/xgboost4j/scala/rabit/RabitTracker.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/scala/ml/dmlc/xgboost4j/scala/rabit/handler/RabitTrackerHandler.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/scala/ml/dmlc/xgboost4j/scala/rabit/handler/RabitWorkerHandler.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/scala/ml/dmlc/xgboost4j/scala/rabit/util/LinkMap.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/main/scala/ml/dmlc/xgboost4j/scala/rabit/util/RabitTrackerHelpers.scala
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/native/xgboost4j.cpp
delete mode 100644 ml-xgboost/jvm-packages/boostkit-xgboost4j/src/native/xgboost4j.h
delete mode 100644 ml-xgboost/jvm-packages/checkstyle-suppressions.xml
delete mode 100644 ml-xgboost/jvm-packages/checkstyle.xml
delete mode 100644 ml-xgboost/jvm-packages/create_jni.py
delete mode 100644 ml-xgboost/jvm-packages/dev/.gitattributes
delete mode 100644 ml-xgboost/jvm-packages/dev/.gitignore delete mode 100644 ml-xgboost/jvm-packages/dev/Dockerfile delete mode 100644 ml-xgboost/jvm-packages/dev/build-linux.cmd delete mode 100644 ml-xgboost/jvm-packages/dev/build-linux.sh delete mode 100644 ml-xgboost/jvm-packages/dev/change_version.sh delete mode 100644 ml-xgboost/jvm-packages/dev/package-linux.sh delete mode 100644 ml-xgboost/jvm-packages/pom.xml delete mode 100644 ml-xgboost/jvm-packages/scalastyle-config.xml delete mode 100644 ml-xgboost/kernel_include/README.md delete mode 100644 ml-xgboost/kernel_include/boostkit_xgboost_kernel/bbgen.h delete mode 100644 ml-xgboost/kernel_include/boostkit_xgboost_kernel/rabit_intrinsics.h delete mode 100644 ml-xgboost/kernel_include/boostkit_xgboost_kernel/update_quantile_hist_kernel.h delete mode 100644 ml-xgboost/kernel_include/boostkit_xgboost_kernel_client/CMakeLists.txt delete mode 100644 ml-xgboost/kernel_include/boostkit_xgboost_kernel_client/bbgen.cpp delete mode 100644 ml-xgboost/kernel_include/boostkit_xgboost_kernel_client/rabit_intrinsics.cpp delete mode 100644 ml-xgboost/kernel_include/boostkit_xgboost_kernel_client/update_quantile_hist_kernel.cpp delete mode 100644 ml-xgboost/plugin/CMakeLists.txt delete mode 100644 ml-xgboost/plugin/README.md delete mode 100644 ml-xgboost/plugin/dense_parser/dense_libsvm.cc delete mode 100644 ml-xgboost/plugin/example/README.md delete mode 100644 ml-xgboost/plugin/example/custom_obj.cc delete mode 100644 ml-xgboost/plugin/lz4/sparse_page_lz4_format.cc delete mode 100644 ml-xgboost/plugin/updater_gpu/README.md delete mode 100644 ml-xgboost/python-package/.gitignore delete mode 100644 ml-xgboost/python-package/.pylintrc delete mode 100644 ml-xgboost/python-package/MANIFEST.in delete mode 100644 ml-xgboost/python-package/README.rst delete mode 100644 ml-xgboost/python-package/setup.cfg delete mode 100644 ml-xgboost/python-package/setup.py delete mode 100644 ml-xgboost/python-package/xgboost/VERSION delete mode 100644 ml-xgboost/python-package/xgboost/__init__.py delete mode 100644 ml-xgboost/python-package/xgboost/callback.py delete mode 100644 ml-xgboost/python-package/xgboost/compat.py delete mode 100644 ml-xgboost/python-package/xgboost/core.py delete mode 100644 ml-xgboost/python-package/xgboost/dask.py delete mode 100644 ml-xgboost/python-package/xgboost/libpath.py delete mode 100644 ml-xgboost/python-package/xgboost/plotting.py delete mode 100644 ml-xgboost/python-package/xgboost/rabit.py delete mode 100644 ml-xgboost/python-package/xgboost/sklearn.py delete mode 100644 ml-xgboost/python-package/xgboost/tracker.py delete mode 100644 ml-xgboost/python-package/xgboost/training.py delete mode 100644 ml-xgboost/rabit/.gitignore delete mode 100644 ml-xgboost/rabit/.travis.yml delete mode 100644 ml-xgboost/rabit/CMakeLists.txt delete mode 100644 ml-xgboost/rabit/LICENSE delete mode 100644 ml-xgboost/rabit/Makefile delete mode 100644 ml-xgboost/rabit/README.md delete mode 100644 ml-xgboost/rabit/cmake/Config.cmake.in delete mode 100644 ml-xgboost/rabit/cmake/googletest-download.cmake delete mode 100644 ml-xgboost/rabit/cmake/googletest.cmake delete mode 100644 ml-xgboost/rabit/doc/.gitignore delete mode 100644 ml-xgboost/rabit/doc/Doxyfile delete mode 100644 ml-xgboost/rabit/doc/Makefile delete mode 100644 ml-xgboost/rabit/doc/conf.py delete mode 100644 ml-xgboost/rabit/doc/cpp_api.md delete mode 100644 ml-xgboost/rabit/doc/guide.md delete mode 100644 ml-xgboost/rabit/doc/index.md delete mode 100644 ml-xgboost/rabit/doc/parameters.md 
delete mode 100644 ml-xgboost/rabit/doc/python-requirements.txt delete mode 100644 ml-xgboost/rabit/doc/python_api.md delete mode 100644 ml-xgboost/rabit/doc/sphinx_util.py delete mode 100644 ml-xgboost/rabit/guide/Makefile delete mode 100644 ml-xgboost/rabit/guide/README delete mode 100644 ml-xgboost/rabit/guide/basic.cc delete mode 100644 ml-xgboost/rabit/guide/basic.py delete mode 100644 ml-xgboost/rabit/guide/broadcast.cc delete mode 100644 ml-xgboost/rabit/guide/broadcast.py delete mode 100644 ml-xgboost/rabit/guide/lazy_allreduce.cc delete mode 100644 ml-xgboost/rabit/guide/lazy_allreduce.py delete mode 100644 ml-xgboost/rabit/include/rabit/c_api.h delete mode 100644 ml-xgboost/rabit/include/rabit/internal/engine.h delete mode 100644 ml-xgboost/rabit/include/rabit/internal/io.h delete mode 100644 ml-xgboost/rabit/include/rabit/internal/rabit-inl.h delete mode 100644 ml-xgboost/rabit/include/rabit/internal/socket.h delete mode 100644 ml-xgboost/rabit/include/rabit/internal/thread_local.h delete mode 100644 ml-xgboost/rabit/include/rabit/internal/timer.h delete mode 100644 ml-xgboost/rabit/include/rabit/internal/utils.h delete mode 100644 ml-xgboost/rabit/include/rabit/rabit.h delete mode 100644 ml-xgboost/rabit/include/rabit/serializable.h delete mode 100644 ml-xgboost/rabit/python/rabit.py delete mode 100644 ml-xgboost/rabit/scripts/mpi_build.sh delete mode 100644 ml-xgboost/rabit/scripts/travis_runtest.sh delete mode 100644 ml-xgboost/rabit/scripts/travis_script.sh delete mode 100644 ml-xgboost/rabit/scripts/travis_setup.sh delete mode 100644 ml-xgboost/rabit/src/CMakeLists.txt delete mode 100644 ml-xgboost/rabit/src/README.md delete mode 100644 ml-xgboost/rabit/src/allreduce_base.cc delete mode 100644 ml-xgboost/rabit/src/allreduce_base.h delete mode 100644 ml-xgboost/rabit/src/allreduce_mock.h delete mode 100644 ml-xgboost/rabit/src/allreduce_robust-inl.h delete mode 100644 ml-xgboost/rabit/src/allreduce_robust.cc delete mode 100644 ml-xgboost/rabit/src/allreduce_robust.h delete mode 100644 ml-xgboost/rabit/src/c_api.cc delete mode 100644 ml-xgboost/rabit/src/engine.cc delete mode 100644 ml-xgboost/rabit/src/engine_base.cc delete mode 100644 ml-xgboost/rabit/src/engine_empty.cc delete mode 100644 ml-xgboost/rabit/src/engine_mock.cc delete mode 100644 ml-xgboost/rabit/src/engine_mpi.cc delete mode 100644 ml-xgboost/rabit/test/.gitignore delete mode 100644 ml-xgboost/rabit/test/Makefile delete mode 100644 ml-xgboost/rabit/test/README.md delete mode 100644 ml-xgboost/rabit/test/cpp/CMakeLists.txt delete mode 100644 ml-xgboost/rabit/test/cpp/README.md delete mode 100644 ml-xgboost/rabit/test/cpp/allreduce_base_test.cc delete mode 100644 ml-xgboost/rabit/test/cpp/allreduce_base_test.cpp delete mode 100644 ml-xgboost/rabit/test/cpp/allreduce_mock_test.cc delete mode 100644 ml-xgboost/rabit/test/cpp/allreduce_mock_test.cpp delete mode 100644 ml-xgboost/rabit/test/cpp/allreduce_robust_test.cc delete mode 100644 ml-xgboost/rabit/test/cpp/test_io.cc delete mode 100644 ml-xgboost/rabit/test/cpp/test_main.cpp delete mode 100644 ml-xgboost/rabit/test/lazy_recover.cc delete mode 100644 ml-xgboost/rabit/test/local_recover.cc delete mode 100644 ml-xgboost/rabit/test/local_recover.py delete mode 100644 ml-xgboost/rabit/test/model_recover.cc delete mode 100644 ml-xgboost/rabit/test/speed_runner.py delete mode 100644 ml-xgboost/rabit/test/speed_test.cc delete mode 100644 ml-xgboost/rabit/test/test.mk delete mode 100644 ml-xgboost/src/CMakeLists.txt delete mode 100644 ml-xgboost/src/c_api/c_api.cc 
delete mode 100644 ml-xgboost/src/c_api/c_api.cu delete mode 100644 ml-xgboost/src/c_api/c_api_error.cc delete mode 100644 ml-xgboost/src/c_api/c_api_error.h delete mode 100644 ml-xgboost/src/cli_main.cc delete mode 100644 ml-xgboost/src/common/base64.h delete mode 100644 ml-xgboost/src/common/bitfield.h delete mode 100644 ml-xgboost/src/common/column_matrix.h delete mode 100644 ml-xgboost/src/common/common.cc delete mode 100644 ml-xgboost/src/common/common.cu delete mode 100644 ml-xgboost/src/common/common.h delete mode 100644 ml-xgboost/src/common/compressed_iterator.h delete mode 100644 ml-xgboost/src/common/config.h delete mode 100644 ml-xgboost/src/common/device_helpers.cu delete mode 100644 ml-xgboost/src/common/device_helpers.cuh delete mode 100644 ml-xgboost/src/common/group_data.h delete mode 100644 ml-xgboost/src/common/hist_util.cc delete mode 100644 ml-xgboost/src/common/hist_util.cu delete mode 100644 ml-xgboost/src/common/hist_util.h delete mode 100644 ml-xgboost/src/common/host_device_vector.cc delete mode 100644 ml-xgboost/src/common/host_device_vector.cu delete mode 100644 ml-xgboost/src/common/io.cc delete mode 100644 ml-xgboost/src/common/io.h delete mode 100644 ml-xgboost/src/common/json.cc delete mode 100644 ml-xgboost/src/common/math.h delete mode 100644 ml-xgboost/src/common/observer.h delete mode 100644 ml-xgboost/src/common/probability_distribution.cc delete mode 100644 ml-xgboost/src/common/probability_distribution.h delete mode 100644 ml-xgboost/src/common/quantile.h delete mode 100644 ml-xgboost/src/common/random.h delete mode 100644 ml-xgboost/src/common/row_set.h delete mode 100644 ml-xgboost/src/common/survival_util.cc delete mode 100644 ml-xgboost/src/common/survival_util.h delete mode 100644 ml-xgboost/src/common/threading_utils.h delete mode 100644 ml-xgboost/src/common/timer.cc delete mode 100644 ml-xgboost/src/common/timer.cu delete mode 100644 ml-xgboost/src/common/timer.h delete mode 100644 ml-xgboost/src/common/transform.h delete mode 100644 ml-xgboost/src/common/version.cc delete mode 100644 ml-xgboost/src/common/version.h delete mode 100644 ml-xgboost/src/data/adapter.h delete mode 100644 ml-xgboost/src/data/array_interface.h delete mode 100644 ml-xgboost/src/data/data.cc delete mode 100644 ml-xgboost/src/data/data.cu delete mode 100644 ml-xgboost/src/data/device_adapter.cuh delete mode 100644 ml-xgboost/src/data/device_dmatrix.cu delete mode 100644 ml-xgboost/src/data/device_dmatrix.h delete mode 100644 ml-xgboost/src/data/ellpack_page.cc delete mode 100644 ml-xgboost/src/data/ellpack_page.cu delete mode 100644 ml-xgboost/src/data/ellpack_page.cuh delete mode 100644 ml-xgboost/src/data/ellpack_page_raw_format.cu delete mode 100644 ml-xgboost/src/data/ellpack_page_source.cc delete mode 100644 ml-xgboost/src/data/ellpack_page_source.cu delete mode 100644 ml-xgboost/src/data/ellpack_page_source.h delete mode 100644 ml-xgboost/src/data/simple_batch_iterator.h delete mode 100644 ml-xgboost/src/data/simple_dmatrix.cc delete mode 100644 ml-xgboost/src/data/simple_dmatrix.cu delete mode 100644 ml-xgboost/src/data/simple_dmatrix.h delete mode 100644 ml-xgboost/src/data/sparse_page_dmatrix.cc delete mode 100644 ml-xgboost/src/data/sparse_page_dmatrix.h delete mode 100644 ml-xgboost/src/data/sparse_page_raw_format.cc delete mode 100644 ml-xgboost/src/data/sparse_page_source.h delete mode 100644 ml-xgboost/src/data/sparse_page_writer.h delete mode 100644 ml-xgboost/src/gbm/gblinear.cc delete mode 100644 ml-xgboost/src/gbm/gblinear_model.cc delete mode 100644 
ml-xgboost/src/gbm/gblinear_model.h delete mode 100644 ml-xgboost/src/gbm/gbm.cc delete mode 100644 ml-xgboost/src/gbm/gbtree.cc delete mode 100644 ml-xgboost/src/gbm/gbtree.h delete mode 100644 ml-xgboost/src/gbm/gbtree_model.cc delete mode 100644 ml-xgboost/src/gbm/gbtree_model.h delete mode 100644 ml-xgboost/src/learner.cc delete mode 100644 ml-xgboost/src/linear/coordinate_common.h delete mode 100644 ml-xgboost/src/linear/linear_updater.cc delete mode 100644 ml-xgboost/src/linear/param.h delete mode 100644 ml-xgboost/src/linear/updater_coordinate.cc delete mode 100644 ml-xgboost/src/linear/updater_gpu_coordinate.cu delete mode 100644 ml-xgboost/src/linear/updater_shotgun.cc delete mode 100644 ml-xgboost/src/logging.cc delete mode 100644 ml-xgboost/src/metric/elementwise_metric.cc delete mode 100644 ml-xgboost/src/metric/elementwise_metric.cu delete mode 100644 ml-xgboost/src/metric/metric.cc delete mode 100644 ml-xgboost/src/metric/metric_common.h delete mode 100644 ml-xgboost/src/metric/multiclass_metric.cc delete mode 100644 ml-xgboost/src/metric/multiclass_metric.cu delete mode 100644 ml-xgboost/src/metric/rank_metric.cc delete mode 100644 ml-xgboost/src/metric/rank_metric.cu delete mode 100644 ml-xgboost/src/metric/survival_metric.cc delete mode 100644 ml-xgboost/src/objective/aft_obj.cc delete mode 100644 ml-xgboost/src/objective/hinge.cc delete mode 100644 ml-xgboost/src/objective/hinge.cu delete mode 100644 ml-xgboost/src/objective/multiclass_obj.cc delete mode 100644 ml-xgboost/src/objective/multiclass_obj.cu delete mode 100644 ml-xgboost/src/objective/objective.cc delete mode 100644 ml-xgboost/src/objective/rank_obj.cc delete mode 100644 ml-xgboost/src/objective/rank_obj.cu delete mode 100644 ml-xgboost/src/objective/regression_loss.h delete mode 100644 ml-xgboost/src/objective/regression_obj.cc delete mode 100644 ml-xgboost/src/objective/regression_obj.cu delete mode 100644 ml-xgboost/src/predictor/cpu_predictor.cc delete mode 100644 ml-xgboost/src/predictor/gpu_predictor.cu delete mode 100644 ml-xgboost/src/predictor/predictor.cc delete mode 100644 ml-xgboost/src/tree/constraints.cc delete mode 100644 ml-xgboost/src/tree/constraints.cu delete mode 100644 ml-xgboost/src/tree/constraints.cuh delete mode 100644 ml-xgboost/src/tree/constraints.h delete mode 100644 ml-xgboost/src/tree/gpu_hist/gradient_based_sampler.cu delete mode 100644 ml-xgboost/src/tree/gpu_hist/gradient_based_sampler.cuh delete mode 100644 ml-xgboost/src/tree/gpu_hist/histogram.cu delete mode 100644 ml-xgboost/src/tree/gpu_hist/histogram.cuh delete mode 100644 ml-xgboost/src/tree/gpu_hist/row_partitioner.cu delete mode 100644 ml-xgboost/src/tree/gpu_hist/row_partitioner.cuh delete mode 100644 ml-xgboost/src/tree/param.cc delete mode 100644 ml-xgboost/src/tree/param.h delete mode 100644 ml-xgboost/src/tree/split_evaluator.cc delete mode 100644 ml-xgboost/src/tree/split_evaluator.h delete mode 100644 ml-xgboost/src/tree/tree_model.cc delete mode 100644 ml-xgboost/src/tree/tree_updater.cc delete mode 100644 ml-xgboost/src/tree/updater_basemaker-inl.h delete mode 100644 ml-xgboost/src/tree/updater_colmaker.cc delete mode 100644 ml-xgboost/src/tree/updater_gpu_common.cuh delete mode 100644 ml-xgboost/src/tree/updater_gpu_hist.cu delete mode 100644 ml-xgboost/src/tree/updater_histmaker.cc delete mode 100644 ml-xgboost/src/tree/updater_prune.cc delete mode 100644 ml-xgboost/src/tree/updater_quantile_hist.cc delete mode 100644 ml-xgboost/src/tree/updater_quantile_hist.h delete mode 100644 
ml-xgboost/src/tree/updater_refresh.cc delete mode 100644 ml-xgboost/src/tree/updater_skmaker.cc delete mode 100644 ml-xgboost/src/tree/updater_sync.cc delete mode 100644 ml-xgboost/tests/cpp/common/test_partition_builder.cc delete mode 100644 tools/kal-test/README.md delete mode 100644 tools/kal-test/bin/graph/betweenness_run.sh delete mode 100644 tools/kal-test/bin/graph/betweenness_run_opensource.sh delete mode 100644 tools/kal-test/bin/graph/bfs_run.sh delete mode 100644 tools/kal-test/bin/graph/bfs_run_opensource.sh delete mode 100755 tools/kal-test/bin/graph/cc_run.sh delete mode 100755 tools/kal-test/bin/graph/cc_run_raw.sh delete mode 100755 tools/kal-test/bin/graph/cd_run.sh delete mode 100644 tools/kal-test/bin/graph/closeness_run.sh delete mode 100644 tools/kal-test/bin/graph/closeness_run_hive.sh delete mode 100644 tools/kal-test/bin/graph/clusteringcoefficient_run.sh delete mode 100644 tools/kal-test/bin/graph/clusteringcoefficient_run_opensource.sh delete mode 100644 tools/kal-test/bin/graph/degree_run.sh delete mode 100644 tools/kal-test/bin/graph/degree_run_raw.sh delete mode 100644 tools/kal-test/bin/graph/incpr_run.sh delete mode 100755 tools/kal-test/bin/graph/kcore_run.sh delete mode 100755 tools/kal-test/bin/graph/kcore_run_hive.sh delete mode 100755 tools/kal-test/bin/graph/kcore_run_raw.sh delete mode 100644 tools/kal-test/bin/graph/louvain_run.sh delete mode 100644 tools/kal-test/bin/graph/louvain_run_hive.sh delete mode 100755 tools/kal-test/bin/graph/lpa_run.sh delete mode 100755 tools/kal-test/bin/graph/lpa_run_raw.sh delete mode 100644 tools/kal-test/bin/graph/mce_run.sh delete mode 100644 tools/kal-test/bin/graph/mce_run_hive.sh delete mode 100755 tools/kal-test/bin/graph/modularity_run.sh delete mode 100755 tools/kal-test/bin/graph/mssp_run.sh delete mode 100644 tools/kal-test/bin/graph/node2vec_run.sh delete mode 100644 tools/kal-test/bin/graph/node2vec_run_opensource.sh delete mode 100644 tools/kal-test/bin/graph/ppr_run.sh delete mode 100644 tools/kal-test/bin/graph/ppr_run_raw.sh delete mode 100644 tools/kal-test/bin/graph/pr_run.sh delete mode 100644 tools/kal-test/bin/graph/pr_run_hive.sh delete mode 100644 tools/kal-test/bin/graph/pr_run_raw.sh delete mode 100755 tools/kal-test/bin/graph/scc_run.sh delete mode 100755 tools/kal-test/bin/graph/scc_run_raw.sh delete mode 100644 tools/kal-test/bin/graph/sgm_run.sh delete mode 100644 tools/kal-test/bin/graph/sgm_run_opensource.sh delete mode 100644 tools/kal-test/bin/graph/tc_run.sh delete mode 100644 tools/kal-test/bin/graph/tc_run_raw.sh delete mode 100644 tools/kal-test/bin/graph/tr_run.sh delete mode 100755 tools/kal-test/bin/graph/wce_run.sh delete mode 100755 tools/kal-test/bin/graph/wce_run_hive.sh delete mode 100644 tools/kal-test/bin/graph/wpr_run.sh delete mode 100755 tools/kal-test/bin/ml/als_run.sh delete mode 100755 tools/kal-test/bin/ml/als_run_raw.sh delete mode 100644 tools/kal-test/bin/ml/cov_run.sh delete mode 100644 tools/kal-test/bin/ml/cov_run_raw.sh delete mode 100644 tools/kal-test/bin/ml/dbscan_run.sh delete mode 100644 tools/kal-test/bin/ml/dbscan_run_opensource.sh delete mode 100755 tools/kal-test/bin/ml/dt_run.sh delete mode 100755 tools/kal-test/bin/ml/dt_run_raw.sh delete mode 100755 tools/kal-test/bin/ml/gbdt_run.sh delete mode 100755 tools/kal-test/bin/ml/gbdt_run_raw.sh delete mode 100755 tools/kal-test/bin/ml/idf_run.sh delete mode 100755 tools/kal-test/bin/ml/idf_run_raw.sh delete mode 100755 tools/kal-test/bin/ml/kmeans_run.sh delete mode 100755 
tools/kal-test/bin/ml/kmeans_run_raw.sh delete mode 100644 tools/kal-test/bin/ml/knn_run.sh delete mode 100644 tools/kal-test/bin/ml/knn_run_raw.sh delete mode 100644 tools/kal-test/bin/ml/lda_run.sh delete mode 100644 tools/kal-test/bin/ml/lda_run_raw.sh delete mode 100644 tools/kal-test/bin/ml/linR_run.sh delete mode 100644 tools/kal-test/bin/ml/linR_run_raw.sh delete mode 100644 tools/kal-test/bin/ml/logR_run.sh delete mode 100644 tools/kal-test/bin/ml/logR_run_raw.sh delete mode 100644 tools/kal-test/bin/ml/pca_run.sh delete mode 100644 tools/kal-test/bin/ml/pca_run_kml.sh delete mode 100644 tools/kal-test/bin/ml/pca_run_raw.sh delete mode 100755 tools/kal-test/bin/ml/pearson_run.sh delete mode 100755 tools/kal-test/bin/ml/pearson_run_raw.sh delete mode 100644 tools/kal-test/bin/ml/ps_run.sh delete mode 100644 tools/kal-test/bin/ml/ps_run_raw.sh delete mode 100755 tools/kal-test/bin/ml/rf_run.sh delete mode 100755 tools/kal-test/bin/ml/rf_run_raw.sh delete mode 100644 tools/kal-test/bin/ml/simrank_run.sh delete mode 100644 tools/kal-test/bin/ml/spca_run.sh delete mode 100644 tools/kal-test/bin/ml/spca_run_raw.sh delete mode 100644 tools/kal-test/bin/ml/spearman_run.sh delete mode 100644 tools/kal-test/bin/ml/spearman_run_raw.sh delete mode 100644 tools/kal-test/bin/ml/svd_run.sh delete mode 100644 tools/kal-test/bin/ml/svd_run_kml.sh delete mode 100644 tools/kal-test/bin/ml/svd_run_raw.sh delete mode 100755 tools/kal-test/bin/ml/svm_run.sh delete mode 100755 tools/kal-test/bin/ml/svm_run_raw.sh delete mode 100755 tools/kal-test/bin/ml/xgbt_run.sh delete mode 100755 tools/kal-test/bin/ml/xgbt_run_raw.sh delete mode 100644 tools/kal-test/bin/workflow.sh delete mode 100644 tools/kal-test/conf/graph/betweenness/betweenness.yml delete mode 100644 tools/kal-test/conf/graph/betweenness/betweenness_spark.properties delete mode 100644 tools/kal-test/conf/graph/betweenness/betweenness_spark_opensource.properties delete mode 100644 tools/kal-test/conf/graph/bfs/bfs.yml delete mode 100644 tools/kal-test/conf/graph/bfs/bfs_opensource.properties delete mode 100644 tools/kal-test/conf/graph/bfs/bfs_source_id.properties delete mode 100644 tools/kal-test/conf/graph/bfs/bfs_spark.properties delete mode 100644 tools/kal-test/conf/graph/bfs/bfs_spark_opensource.properties delete mode 100644 tools/kal-test/conf/graph/cc/cc.yml delete mode 100644 tools/kal-test/conf/graph/cc/cc_spark.properties delete mode 100644 tools/kal-test/conf/graph/cc/cc_spark_raw.properties delete mode 100644 tools/kal-test/conf/graph/cd/cd.yml delete mode 100644 tools/kal-test/conf/graph/cd/cd_spark_aarch64.properties delete mode 100644 tools/kal-test/conf/graph/cd/cd_spark_raw.properties delete mode 100644 tools/kal-test/conf/graph/cd/cd_spark_x86_64.properties delete mode 100644 tools/kal-test/conf/graph/closeness/closeness.yml delete mode 100644 tools/kal-test/conf/graph/closeness/closeness_spark.properties delete mode 100644 tools/kal-test/conf/graph/clusteringcoefficient/clusteringcoefficient.yml delete mode 100644 tools/kal-test/conf/graph/clusteringcoefficient/clusteringcoefficient_spark.properties delete mode 100644 tools/kal-test/conf/graph/clusteringcoefficient/clusteringcoefficient_spark_opensource.properties delete mode 100644 tools/kal-test/conf/graph/degree/degree.yml delete mode 100644 tools/kal-test/conf/graph/degree/degree_spark.properties delete mode 100644 tools/kal-test/conf/graph/degree/degree_spark_raw.properties delete mode 100644 tools/kal-test/conf/graph/graph_datasets.properties delete mode 100644 
tools/kal-test/conf/graph/incpr/incpr.yml delete mode 100644 tools/kal-test/conf/graph/incpr/incpr_spark.properties delete mode 100644 tools/kal-test/conf/graph/kcore/kcore.yml delete mode 100644 tools/kal-test/conf/graph/kcore/kcore_spark.properties delete mode 100644 tools/kal-test/conf/graph/kcore/kcore_spark_raw.properties delete mode 100644 tools/kal-test/conf/graph/louvain/louvain.yml delete mode 100644 tools/kal-test/conf/graph/louvain/louvain_spark.properties delete mode 100644 tools/kal-test/conf/graph/lpa/lpa.yml delete mode 100644 tools/kal-test/conf/graph/lpa/lpa_spark.properties delete mode 100644 tools/kal-test/conf/graph/lpa/lpa_spark_raw.properties delete mode 100644 tools/kal-test/conf/graph/mce/mce.yml delete mode 100644 tools/kal-test/conf/graph/mce/mce_spark.properties delete mode 100644 tools/kal-test/conf/graph/modularity/modularity.yml delete mode 100644 tools/kal-test/conf/graph/modularity/modularity_spark.properties delete mode 100644 tools/kal-test/conf/graph/mssp/mssp.yml delete mode 100644 tools/kal-test/conf/graph/mssp/mssp_spark.properties delete mode 100644 tools/kal-test/conf/graph/node2vec/node2vec.yml delete mode 100644 tools/kal-test/conf/graph/node2vec/node2vec_spark.properties delete mode 100644 tools/kal-test/conf/graph/node2vec/node2vec_spark_opensource.properties delete mode 100644 tools/kal-test/conf/graph/ppr/ppr.yml delete mode 100644 tools/kal-test/conf/graph/ppr/ppr_source_id.properties delete mode 100644 tools/kal-test/conf/graph/ppr/ppr_spark.properties delete mode 100644 tools/kal-test/conf/graph/ppr/ppr_spark_raw.properties delete mode 100644 tools/kal-test/conf/graph/pr/pr.yml delete mode 100644 tools/kal-test/conf/graph/pr/pr_spark.properties delete mode 100644 tools/kal-test/conf/graph/pr/pr_spark_raw.properties delete mode 100644 tools/kal-test/conf/graph/scc/scc.yml delete mode 100644 tools/kal-test/conf/graph/scc/scc_spark.properties delete mode 100644 tools/kal-test/conf/graph/scc/scc_x86.yml delete mode 100644 tools/kal-test/conf/graph/sgm/sgm.yml delete mode 100644 tools/kal-test/conf/graph/sgm/sgm_spark.properties delete mode 100644 tools/kal-test/conf/graph/sgm/sgm_spark_opensource.properties delete mode 100644 tools/kal-test/conf/graph/tc/tc.yml delete mode 100644 tools/kal-test/conf/graph/tc/tc_spark.properties delete mode 100644 tools/kal-test/conf/graph/tr/tr.yml delete mode 100644 tools/kal-test/conf/graph/tr/tr_spark.properties delete mode 100644 tools/kal-test/conf/graph/wce/wce.yml delete mode 100644 tools/kal-test/conf/graph/wce/wce_spark.properties delete mode 100644 tools/kal-test/conf/graph/wpr/wpr.yml delete mode 100644 tools/kal-test/conf/graph/wpr/wpr_spark.properties delete mode 100644 tools/kal-test/conf/ml/als/als.yml delete mode 100644 tools/kal-test/conf/ml/als/als_raw.yml delete mode 100644 tools/kal-test/conf/ml/als/als_spark.properties delete mode 100644 tools/kal-test/conf/ml/als/als_spark_raw.properties delete mode 100644 tools/kal-test/conf/ml/cov/cov.yml delete mode 100644 tools/kal-test/conf/ml/cov/cov_spark.properties delete mode 100644 tools/kal-test/conf/ml/dbscan/dbscan.yml delete mode 100644 tools/kal-test/conf/ml/dbscan/dbscan_spark.properties delete mode 100644 tools/kal-test/conf/ml/dbscan/dbscan_spark_opensource.properties delete mode 100644 tools/kal-test/conf/ml/dt/dt_arm.yml delete mode 100644 tools/kal-test/conf/ml/dt/dt_spark.properties delete mode 100644 tools/kal-test/conf/ml/dt/dt_spark_raw.properties delete mode 100644 tools/kal-test/conf/ml/dt/dt_x86.yml delete mode 100644 
tools/kal-test/conf/ml/dt/dt_x86_raw.yml delete mode 100644 tools/kal-test/conf/ml/gbdt/gbdt.yml delete mode 100644 tools/kal-test/conf/ml/gbdt/gbdt_spark.properties delete mode 100644 tools/kal-test/conf/ml/idf/idf.yml delete mode 100644 tools/kal-test/conf/ml/idf/idf_spark.properties delete mode 100644 tools/kal-test/conf/ml/kmeans/kmeans.yml delete mode 100644 tools/kal-test/conf/ml/kmeans/kmeans_spark.properties delete mode 100644 tools/kal-test/conf/ml/knn/knn.yml delete mode 100644 tools/kal-test/conf/ml/knn/knn_raw.yml delete mode 100644 tools/kal-test/conf/ml/knn/knn_spark.properties delete mode 100644 tools/kal-test/conf/ml/knn/knn_spark_raw.properties delete mode 100644 tools/kal-test/conf/ml/lda/lda.yml delete mode 100644 tools/kal-test/conf/ml/lda/lda_spark.properties delete mode 100644 tools/kal-test/conf/ml/linR/linR.yml delete mode 100644 tools/kal-test/conf/ml/linR/linR_raw.yml delete mode 100644 tools/kal-test/conf/ml/linR/linR_spark.properties delete mode 100644 tools/kal-test/conf/ml/linR/linR_spark_raw.properties delete mode 100644 tools/kal-test/conf/ml/logR/logR.yml delete mode 100644 tools/kal-test/conf/ml/logR/logR_spark.properties delete mode 100644 tools/kal-test/conf/ml/ml_datasets.properties delete mode 100644 tools/kal-test/conf/ml/pca/pca.yml delete mode 100644 tools/kal-test/conf/ml/pca/pca_spark.properties delete mode 100644 tools/kal-test/conf/ml/pearson/pearson.yml delete mode 100644 tools/kal-test/conf/ml/pearson/pearson_raw.yml delete mode 100644 tools/kal-test/conf/ml/pearson/pearson_spark.properties delete mode 100644 tools/kal-test/conf/ml/pearson/pearson_spark_raw.properties delete mode 100644 tools/kal-test/conf/ml/ps/ps.yml delete mode 100644 tools/kal-test/conf/ml/ps/ps_spark.properties delete mode 100644 tools/kal-test/conf/ml/rf/rf_arm.yml delete mode 100644 tools/kal-test/conf/ml/rf/rf_spark.properties delete mode 100644 tools/kal-test/conf/ml/rf/rf_spark_raw.properties delete mode 100644 tools/kal-test/conf/ml/rf/rf_x86.yml delete mode 100644 tools/kal-test/conf/ml/rf/rf_x86_raw.yml delete mode 100644 tools/kal-test/conf/ml/simrank/simrank.yml delete mode 100644 tools/kal-test/conf/ml/simrank/simrank_spark.properties delete mode 100644 tools/kal-test/conf/ml/spca/spca.yml delete mode 100644 tools/kal-test/conf/ml/spca/spca_raw.yml delete mode 100644 tools/kal-test/conf/ml/spca/spca_spark.properties delete mode 100644 tools/kal-test/conf/ml/spca/spca_spark_raw.properties delete mode 100644 tools/kal-test/conf/ml/spearman/spearman.yml delete mode 100644 tools/kal-test/conf/ml/spearman/spearman_spark.properties delete mode 100644 tools/kal-test/conf/ml/svd/svd.yml delete mode 100644 tools/kal-test/conf/ml/svd/svd_raw.yml delete mode 100644 tools/kal-test/conf/ml/svd/svd_spark.properties delete mode 100644 tools/kal-test/conf/ml/svd/svd_spark_raw.properties delete mode 100644 tools/kal-test/conf/ml/svm/svm.yml delete mode 100644 tools/kal-test/conf/ml/svm/svm_spark.properties delete mode 100644 tools/kal-test/conf/ml/xgbt/xgbt_arm.yml delete mode 100644 tools/kal-test/conf/ml/xgbt/xgbt_spark.properties delete mode 100644 tools/kal-test/conf/ml/xgbt/xgbt_spark_raw.properties delete mode 100644 tools/kal-test/conf/ml/xgbt/xgbt_x86.yml delete mode 100644 tools/kal-test/pom.xml delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/BFSRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/BetweennessRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/ClosenessHiveRunner.scala delete mode 
100644 tools/kal-test/src/main/scala/com/bigdata/graph/ClosenessRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/ClusteringCoefficientRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/ConnectedComponentsRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/CycleDetectionWithConstrainsRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/DegreeRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/IncPageRankRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/KCoreDecompositionHiveRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/KCoreDecompositionRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/KcoreMain.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/LabelPropagationRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/LouvainHiveRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/LouvainRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/MSSPRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/MaximalCliqueEnumerationHiveRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/MaximalCliqueEnumerationRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/ModularityRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/Node2VecRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/PageRankHiveRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/PageRankRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/PersonalizedPageRankRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/StronglyConnectedComponentsRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/SubgraphMatchingRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/TrangleCountRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/TrustRankRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/Util.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/WCEHiveRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/WCERunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/graph/WeightedPageRankRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/ml/ALSRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/ml/CovRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/ml/DTRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/ml/GBDTRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/ml/IDFRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/ml/KMeansRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/ml/KNNRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/ml/LDARunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/ml/LinRRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/ml/LogRRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/ml/PCARunner.scala delete mode 100644 
tools/kal-test/src/main/scala/com/bigdata/ml/PearsonRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/ml/PrefixSpanRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/ml/RFRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/ml/SPCARunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/ml/SVDRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/ml/SVMRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/ml/SimRankRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/ml/SpearManRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/ml/XGBTRunner.scala delete mode 100644 tools/kal-test/src/main/scala/com/bigdata/utils/Utils.scala delete mode 100644 tools/kal-test/src/main/scala/org/apache/spark/ml/classification/KNNClassifier.scala delete mode 100644 tools/kal-test/src/main/scala/org/apache/spark/ml/clustering/DBSCANRunner.scala delete mode 100644 tools/kal-test/src/main/scala/org/apache/spark/ml/knn/KNN.scala delete mode 100644 tools/kal-test/src/main/scala/org/apache/spark/ml/knn/MetricTree.scala delete mode 100644 tools/kal-test/src/main/scala/org/apache/spark/ml/recommendation/SimRankOpenSource.scala delete mode 100644 tools/kal-test/src/main/scala/org/apache/spark/ml/regression/KNNRegression.scala delete mode 100644 tools/kal-test/src/main/scala/org/apache/spark/mllib/knn/KNNUtils.scala diff --git a/README.md b/README.md index c1f5210..97493a2 100644 --- a/README.md +++ b/README.md @@ -5,9 +5,9 @@ Introduction ============ -The machine learning algorithm library running on Kunpeng processors is an acceleration library that provides a rich set of high-level tools for machine learning algorithms. It is based on the original APIs of Apache [Spark 2.3.2](https://github.com/apache/spark/tree/v2.3.2), [breeze 0.13.1](https://github.com/scalanlp/breeze/tree/releases/v0.13.1) and [xgboost 1.1.0](https://github.com/dmlc/xgboost/tree/release_1.0.0). The acceleration library for greatly improves the computing power in big data scenarios. +The machine learning algorithm library running on Kunpeng processors is an acceleration library that provides a rich set of high-level tools for machine learning algorithms. It is based on the original APIs of Apache [Spark 3.1.1](https://github.com/apache/spark/tree/v3.1.1). The acceleration library greatly improves the computing power in big data scenarios. -The library provides 21 machine learning algorithms: support vector machine (SVM), random forest classifier (RFC), gradient boosting decision tree (GBDT), decision tree (DT), K-means clustering, linear regression, logistic regression algorithm, principal component analysis (PCA), principal component analysis for Sparse Matrix(SPCA), singular value decomposition (SVD), latent dirichlet allocation (LDA), prefix-projected pattern prowth (Prefix-Span), alternating least squares (ALS), K-nearest neighbors (KNN), Covariance, Density-based spatial clustering of applicaitons with noise (DBSCAN), Pearson, Spearman, XGboost, Inverse Document Frequency(IDF), and SimRank. You can find the latest documentation on the project web page. This README file contains only basic setup instructions. +The library provides 5 machine learning algorithms: latent Dirichlet allocation (LDA), prefix-projected pattern growth (Prefix-Span), alternating least squares (ALS), K-nearest neighbors (KNN), and density-based spatial clustering of applications with noise (DBSCAN).
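Because the library keeps Spark's original APIs, an accelerated algorithm is driven exactly like its upstream counterpart; only the jars on the classpath change. A minimal sketch for LDA, illustrative only: it assumes the BoostKit jars produced by the build steps below are placed ahead of Spark's own classes on the driver and executor classpaths, and the input path is the sample file shipped with Spark distributions.

    import org.apache.spark.ml.clustering.LDA
    import org.apache.spark.sql.SparkSession

    val spark = SparkSession.builder().appName("boostkit-lda").getOrCreate()
    // Each row carries a "features" vector of term counts.
    val corpus = spark.read.format("libsvm").load("data/mllib/sample_lda_libsvm_data.txt")
    val model = new LDA().setK(10).setMaxIter(20).fit(corpus)
    println(s"log likelihood: ${model.logLikelihood(corpus)}")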
You can find the latest documentation on the project web page. This README file contains only basic setup instructions. You can find the latest documentation, including a programming guide, on the project web page. This README file only contains basic setup instructions. @@ -21,17 +21,9 @@ Building And Packageing mvn clean package -(2) Build XGBoost project under the "Spark-ml-algo-lib/ml-xgboost/jvm-packages" directory: +(2) Obtain "boostkit-ml-core_2.12-2.1.0-spark3.1.1.jar" under the "Spark-ml-algo-lib/ml-core/target" directory. - mvn clean package - -(3) Obtain "boostkit-ml-core_2.11-2.1.0-spark2.3.2.jar" under the "Spark-ml-algo-lib/ml-core/target" directory. - - Obtain "boostkit-ml-acc_2.11-2.1.0-spark2.3.2.jar" under the "Spark-ml-algo-lib/ml-accelerator/target" directory. - - Obtain "boostkit-xgboost4j_2.11-2.1.0.jar" under the "Spark-ml-algo-lib/ml-xgboost/jvm-packages/boostkit-xgboost4j/target" directory. - - Obtain "boostkit-xgboost4j-spark2.3.2_2.11-2.1.0.jar" under the "Spark-ml-algo-lib/ml-xgboost/jvm-packages/boostkit-xgboost4j-spark/target" directory. + Obtain "boostkit-ml-acc_2.12-2.1.0-spark3.1.1.jar" under the "Spark-ml-algo-lib/ml-accelerator/target" directory. Contribution Guidelines diff --git a/ml-accelerator/pom.xml b/ml-accelerator/pom.xml index 5c9f082..0af611b 100644 --- a/ml-accelerator/pom.xml +++ b/ml-accelerator/pom.xml @@ -6,7 +6,7 @@ 4.0.0 - boostkit-ml-acc_2.11 + boostkit-ml-acc_2.12 2.1.0 ${project.artifactId} Spark ml algo accelerator @@ -14,14 +14,14 @@ org.apache.spark - boostkit-ml-core_2.11 + boostkit-ml-core_2.12 ${project.version} ${spark.version} org.apache.spark - boostkit-ml-kernel-client_2.11 + boostkit-ml-kernel-client_2.12 ${project.version} ${spark.version} compile diff --git a/ml-accelerator/src/main/scala/breeze/optimize/FirstOrderMinimizerX.scala b/ml-accelerator/src/main/scala/breeze/optimize/FirstOrderMinimizerX.scala deleted file mode 100644 index 053f103..0000000 --- a/ml-accelerator/src/main/scala/breeze/optimize/FirstOrderMinimizerX.scala +++ /dev/null @@ -1,291 +0,0 @@ -// scalastyle:off header.matches -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ - -package breeze.optimize - -import scala.language.implicitConversions - -import FirstOrderMinimizerX.ConvergenceCheck -import breeze.linalg.norm -import breeze.math.{MutableInnerProductModule, NormedModule} -import breeze.util.Implicits._ -import breeze.util.SerializableLogging - -/** - * - * @author dlwh - */ -abstract class FirstOrderMinimizerX[T, DF<:StochasticDiffFunction[T]] -(val convergenceCheck: ConvergenceCheck[T]) -(implicit space: MutableInnerProductModule[T, Double]) - extends Minimizer[T, DF] with SerializableLogging { - - def this(maxIter: Int = -1, tolerance: Double = 1E-6, - fvalMemory: Int = 100, relativeTolerance: Boolean = true) - (implicit space: MutableInnerProductModule[T, Double]) = - this(FirstOrderMinimizerX.defaultConvergenceCheckX[T] - (maxIter, tolerance, relativeTolerance, fvalMemory)) - - var inertiaCoefficient: Double = 0.5 - val momentumUpdateCoefficient : Double = 0.9 - - def setInertiaCoefficient(a: Double): Unit = { - this.inertiaCoefficient = a - } - - /** - * Any history the derived minimization function needs to do its updates. - * typically an approximation - * to the second derivative/hessian matrix. 
- */ - type History - type State = FirstOrderMinimizerX.State[T, convergenceCheck.Info, History] - - - - - protected def initialHistory(f: DF, init: T): History - protected def adjustFunction(f: DF): DF = f - protected def adjust(newX: T, newGrad: T, newVal: Double): (Double, T) = (newVal, newGrad) - protected def chooseDescentDirection(state: State, f: DF): T - protected def takeStep(state: State, dir: T, stepSize: Double): T - protected def updateHistory(newX: T, newGrad: T, newVal: Double, f: DF, oldState: State): History - protected def updateTheta(f: DF, state: State): (T, T) - - - protected def initialState(f: DF, init: T): State = { - val x = init - val history = initialHistory(f, init) - val (value, grad) = calculateObjective(f, x, history) - val (adjValue, adjGrad) = adjust(x, grad, value) - import space._ - val copyInit = space.copy(init) - copyInit -= copyInit - FirstOrderMinimizerX.State(x, - value, - grad, - adjValue, - adjGrad, - 0, - adjValue, - history, - convergenceCheck.initialInfo, - copyInit) - } - - - protected def calculateObjective(f: DF, x: T, history: History): (Double, T) = { - f.calculate(x) - } - - def infiniteIterations(f: DF, state: State): Iterator[State] = { - var failedOnce = false - val adjustedFun = adjustFunction(f) - import space._ - - Iterator.iterate(state) { state => try { - val (x, currentMomentum) = updateTheta(adjustedFun, state) - // the Func used to update the theta, sub class overide it. - val (value, grad) = calculateObjective(adjustedFun, x, state.history) - val (adjValue, adjGrad) = adjust(x, grad, value) - val oneOffImprovement = (state.adjustedValue - adjValue)/ - (state.adjustedValue.abs max adjValue.abs max 1E-6 * state.initialAdjVal.abs) - logger.info(f"Val and Grad Norm: $adjValue%.6g (rel: " + - f"$oneOffImprovement%.3g) ${norm(adjGrad)}%.6g") - val history = updateHistory(x, grad, value, adjustedFun, state) - val newCInfo = convergenceCheck - .update(x, - grad, - value, - state, - state.convergenceInfo) - failedOnce = false - FirstOrderMinimizerX - .State(x, - value, - grad, - adjValue, - adjGrad, - state.iter + 1, - state.initialAdjVal, - history, - newCInfo, - currentMomentum) - } catch { - case x: FirstOrderException if !failedOnce => - failedOnce = true - logger.error(s"Failure! Resetting history: $x") - state.copy(history = initialHistory(adjustedFun, state.x)) - case x: FirstOrderException => - logger.error("Failure again! Giving up and returning. " + - "Maybe the objective is just poorly behaved?") - state.copy(searchFailed = true) - } - } - } - - def iterations(f: DF, init: T): Iterator[State] = { - val adjustedFun = adjustFunction(f) - infiniteIterations(f, initialState(adjustedFun, init)) - .takeUpToWhere{s => - convergenceCheck.apply(s, s.convergenceInfo) match { - case Some(converged) => - logger.info(s"Converged because ${converged.reason}") - true - case None => - false - } - } - } - - def minimize(f: DF, init: T): T = { - minimizeAndReturnState(f, init).x - } - - - def minimizeAndReturnState(f: DF, init: T): State = { - iterations(f, init).last - } -} - - -object FirstOrderMinimizerX { - - /** - * Tracks the information about the optimizer, including the current point, - * its value, gradient, and then any history. - * Also includes information for checking convergence. - * @param x the current point being considered - * @param value f(x) - * @param grad f.gradientAt(x) - * @param adjustedValue f(x) + r(x) - * @param adjustedGradient f'(x) + r'(x) - * @param iter what iteration number we are on. 
- * @param initialAdjVal f(x_0) + r(x_0), used for checking convergence - * @param history any information needed by the optimizer to do updates. - * @param searchFailed did the line search fail? - */ - case class State[+T, +ConvergenceInfo, +History](x: T, - value: Double, - grad: T, - adjustedValue: Double, - adjustedGradient: T, - iter: Int, - initialAdjVal: Double, - history: History, - convergenceInfo: ConvergenceInfo, - momentum: T, - searchFailed: Boolean = false) { - } - - trait ConvergenceCheck[T] { - type Info - def initialInfo: Info - def apply(state: State[T, _, _], info: Info): Option[ConvergenceReason] - def update(newX: T, - newGrad: T, - newVal: Double, - oldState: State[T, _, _], - oldInfo: Info): Info - def ||(otherCheck: ConvergenceCheck[T]): - ConvergenceCheck[T] = orElse(otherCheck) - - def orElse(other: ConvergenceCheck[T]): - ConvergenceCheck[T] = { - SequenceConvergenceCheck(asChecks ++ other.asChecks) - } - - protected def asChecks: - IndexedSeq[ConvergenceCheck[T]] = IndexedSeq(this) - } - - object ConvergenceCheck { - implicit def fromPartialFunction[T](pf: PartialFunction[State[T, _, _], ConvergenceReason]): - ConvergenceCheck[T] = new ConvergenceCheck[T] { - override type Info = Unit - - def update(newX: T, - newGrad: T, - newVal: Double, - oldState: State[T, _, _], - oldInfo: Info): - Info = oldInfo - - override def apply(state: State[T, _, _], info: Info): - Option[ConvergenceReason] = pf.lift(state) - - override def initialInfo: Info = () - } - } - - case class SequenceConvergenceCheck[T](checks: IndexedSeq[ConvergenceCheck[T]]) - extends ConvergenceCheck[T] { - type Info = IndexedSeq[ConvergenceCheck[T]#Info] - - override def initialInfo: IndexedSeq[ConvergenceCheck[T]#Info] = checks.map(_.initialInfo) - - override def update(newX: T, - newGrad: T, - newVal: Double, - oldState: State[T, _, _], - oldInfo: Info): - Info = { - require(oldInfo.length == checks.length) - (checks zip oldInfo).map { case (c, i) => - c.update(newX, - newGrad, - newVal, - oldState, - i.asInstanceOf[c.Info]) } - } - - override def apply(state: State[T, _, _], - info: IndexedSeq[ConvergenceCheck[T]#Info]): Option[ConvergenceReason] = { - (checks zip info).iterator.flatMap { case (c, i) => - c(state, i.asInstanceOf[c.Info])}.toStream.headOption - } - } - - - trait ConvergenceReason { - def reason: String - } - case object MaxIterations extends ConvergenceReason { - override def reason: String = "max iterations reached" - } - - case object GradientConverged extends ConvergenceReason { - override def reason: String = "gradient converged" - } - - def maxIterationsReached[T](maxIter: Int): - ConvergenceCheck[T] = ConvergenceCheck.fromPartialFunction { - case s: State[_, _, _] if (s.iter >= maxIter && maxIter >= 0) => - MaxIterations - } - - def gradientConverged[T](tolerance: Double, relative: Boolean = true) - (implicit space: NormedModule[T, Double]): ConvergenceCheck[T] = { - import space.normImpl - ConvergenceCheck.fromPartialFunction[T] { - case s: State[T, _, _] if (norm(s.adjustedGradient) < tolerance - * (if (relative) s.adjustedValue else 1.0)) => - GradientConverged - } - } - - - def defaultConvergenceCheckX[T](maxIter: Int, tolerance: Double, - relative: Boolean = true, fvalMemory: Int = 20) - (implicit space: NormedModule[T, Double]): ConvergenceCheck[T] = - ( - maxIterationsReached[T](maxIter) || - gradientConverged[T](tolerance, relative) - ) - -} diff --git a/ml-accelerator/src/main/scala/breeze/optimize/LBFGSX.scala 
b/ml-accelerator/src/main/scala/breeze/optimize/LBFGSX.scala deleted file mode 100644 index e0b99d7..0000000 --- a/ml-accelerator/src/main/scala/breeze/optimize/LBFGSX.scala +++ /dev/null @@ -1,110 +0,0 @@ -// scalastyle:off header.matches -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ - -package breeze.optimize - -/* - Copyright 2009 David Hall, Daniel Ramage - - Licensed under the Apache License, Version 2.0 (the "License") - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -import breeze.linalg._ -import breeze.math.MutableInnerProductModule -import breeze.optimize.FirstOrderMinimizerX.ConvergenceCheck -import breeze.util.SerializableLogging - -/** - * Port of LBFGS to Scala. - * - * Special note for LBFGS: - * If you use it in published work, you must cite one of: - * * J. Nocedal. Updating Quasi-Newton Matrices with Limited Storage - * (1980), Mathematics of Computation 35, pp. 773-782. - * * D.C. Liu and J. Nocedal. On the Limited mem Method for Large - * Scale Optimization (1989), Mathematical Programming B, 45, 3, - * pp. 503-528. - * - * @param m: The memory of the search. 3 to 7 is usually sufficient. - */ -class LBFGSX[T](convergenceCheck: ConvergenceCheck[T], m: Int) - (implicit space: MutableInnerProductModule[T, Double]) extends - FirstOrderMinimizerX[T, DiffFunction[T]](convergenceCheck) with SerializableLogging { - - def this(maxIter: Int = -1, m: Int = 7, tolerance: Double = 1E-9) - (implicit space: MutableInnerProductModule[T, Double]) = - this(FirstOrderMinimizerX.defaultConvergenceCheckX(maxIter, tolerance), m ) - import space._ - require(m > 0) - - type History = LBFGSX.ApproximateInverseHessianX[T] - - override protected def adjustFunction(f: DiffFunction[T]): DiffFunction[T] = f.cached - - def takeStep(state: State, dir: T, stepSize: Double): T = state.x + dir * stepSize - protected def initialHistory(f: DiffFunction[T], x: T): - History = new LBFGSX.ApproximateInverseHessianX(m) - protected def chooseDescentDirection(state: State, fn: DiffFunction[T]): T = { - state.history * state.grad - } - - protected def updateHistory(newX: T, newGrad: T, newVal: Double, - f: DiffFunction[T], oldState: State): History = { - oldState.history.updated(newX - oldState.x, newGrad -:- oldState.grad) - } - - - override def updateTheta(f: DiffFunction[T], state: State): (T, T) = { - val adjustedFun = adjustFunction(f) - val dir = chooseDescentDirection(state, adjustedFun) - val currentMomentum = ACC - .updateMomentum(state.momentum, dir, inertiaCoefficient, momentumUpdateCoefficient)(space) - val stepSize = 1.0 - logger.info(f"Step Size: $stepSize%.4g") - val x = takeStep(state, currentMomentum, stepSize) - (x, currentMomentum) - } -} - -object LBFGSX { - case class ApproximateInverseHessianX[T](m: Int, - private[LBFGSX] val memStep: IndexedSeq[T] = IndexedSeq.empty, - private[LBFGSX] val memGradDelta: IndexedSeq[T] = IndexedSeq.empty) - (implicit space: 
MutableInnerProductModule[T, Double]) - extends NumericOps[ApproximateInverseHessianX[T]] { - - import space._ - - def repr: ApproximateInverseHessianX[T] = this - - def updated(step: T, gradDelta: T): ApproximateInverseHessianX[T] = { - val (a, b) = ACC.update(step, gradDelta, this.memStep, this.memGradDelta, m)(space) - new ApproximateInverseHessianX(m, a, b) - } - - - def historyLength: Int = memStep.length - - def *(grad: T): T = { - val a = ACC.getInverseOfHessian(grad, this.memStep, this.memGradDelta, m, historyLength) - a - } - } - -} - diff --git a/ml-accelerator/src/main/scala/breeze/optimize/OWLQNX.scala b/ml-accelerator/src/main/scala/breeze/optimize/OWLQNX.scala deleted file mode 100644 index 8e55560..0000000 --- a/ml-accelerator/src/main/scala/breeze/optimize/OWLQNX.scala +++ /dev/null @@ -1,97 +0,0 @@ -// scalastyle:off header.matches -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ - -package breeze.optimize - -import breeze.math._ -import breeze.numerics._ -import breeze.util._ - -/** - * Implements the Orthant-wise Limited Memory QuasiNewton method, - * which is a variant of LBFGS that handles L1 regularization. - * - * Paper is Andrew and Gao (2007) Scalable Training of L1-Regularized Log-Linear Models - * - * @author dlwh - */ -class OWLQNX[K, T](maxIter: Int, m: Int, l1reg: K => Double, tolerance: Double) - (implicit space: MutableEnumeratedCoordinateField[T, K, Double]) - extends LBFGSX[T](maxIter, m, tolerance = tolerance) with SerializableLogging { - - def this(maxIter: Int, m: Int, l1reg: K => Double) - (implicit space: MutableEnumeratedCoordinateField[T, K, Double]) - = this(maxIter, m, l1reg, 1E-8) - - def this(maxIter: Int, m: Int, l1reg: Double, tolerance: Double = 1E-8) - (implicit space: MutableEnumeratedCoordinateField[T, K, Double]) - = this(maxIter, m, (_: K) => l1reg, tolerance) - - def this(maxIter: Int, m: Int, l1reg: Double) - (implicit space: MutableEnumeratedCoordinateField[T, K, Double]) - = this(maxIter, m, (_: K) => l1reg, 1E-8) - - def this(maxIter: Int, m: Int)(implicit space: MutableEnumeratedCoordinateField[T, K, Double]) - = this(maxIter, m, (_: K) => 1.0, 1E-8) - - require(m > 0) - - import space._ - - override def chooseDescentDirection(state: State, fn: DiffFunction[T]): T = { - val descentDir = super.chooseDescentDirection(state.copy(grad = state.adjustedGradient), fn) - val correctedDir = space.zipMapValues.map(descentDir, state.adjustedGradient, { case (d, g) - => if (d * g < 0) d else 0.0 }) - - correctedDir - } - - - // projects x to be on the same orthant as y - // this basically requires that x'_i = x_i if sign(x_i) == sign(y_i), and 0 otherwise. 
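Stated concretely, the projection described in the comment above keeps a coordinate only when it lies in the same orthant as the reference point; in takeStep below the reference orthant comes from computeOrthant(state.x, adjustedGradient), where a zero coordinate takes the sign of the negative gradient. A standalone sketch over plain arrays, illustrative only (the deleted code expresses the same rule through breeze's zipMapValues):

    // Zero out any coordinate of x that falls outside the orthant of y.
    def projectOntoOrthant(x: Array[Double], y: Array[Double]): Array[Double] =
      x.zip(y).map { case (xi, yi) =>
        if (math.signum(xi) == math.signum(yi)) xi else 0.0
      }

    projectOntoOrthant(Array(0.7, -0.2, 0.1), Array(1.0, 0.5, -2.0))
    // -> Array(0.7, 0.0, 0.0)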
- - override def takeStep(state: State, dir: T, stepSize: Double): T = { - val stepped = state.x + dir * stepSize - val orthant = computeOrthant(state.x, state.adjustedGradient) - space.zipMapValues.map(stepped, orthant, { case (v, ov) => - v * I(math.signum(v) == math.signum(ov)) - }) - } - - // Adds in the regularization stuff to the gradient - override def adjust(newX: T, newGrad: T, newVal: Double): (Double, T) = { - var adjValue = newVal - val res = space.zipMapKeyValues.mapActive(newX, newGrad, {case (i, xv, v) => - val l1regValue = l1reg(i) - require(l1regValue >= 0.0) - - if(l1regValue == 0.0) { - v - } else { - adjValue += Math.abs(l1regValue * xv) - xv match { - case 0.0 => - val delta_+ = v + l1regValue - val delta_- = v - l1regValue - if (delta_- > 0) delta_- else if (delta_+ < 0) delta_+ else 0.0 - case _ => v + math.signum(xv) * l1regValue - } - } - }) - adjValue -> res - } - - private def computeOrthant(x: T, grad: T) = { - val orth = space.zipMapValues.map(x, grad, {case (v, gv) => - if (v != 0) math.signum(v) - else math.signum(-gv) - }) - orth - } - -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/classification/DecisionTreeClassifier.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/classification/DecisionTreeClassifier.scala deleted file mode 100644 index 53ee5ee..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/classification/DecisionTreeClassifier.scala +++ /dev/null @@ -1,160 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.ml.classification - -import org.apache.spark.annotation.Since -import org.apache.spark.ml.feature.LabeledPoint -import org.apache.spark.ml.linalg.Vector -import org.apache.spark.ml.param.ParamMap -import org.apache.spark.ml.tree._ -import org.apache.spark.ml.tree.impl.DecisionForest -import org.apache.spark.ml.util._ -import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo, Strategy => OldStrategy} -import org.apache.spark.rdd.RDD -import org.apache.spark.sql.Dataset - - -/** - * Decision tree learning algorithm (http://en.wikipedia.org/wiki/Decision_tree_learning) - * for classification. - * It supports both binary and multiclass labels, as well as both continuous and categorical - * features. 
- */ -@Since("1.4.0") -class DecisionTreeClassifier @Since("1.4.0") ( - @Since("1.4.0") override val uid: String) - extends ProbabilisticClassifier[Vector, DecisionTreeClassifier, DecisionTreeClassificationModel] - with DecisionTreeClassifierParams with DefaultParamsWritable { - - @Since("1.4.0") - def this() = this(Identifiable.randomUID("dtc")) - - // Override parameter setters from parent trait for Java API compatibility. - - /** @group setParam */ - @Since("1.4.0") - override def setMaxDepth(value: Int): this.type = set(maxDepth, value) - - /** @group setParam */ - @Since("1.4.0") - override def setMaxBins(value: Int): this.type = set(maxBins, value) - - /** @group setParam */ - @Since("1.4.0") - override def setMinInstancesPerNode(value: Int): this.type = set(minInstancesPerNode, value) - - /** @group setParam */ - @Since("1.4.0") - override def setMinInfoGain(value: Double): this.type = set(minInfoGain, value) - - /** @group expertSetParam */ - @Since("1.4.0") - override def setMaxMemoryInMB(value: Int): this.type = set(maxMemoryInMB, value) - - /** @group expertSetParam */ - @Since("1.4.0") - override def setCacheNodeIds(value: Boolean): this.type = set(cacheNodeIds, value) - - /** - * Specifies how often to checkpoint the cached node IDs. - * E.g. 10 means that the cache will get checkpointed every 10 iterations. - * This is only used if cacheNodeIds is true and if the checkpoint directory is set in - * [[org.apache.spark.SparkContext]]. - * Must be at least 1. - * (default = 10) - * @group setParam - */ - @Since("1.4.0") - override def setCheckpointInterval(value: Int): this.type = set(checkpointInterval, value) - - /** @group setParam */ - @Since("1.4.0") - override def setImpurity(value: String): this.type = set(impurity, value) - - /** @group setParam */ - @Since("1.6.0") - override def setSeed(value: Long): this.type = set(seed, value) - - override protected def train(dataset: Dataset[_]): DecisionTreeClassificationModel = { - val categoricalFeatures: Map[Int, Int] = - MetadataUtils.getCategoricalFeatures(dataset.schema($(featuresCol))) - val numClasses: Int = getNumClasses(dataset) - - if (isDefined(thresholds)) { - require($(thresholds).length == numClasses, this.getClass.getSimpleName + - ".train() called with non-matching numClasses and thresholds.length." + - s" numClasses=$numClasses, but thresholds has length ${$(thresholds).length}") - } - - val oldDataset: RDD[LabeledPoint] = extractLabeledPoints(dataset, numClasses) - val strategy = getOldStrategy(categoricalFeatures, numClasses) - - val instr = Instrumentation.create(this, oldDataset) - instr.logParams(params: _*) - - val trees = DecisionForest.run(oldDataset, strategy, numTrees = 1, - featureSubsetStrategy = "all", - seed = $(seed), instr = Some(instr), parentUID = Some(uid)) - - val m = trees.head.asInstanceOf[DecisionTreeClassificationModel] - instr.logSuccess(m) - m - } - - /** (private[ml]) Train a decision tree on an RDD */ - private[ml] def train(data: RDD[LabeledPoint], - oldStrategy: OldStrategy): DecisionTreeClassificationModel = { - val instr = Instrumentation.create(this, data) - instr.logParams(params: _*) - - val trees = DecisionForest.run(data, oldStrategy, numTrees = 1, - featureSubsetStrategy = "all", - seed = 0L, instr = Some(instr), parentUID = Some(uid)) - - val m = trees.head.asInstanceOf[DecisionTreeClassificationModel] - instr.logSuccess(m) - m - } - - /** (private[ml]) Create a Strategy instance to use with the old API. 
*/ - private[ml] def getOldStrategy( - categoricalFeatures: Map[Int, Int], - numClasses: Int): OldStrategy = { - super.getOldStrategy(categoricalFeatures, numClasses, OldAlgo.Classification, getOldImpurity, - subsamplingRate = 1.0) - } - - @Since("1.4.1") - override def copy(extra: ParamMap): DecisionTreeClassifier = defaultCopy(extra) -} - -@Since("1.4.0") -object DecisionTreeClassifier extends DefaultParamsReadable[DecisionTreeClassifier] { - /** Accessor for supported impurities: entropy, gini */ - @Since("1.4.0") - final val supportedImpurities: Array[String] = TreeClassifierParams.supportedImpurities - - @Since("2.0.0") - override def load(path: String): DecisionTreeClassifier = super.load(path) -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/classification/GBTClassifier.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/classification/GBTClassifier.scala deleted file mode 100644 index 9b62352..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/classification/GBTClassifier.scala +++ /dev/null @@ -1,423 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.ml.classification - -import com.github.fommil.netlib.BLAS.{getInstance => blas} -import org.json4s.{DefaultFormats, JObject} -import org.json4s.JsonDSL._ - -import org.apache.spark.annotation.Since -import org.apache.spark.internal.Logging -import org.apache.spark.ml.feature.LabeledPoint -import org.apache.spark.ml.linalg.{DenseVector, SparseVector, Vector, Vectors} -import org.apache.spark.ml.param.ParamMap -import org.apache.spark.ml.regression.DecisionTreeRegressionModel -import org.apache.spark.ml.tree._ -import org.apache.spark.ml.tree.impl.GradientBoostedTrees -import org.apache.spark.ml.util._ -import org.apache.spark.ml.util.DefaultParamsReader.Metadata -import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo} -import org.apache.spark.mllib.tree.model.{GradientBoostedTreesModel => OldGBTModel} -import org.apache.spark.rdd.RDD -import org.apache.spark.sql.{DataFrame, Dataset, Row} -import org.apache.spark.sql.functions._ - -/** - * Gradient-Boosted Trees (GBTs) (http://en.wikipedia.org/wiki/Gradient_boosting) - * learning algorithm for classification. - * It supports binary labels, as well as both continuous and categorical features. - * - * The implementation is based upon: J.H. Friedman. "Stochastic Gradient Boosting." 1999. - * - * Notes on Gradient Boosting vs. 
TreeBoost: - * - This implementation is for Stochastic Gradient Boosting, not for TreeBoost. - * - Both algorithms learn tree ensembles by minimizing loss functions. - * - TreeBoost (Friedman, 1999) additionally modifies the outputs at tree leaf nodes - * based on the loss function, whereas the original gradient boosting method does not. - * - We expect to implement TreeBoost in the future: - * [https://issues.apache.org/jira/browse/SPARK-4240] - * - * @note Multiclass labels are not currently supported. - */ -@Since("1.4.0") -class GBTClassifier @Since("1.4.0") ( - @Since("1.4.0") override val uid: String) - extends ProbabilisticClassifier[Vector, GBTClassifier, GBTClassificationModel] - with GBTClassifierParams with DefaultParamsWritable with Logging { - - @Since("1.4.0") - def this() = this(Identifiable.randomUID("gbtc")) - - // Override parameter setters from parent trait for Java API compatibility. - - // Parameters from TreeClassifierParams: - - /** @group setParam */ - @Since("1.4.0") - override def setMaxDepth(value: Int): this.type = set(maxDepth, value) - - /** @group setParam */ - @Since("1.4.0") - override def setMaxBins(value: Int): this.type = set(maxBins, value) - - /** @group setParam */ - @Since("1.4.0") - override def setMinInstancesPerNode(value: Int): this.type = set(minInstancesPerNode, value) - - /** @group setParam */ - @Since("1.4.0") - override def setMinInfoGain(value: Double): this.type = set(minInfoGain, value) - - /** @group expertSetParam */ - @Since("1.4.0") - override def setMaxMemoryInMB(value: Int): this.type = set(maxMemoryInMB, value) - - /** @group expertSetParam */ - @Since("1.4.0") - override def setCacheNodeIds(value: Boolean): this.type = set(cacheNodeIds, value) - - /** - * Specifies how often to checkpoint the cached node IDs. - * E.g. 10 means that the cache will get checkpointed every 10 iterations. - * This is only used if cacheNodeIds is true and if the checkpoint directory is set in - * [[org.apache.spark.SparkContext]]. - * Must be at least 1. - * (default = 10) - * @group setParam - */ - @Since("1.4.0") - override def setCheckpointInterval(value: Int): this.type = set(checkpointInterval, value) - - /** - * The impurity setting is ignored for GBT models. - * Individual trees are built using impurity "Variance." 
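 * Calling the setter below only logs a warning and leaves the param untouched;
 * a sketch of the observable behavior:
 * {{{
 *   val gbt = new GBTClassifier()
 *   gbt.setImpurity("gini")  // logs "GBTClassifier.setImpurity should NOT be used"
 *   // training still builds the component regression trees with variance impurity
 * }}}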
- * - * @group setParam - */ - @Since("1.4.0") - override def setImpurity(value: String): this.type = { - logWarning("GBTClassifier.setImpurity should NOT be used") - this - } - - // Parameters from TreeEnsembleParams: - - /** @group setParam */ - @Since("1.4.0") - override def setSubsamplingRate(value: Double): this.type = set(subsamplingRate, value) - - /** @group setParam */ - @Since("1.4.0") - override def setSeed(value: Long): this.type = set(seed, value) - - // Parameters from GBTParams: - - /** @group setParam */ - @Since("1.4.0") - override def setMaxIter(value: Int): this.type = set(maxIter, value) - - /** @group setParam */ - @Since("1.4.0") - override def setStepSize(value: Double): this.type = set(stepSize, value) - - /** @group setParam */ - @Since("2.3.0") - override def setFeatureSubsetStrategy(value: String): this.type = - set(featureSubsetStrategy, value) - - // Parameters from GBTClassifierParams: - - /** @group setParam */ - @Since("1.4.0") - def setLossType(value: String): this.type = set(lossType, value) - - override protected def train(dataset: Dataset[_]): GBTClassificationModel = { - val categoricalFeatures: Map[Int, Int] = - MetadataUtils.getCategoricalFeatures(dataset.schema($(featuresCol))) - // We copy and modify this from Classifier.extractLabeledPoints since GBT only supports - // 2 classes now. This lets us provide a more precise error message. - val oldDataset: RDD[LabeledPoint] = - dataset.select(col($(labelCol)), col($(featuresCol))).rdd.map { - case Row(label: Double, features: Vector) => - require(label == 0 || label == 1, s"GBTClassifier was given" + - s" dataset with invalid label $label. Labels must be in {0,1}; note that" + - s" GBTClassifier currently only supports binary classification.") - LabeledPoint(label, features) - } - val numFeatures = oldDataset.first().features.size - val boostingStrategy = super.getOldBoostingStrategy(categoricalFeatures, OldAlgo.Classification) - - val numClasses = 2 - if (isDefined(thresholds)) { - require($(thresholds).length == numClasses, this.getClass.getSimpleName + - ".train() called with non-matching numClasses and thresholds.length." 
+ - s" numClasses=$numClasses, but thresholds has length ${$(thresholds).length}") - } - - val instr = Instrumentation.create(this, oldDataset) - instr.logParams(labelCol, featuresCol, predictionCol, impurity, lossType, - maxDepth, maxBins, maxIter, maxMemoryInMB, minInfoGain, minInstancesPerNode, - seed, stepSize, subsamplingRate, cacheNodeIds, checkpointInterval, featureSubsetStrategy) - instr.logNumFeatures(numFeatures) - instr.logNumClasses(numClasses) - - val (doUseAcc, setUseAccFlag) = super.getDoUseAcc - val (baseLearners, learnerWeights) = if (setUseAccFlag) { - GradientBoostedTrees.run(oldDataset, boostingStrategy, - $(seed), $(featureSubsetStrategy), doUseAcc) - } else { - GradientBoostedTrees.run(oldDataset, boostingStrategy, - $(seed), $(featureSubsetStrategy)) - } - - val m = new GBTClassificationModel(uid, baseLearners, learnerWeights, numFeatures) - instr.logSuccess(m) - m - } - - @Since("1.4.1") - override def copy(extra: ParamMap): GBTClassifier = defaultCopy(extra) -} - -@Since("1.4.0") -object GBTClassifier extends DefaultParamsReadable[GBTClassifier] { - - /** Accessor for supported loss settings: logistic */ - @Since("1.4.0") - final val supportedLossTypes: Array[String] = GBTClassifierParams.supportedLossTypes - - @Since("2.0.0") - override def load(path: String): GBTClassifier = super.load(path) -} - -/** - * Gradient-Boosted Trees (GBTs) (http://en.wikipedia.org/wiki/Gradient_boosting) - * model for classification. - * It supports binary labels, as well as both continuous and categorical features. - * - * @param _trees Decision trees in the ensemble. - * @param _treeWeights Weights for the decision trees in the ensemble. - * - * @note Multiclass labels are not currently supported. - */ -@Since("1.6.0") -class GBTClassificationModel private[ml]( - @Since("1.6.0") override val uid: String, - private val _trees: Array[DecisionTreeRegressionModel], - private val _treeWeights: Array[Double], - @Since("1.6.0") override val numFeatures: Int, - @Since("2.2.0") override val numClasses: Int) - extends ProbabilisticClassificationModel[Vector, GBTClassificationModel] - with GBTClassifierParams with TreeEnsembleModel[DecisionTreeRegressionModel] - with MLWritable with Serializable { - - require(_trees.nonEmpty, "GBTClassificationModel requires at least 1 tree.") - require(_trees.length == _treeWeights.length, "GBTClassificationModel given trees, treeWeights" + - s" of non-matching lengths (${_trees.length}, ${_treeWeights.length}, respectively).") - - /** - * Construct a GBTClassificationModel - * - * @param _trees Decision trees in the ensemble. - * @param _treeWeights Weights for the decision trees in the ensemble. - * @param numFeatures The number of features. - */ - private[ml] def this( - uid: String, - _trees: Array[DecisionTreeRegressionModel], - _treeWeights: Array[Double], - numFeatures: Int) = - this(uid, _trees, _treeWeights, numFeatures, 2) - - /** - * Construct a GBTClassificationModel - * - * @param _trees Decision trees in the ensemble. - * @param _treeWeights Weights for the decision trees in the ensemble. 
- */ - @Since("1.6.0") - def this(uid: String, _trees: Array[DecisionTreeRegressionModel], _treeWeights: Array[Double]) = - this(uid, _trees, _treeWeights, -1, 2) - - @Since("1.4.0") - override def trees: Array[DecisionTreeRegressionModel] = _trees - - /** - * Number of trees in ensemble - */ - @Since("2.0.0") - val getNumTrees: Int = trees.length - - @Since("1.4.0") - override def treeWeights: Array[Double] = _treeWeights - - override protected def transformImpl(dataset: Dataset[_]): DataFrame = { - val bcastModel = dataset.sparkSession.sparkContext.broadcast(this) - val predictUDF = udf { (features: Any) => - bcastModel.value.predict(features.asInstanceOf[Vector]) - } - dataset.withColumn($(predictionCol), predictUDF(col($(featuresCol)))) - } - - override protected def predict(features: Vector): Double = { - // If thresholds defined, use predictRaw to get probabilities, otherwise use optimization - if (isDefined(thresholds)) { - super.predict(features) - } else { - if (margin(features) > 0.0) 1.0 else 0.0 - } - } - - override protected def predictRaw(features: Vector): Vector = { - val prediction: Double = margin(features) - Vectors.dense(Array(-prediction, prediction)) - } - - override protected def raw2probabilityInPlace(rawPrediction: Vector): Vector = { - rawPrediction match { - case dv: DenseVector => - dv.values(0) = loss.computeProbability(dv.values(0)) - dv.values(1) = 1.0 - dv.values(0) - dv - case sv: SparseVector => - throw new RuntimeException("Unexpected error in GBTClassificationModel:" + - " raw2probabilityInPlace encountered SparseVector") - } - } - - /** Number of trees in ensemble */ - val numTrees: Int = trees.length - - @Since("1.4.0") - override def copy(extra: ParamMap): GBTClassificationModel = { - copyValues(new GBTClassificationModel(uid, _trees, _treeWeights, numFeatures, numClasses), - extra).setParent(parent) - } - - @Since("1.4.0") - override def toString: String = { - s"GBTClassificationModel (uid=$uid) with $numTrees trees" - } - - /** - * Estimate of the importance of each feature. - * - * Each feature's importance is the average of its importance across all trees in the ensemble - * The importance vector is normalized to sum to 1. This method is suggested by Hastie et al. - * (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.) - * and follows the implementation from scikit-learn. - - * See `DecisionTreeClassificationModel.featureImportances` - */ - @Since("2.0.0") - lazy val featureImportances: Vector = TreeEnsembleModel.featureImportances(trees, numFeatures) - - /** Raw prediction for the positive class. 
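 * Equivalent, as a scalar sketch, to the BLAS dot product computed below:
 * {{{
 *   trees.zip(treeWeights).map { case (tree, w) =>
 *     w * tree.rootNode.predictImpl(features).prediction
 *   }.sum
 * }}}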
*/ - private def margin(features: Vector): Double = { - val treePredictions = _trees.map(_.rootNode.predictImpl(features).prediction) - blas.ddot(numTrees, treePredictions, 1, _treeWeights, 1) - } - - /** (private[ml]) Convert to a model in the old API */ - private[ml] def toOld: OldGBTModel = { - new OldGBTModel(OldAlgo.Classification, _trees.map(_.toOld), _treeWeights) - } - - // hard coded loss, which is not meant to be changed in the model - private val loss = getOldLossType - - @Since("2.0.0") - override def write: MLWriter = new GBTClassificationModel.GBTClassificationModelWriter(this) -} - -@Since("2.0.0") -object GBTClassificationModel extends MLReadable[GBTClassificationModel] { - - private val numFeaturesKey: String = "numFeatures" - private val numTreesKey: String = "numTrees" - - @Since("2.0.0") - override def read: MLReader[GBTClassificationModel] = new GBTClassificationModelReader - - @Since("2.0.0") - override def load(path: String): GBTClassificationModel = super.load(path) - - private[GBTClassificationModel] - class GBTClassificationModelWriter(instance: GBTClassificationModel) extends MLWriter { - - override protected def saveImpl(path: String): Unit = { - - val extraMetadata: JObject = Map( - numFeaturesKey -> instance.numFeatures, - numTreesKey -> instance.getNumTrees) - EnsembleModelReadWrite.saveImpl(instance, path, sparkSession, extraMetadata) - } - } - - private class GBTClassificationModelReader extends MLReader[GBTClassificationModel] { - - /** Checked against metadata when loading model */ - private val className = classOf[GBTClassificationModel].getName - private val treeClassName = classOf[DecisionTreeRegressionModel].getName - - override def load(path: String): GBTClassificationModel = { - implicit val format = DefaultFormats - val (metadata: Metadata, treesData: Array[(Metadata, Node)], treeWeights: Array[Double]) = - EnsembleModelReadWrite.loadImpl(path, sparkSession, className, treeClassName) - val numFeatures = (metadata.metadata \ numFeaturesKey).extract[Int] - val numTrees = (metadata.metadata \ numTreesKey).extract[Int] - - val trees: Array[DecisionTreeRegressionModel] = treesData.map { - case (treeMetadata, root) => - val tree = - new DecisionTreeRegressionModel(treeMetadata.uid, root, numFeatures) - DefaultParamsReader.getAndSetParams(tree, treeMetadata) - tree - } - require(numTrees == trees.length, s"GBTClassificationModel.load expected $numTrees" + - s" trees based on metadata but found ${trees.length} trees.") - val model = new GBTClassificationModel(metadata.uid, - trees, treeWeights, numFeatures) - DefaultParamsReader.getAndSetParams(model, metadata) - model - } - } - - /** Convert a model from the old API */ - private[ml] def fromOld( - oldModel: OldGBTModel, - parent: GBTClassifier, - categoricalFeatures: Map[Int, Int], - numFeatures: Int = -1, - numClasses: Int = 2): GBTClassificationModel = { - require(oldModel.algo == OldAlgo.Classification, "Cannot convert GradientBoostedTreesModel" + - s" with algo=${oldModel.algo} (old API) to GBTClassificationModel (new API).") - val newTrees = oldModel.trees.map { tree => - // parent for each tree is null since there is no good way to set this. 
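// (the old API's DecisionTreeModel keeps no reference to the Estimator that built it)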
- DecisionTreeRegressionModel.fromOld(tree, null, categoricalFeatures) - } - val uid = if (parent != null) parent.uid else Identifiable.randomUID("gbtc") - new GBTClassificationModel(uid, newTrees, oldModel.treeWeights, numFeatures, numClasses) - } -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/classification/LinearSVC.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/classification/LinearSVC.scala deleted file mode 100644 index 6689c08..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/classification/LinearSVC.scala +++ /dev/null @@ -1,304 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.ml.classification - -import scala.collection.mutable - -import breeze.linalg.{DenseVector => BDV} -import breeze.optimize.{CachedDiffFunction, OWLQNX => BreezeOWLQN} - -import org.apache.spark.SparkException -import org.apache.spark.annotation.{Experimental, Since} -import org.apache.spark.ml.StaticUtils -import org.apache.spark.ml.feature.Instance -import org.apache.spark.ml.linalg._ -import org.apache.spark.ml.optim.aggregator.HingeAggregatorX -import org.apache.spark.ml.optim.loss.{L2Regularization, RDDLossFunctionX} -import org.apache.spark.ml.param._ -import org.apache.spark.ml.util._ -import org.apache.spark.mllib.linalg.VectorImplicits._ -import org.apache.spark.mllib.stat.MultivariateOnlineSummarizer -import org.apache.spark.rdd.RDD -import org.apache.spark.sql.{Dataset, Row} -import org.apache.spark.sql.functions.{col, lit} - - - -/** - * :: Experimental :: - * - * - * Linear SVM Classifier - * - * This binary classifier optimizes the Hinge Loss using the OWLQN optimizer. - * Only supports L2 regularization currently. - * - */ -@Since("2.2.0") -@Experimental -class LinearSVC @Since("2.2.0") ( - @Since("2.2.0") override val uid: String) - extends Classifier[Vector, LinearSVC, LinearSVCModel] - with LinearSVCParams with DefaultParamsWritable { - - @Since("2.2.0") - def this() = this(Identifiable.randomUID("linearsvc")) - - /** - * Set the regularization parameter. - * Default is 0.0. - * - * @group setParam - */ - - - var ic = 0.5 - var iters = -1 - def setIc(a: Double): Unit = { - this.ic = a - } - - @Since("2.2.0") - def setRegParam(value: Double): this.type = set(regParam, value) - setDefault(regParam -> 0.0) - - /** - * Set the maximum number of iterations. - * Default is 100. 
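 * A hypothetical configuration sketch (values chosen for illustration only):
 * {{{
 *   val svc = new LinearSVC()
 *     .setMaxIter(200)
 *     .setRegParam(0.01)
 * }}}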
-   *
-   * @group setParam
-   */
-  @Since("2.2.0")
-  def setMaxIter(value: Int): this.type = set(maxIter, value)
-  setDefault(maxIter -> 100)
-
-  /**
-   * Whether to fit an intercept term.
-   * Default is true.
-   *
-   * @group setParam
-   */
-  @Since("2.2.0")
-  def setFitIntercept(value: Boolean): this.type = set(fitIntercept, value)
-  setDefault(fitIntercept -> true)
-
-  /**
-   * Set the convergence tolerance of iterations.
-   * Smaller values will lead to higher accuracy at the cost of more iterations.
-   * Default is 0.0.
-   *
-   * @group setParam
-   */
-  @Since("2.2.0")
-  def setTol(value: Double): this.type = set(tol, value)
-  setDefault(tol -> 0.0)
-
-  /**
-   * Whether to standardize the training features before fitting the model.
-   * Default is true.
-   *
-   * @group setParam
-   */
-  @Since("2.2.0")
-  def setStandardization(value: Boolean): this.type = set(standardization, value)
-  setDefault(standardization -> true)
-
-  /**
-   * Set the value of param [[weightCol]].
-   * If this is not set or empty, we treat all instance weights as 1.0.
-   * Default is not set, so all instances have weight one.
-   *
-   * @group setParam
-   */
-  @Since("2.2.0")
-  def setWeightCol(value: String): this.type = set(weightCol, value)
-
-  /**
-   * Set threshold in binary classification.
-   *
-   * @group setParam
-   */
-  @Since("2.2.0")
-  def setThreshold(value: Double): this.type = set(threshold, value)
-  setDefault(threshold -> 0.0)
-
-  /**
-   * Suggested depth for treeAggregate (greater than or equal to 2).
-   * If the dimensions of features or the number of partitions are large,
-   * this param could be adjusted to a larger size.
-   * Default is 2.
-   *
-   * @group expertSetParam
-   */
-  @Since("2.2.0")
-  def setAggregationDepth(value: Int): this.type = set(aggregationDepth, value)
-  setDefault(aggregationDepth -> 2)
-
-  @Since("2.2.0")
-  override def copy(extra: ParamMap): LinearSVC = defaultCopy(extra)
-
-  override protected def train(dataset: Dataset[_]): LinearSVCModel = {
-    val w = if (!isDefined(weightCol) || $(weightCol).isEmpty) lit(1.0) else col($(weightCol))
-    val instances: RDD[Instance] =
-      dataset.select(col($(labelCol)), w, col($(featuresCol))).rdd.map {
-        case Row(label: Double, weight: Double, features: Vector) =>
-          Instance(label, weight, features)
-      }
-
-    val instr = Instrumentation.create(this, instances)
-    instr.logParams(regParam, maxIter, fitIntercept, tol, standardization, threshold,
-      aggregationDepth)
-
-    val (summarizer, labelSummarizer) = {
-      val seqOp = (c: (MultivariateOnlineSummarizer, MultiClassSummarizer),
-        instance: Instance) => {
-          (c._1.add(instance.features, instance.weight),
-            c._2.add(instance.label, instance.weight + StaticUtils.ZERO_DOUBLE))
-      }
-
-      val combOp = (c1: (MultivariateOnlineSummarizer, MultiClassSummarizer),
-        c2: (MultivariateOnlineSummarizer, MultiClassSummarizer)) =>
-          (c1._1.merge(c2._1), c1._2.merge(c2._2))
-
-      instances.treeAggregate(
-        (new MultivariateOnlineSummarizer, new MultiClassSummarizer)
-      )(seqOp, combOp, $(aggregationDepth))
-    }
-
-    val histogram = labelSummarizer.histogram
-    val numInvalid = labelSummarizer.countInvalid
-    val numFeatures = summarizer.mean.size
-    val numFeaturesPlusIntercept = if (getFitIntercept) numFeatures + 1 else numFeatures
-
-    val numClasses = MetadataUtils.getNumClasses(dataset.schema($(labelCol))) match {
-      case Some(n: Int) =>
-        require(n >= histogram.length, s"Specified number of classes $n was " +
-          s"less than the number of unique labels ${histogram.length}.")
-        n
-      case None => histogram.length
-    }
-    require(numClasses == 2, s"LinearSVC
only supports binary classification." + - s" $numClasses classes detected in $labelCol") - instr.logNumClasses(numClasses) - instr.logNumFeatures(numFeatures) - - val (coefficientVector, interceptVector, objectiveHistory) = { - if (numInvalid != 0) { - val msg = s"Classification labels should be in [0 to ${numClasses - 1}]. " + - s"Found $numInvalid invalid labels." - logError(msg) - throw new SparkException(msg) - } - - val featuresStd = summarizer.variance.toArray.map(math.sqrt) - val getFeaturesStd = (j: Int) => featuresStd(j) - val regParamL2 = $(regParam) - val bcFeaturesStd = instances.context.broadcast(featuresStd.map{t => if (t!=0.0)1/t else 0.0}) - val regularization = if (regParamL2 != 0.0) { - val shouldApply = (idx: Int) => idx >= 0 && idx < numFeatures - Some(new L2Regularization(regParamL2, shouldApply, - if ($(standardization)) None else Some(getFeaturesStd))) - } else { - None - } - - val getAggregatorFunc = new HingeAggregatorX(bcFeaturesStd, $(fitIntercept))(_) - val costFun = new RDDLossFunctionX(instances, getAggregatorFunc, regularization, - $(aggregationDepth)) - - def regParamL1Fun = (index: Int) => 0D - val optimizer = new BreezeOWLQN[Int, BDV[Double]]($(maxIter), 10, regParamL1Fun, $(tol)) - - var u = ic - try { - u = instances.sparkContext.getConf - .getDouble("spark.boostkit.LinearSVC.inertiaCoefficient", ic) - if (u < 0.0) { - throw new Exception - } - } - catch { - case x: Exception => - throw new Exception("'spark.boostkit.LinearSVC.inertiaCoefficient' value is invalid") - } - this.ic = u - - optimizer.setInertiaCoefficient(ic) - val initialCoefWithIntercept = Vectors.zeros(numFeaturesPlusIntercept) - - val states = optimizer.iterations(new CachedDiffFunction(costFun), - initialCoefWithIntercept.asBreeze.toDenseVector) - - val scaledObjectiveHistory = mutable.ArrayBuilder.make[Double] - var state: optimizer.State = null - while (states.hasNext) { - state = states.next() - scaledObjectiveHistory += state.adjustedValue - iters += 1 - } - - bcFeaturesStd.destroy(blocking = false) - if (state == null) { - val msg = s"${optimizer.getClass.getName} failed." - logError(msg) - throw new SparkException(msg) - } - - /* - The coefficients are trained in the scaled space; we're converting them back to - the original space. - Note that the intercept in scaled space and original space is the same; - as a result, no scaling is needed. - */ - val rawCoefficients = state.x.toArray - val coefficientArray = Array.tabulate(numFeatures) { i => - if (featuresStd(i) != 0.0) { - rawCoefficients(i) / featuresStd(i) - } else { - 0.0 - } - } - - val intercept = if ($(fitIntercept)) { - rawCoefficients(numFeaturesPlusIntercept - 1) - } else { - 0.0 - } - (Vectors.dense(coefficientArray), intercept, scaledObjectiveHistory.result()) - } - - val model = copyValues(new LinearSVCModel(uid, coefficientVector, interceptVector)) - instr.logSuccess(model) - model - } - - def getIters: Int = iters -} - -@Since("2.2.0") -object LinearSVC extends DefaultParamsReadable[LinearSVC] { - - @Since("2.2.0") - override def load(path: String): LinearSVC = super.load(path) -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala deleted file mode 100644 index a47a709..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala +++ /dev/null @@ -1,703 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. 
-* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.ml.classification - -import java.util.Locale - -import scala.collection.mutable - -import breeze.linalg.{DenseVector => BDV} -import breeze.optimize.{CachedDiffFunction, LBFGSL, OWLQNL} - -import org.apache.spark.SparkException -import org.apache.spark.annotation.Since -import org.apache.spark.internal.Logging -import org.apache.spark.ml.StaticUtils -import org.apache.spark.ml.feature.Instance -import org.apache.spark.ml.linalg._ -import org.apache.spark.ml.optim.aggregator.LogisticAggregatorX -import org.apache.spark.ml.optim.loss.{L2Regularization, RDDLossFunctionX} -import org.apache.spark.ml.param._ -import org.apache.spark.ml.util._ -import org.apache.spark.mllib.linalg.VectorImplicits._ -import org.apache.spark.mllib.stat.MultivariateOnlineSummarizer -import org.apache.spark.rdd.RDD -import org.apache.spark.sql.{Dataset, Row} -import org.apache.spark.sql.functions.{col, lit} -import org.apache.spark.storage.StorageLevel - -/** - * Logistic regression. Supports: - * - Multinomial logistic (softmax) regression. - * - Binomial logistic regression. - * - * This class supports fitting traditional logistic regression model by LBFGS/OWLQN and - * bound (box) constrained logistic regression model by LBFGSB. - */ -@Since("1.2.0") -class LogisticRegression @Since("1.2.0") ( - @Since("1.4.0") override val uid: String) - extends ProbabilisticClassifier[Vector, LogisticRegression, LogisticRegressionModel] - with LogisticRegressionParams with DefaultParamsWritable with Logging { - - @Since("1.4.0") - def this() = this(Identifiable.randomUID("logreg")) - - /** - * Set the regularization parameter. - * Default is 0.0. - * - * @group setParam - */ - @Since("1.2.0") - def setRegParam(value: Double): this.type = set(regParam, value) - setDefault(regParam -> 0.0) - - /** - * Set the ElasticNet mixing parameter. - * For alpha = 0, the penalty is an L2 penalty. - * For alpha = 1, it is an L1 penalty. - * For alpha in (0,1), the penalty is a combination of L1 and L2. - * Default is 0.0 which is an L2 penalty. - * - * Note: Fitting under bound constrained optimization only supports L2 regularization, - * so throws exception if this param is non-zero value. - * - * @group setParam - */ - @Since("1.4.0") - def setElasticNetParam(value: Double): this.type = set(elasticNetParam, value) - setDefault(elasticNetParam -> 0.0) - - /** - * Set the maximum number of iterations. - * Default is 100. 
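 * More iterations are typically needed when the regularized objective is harder
 * to optimize; for reference, the penalties the optimizer later applies are
 * derived from the two params above exactly as in the training code below:
 * {{{
 *   val regParamL1 = $(elasticNetParam) * $(regParam)
 *   val regParamL2 = (1.0 - $(elasticNetParam)) * $(regParam)
 * }}}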
- * - * @group setParam - */ - @Since("1.2.0") - def setMaxIter(value: Int): this.type = set(maxIter, value) - setDefault(maxIter -> 100) - - /** - * Set the convergence tolerance of iterations. - * Smaller value will lead to higher accuracy at the cost of more iterations. - * Default is 1E-6. - * - * @group setParam - */ - @Since("1.4.0") - def setTol(value: Double): this.type = set(tol, value) - setDefault(tol -> 1E-6) - - /** - * Whether to fit an intercept term. - * Default is true. - * - * @group setParam - */ - @Since("1.4.0") - def setFitIntercept(value: Boolean): this.type = set(fitIntercept, value) - setDefault(fitIntercept -> true) - - /** - * Sets the value of param [[family]]. - * Default is "auto". - * - * @group setParam - */ - @Since("2.1.0") - def setFamily(value: String): this.type = set(family, value) - setDefault(family -> "auto") - - /** - * Whether to standardize the training features before fitting the model. - * The coefficients of models will be always returned on the original scale, - * so it will be transparent for users. Note that with/without standardization, - * the models should be always converged to the same solution when no regularization - * is applied. In R's GLMNET package, the default behavior is true as well. - * Default is true. - * - * @group setParam - */ - @Since("1.5.0") - def setStandardization(value: Boolean): this.type = set(standardization, value) - setDefault(standardization -> true) - - @Since("1.5.0") - override def setThreshold(value: Double): this.type = super.setThreshold(value) - setDefault(threshold -> 0.5) - - @Since("1.5.0") - override def getThreshold: Double = super.getThreshold - - /** - * Sets the value of param [[weightCol]]. - * If this is not set or empty, we treat all instance weights as 1.0. - * Default is not set, so all instances have weight one. - * - * @group setParam - */ - @Since("1.6.0") - def setWeightCol(value: String): this.type = set(weightCol, value) - - @Since("1.5.0") - override def setThresholds(value: Array[Double]): this.type = super.setThresholds(value) - - @Since("1.5.0") - override def getThresholds: Array[Double] = super.getThresholds - - /** - * Suggested depth for treeAggregate (greater than or equal to 2). - * If the dimensions of features or the number of partitions are large, - * this param could be adjusted to a larger size. - * Default is 2. - * - * @group expertSetParam - */ - @Since("2.1.0") - def setAggregationDepth(value: Int): this.type = set(aggregationDepth, value) - setDefault(aggregationDepth -> 2) - - /** - * Set the lower bounds on coefficients if fitting under bound constrained optimization. - * - * @group expertSetParam - */ - @Since("2.2.0") - def setLowerBoundsOnCoefficients(value: Matrix): this.type = set(lowerBoundsOnCoefficients, value) - - /** - * Set the upper bounds on coefficients if fitting under bound constrained optimization. - * - * @group expertSetParam - */ - @Since("2.2.0") - def setUpperBoundsOnCoefficients(value: Matrix): this.type = set(upperBoundsOnCoefficients, value) - - /** - * Set the lower bounds on intercepts if fitting under bound constrained optimization. - * - * @group expertSetParam - */ - @Since("2.2.0") - def setLowerBoundsOnIntercepts(value: Vector): this.type = set(lowerBoundsOnIntercepts, value) - - /** - * Set the upper bounds on intercepts if fitting under bound constrained optimization. 
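 * A hedged sketch of box-constrained fitting (hypothetical shapes for a binomial
 * model with three features, so the coefficient bounds matrix is 1 x 3):
 * {{{
 *   val lr = new LogisticRegression()
 *     .setLowerBoundsOnCoefficients(Matrices.dense(1, 3, Array(0.0, 0.0, 0.0)))
 *     .setUpperBoundsOnIntercepts(Vectors.dense(10.0))
 * }}}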
- * - * @group expertSetParam - */ - @Since("2.2.0") - def setUpperBoundsOnIntercepts(value: Vector): this.type = set(upperBoundsOnIntercepts, value) - - private def assertBoundConstrainedOptimizationParamsValid( - numCoefficientSets: Int, - numFeatures: Int): Unit = { - if (isSet(lowerBoundsOnCoefficients)) { - require($(lowerBoundsOnCoefficients).numRows == numCoefficientSets && - $(lowerBoundsOnCoefficients).numCols == numFeatures, - "The shape of LowerBoundsOnCoefficients must be compatible with (1, number of features) " + - "for binomial regression, or (number of classes, number of features) for multinomial " + - "regression, but found: " + - s"(${getLowerBoundsOnCoefficients.numRows}, ${getLowerBoundsOnCoefficients.numCols}).") - } - if (isSet(upperBoundsOnCoefficients)) { - require($(upperBoundsOnCoefficients).numRows == numCoefficientSets && - $(upperBoundsOnCoefficients).numCols == numFeatures, - "The shape of upperBoundsOnCoefficients must be compatible with (1, number of features) " + - "for binomial regression, or (number of classes, number of features) for multinomial " + - "regression, but found: " + - s"(${getUpperBoundsOnCoefficients.numRows}, ${getUpperBoundsOnCoefficients.numCols}).") - } - if (isSet(lowerBoundsOnIntercepts)) { - require($(lowerBoundsOnIntercepts).size == numCoefficientSets, "The size of " + - "lowerBoundsOnIntercepts must be equal to 1 for binomial regression, or the number of " + - s"classes for multinomial regression, but found: ${getLowerBoundsOnIntercepts.size}.") - } - if (isSet(upperBoundsOnIntercepts)) { - require($(upperBoundsOnIntercepts).size == numCoefficientSets, "The size of " + - "upperBoundsOnIntercepts must be equal to 1 for binomial regression, or the number of " + - s"classes for multinomial regression, but found: ${getUpperBoundsOnIntercepts.size}.") - } - if (isSet(lowerBoundsOnCoefficients) && isSet(upperBoundsOnCoefficients)) { - require($(lowerBoundsOnCoefficients).toArray.zip($(upperBoundsOnCoefficients).toArray) - .forall(x => x._1 <= x._2), "LowerBoundsOnCoefficients should always be " + - "less than or equal to upperBoundsOnCoefficients, but found: " + - s"lowerBoundsOnCoefficients = $getLowerBoundsOnCoefficients, " + - s"upperBoundsOnCoefficients = $getUpperBoundsOnCoefficients.") - } - if (isSet(lowerBoundsOnIntercepts) && isSet(upperBoundsOnIntercepts)) { - require($(lowerBoundsOnIntercepts).toArray.zip($(upperBoundsOnIntercepts).toArray) - .forall(x => x._1 <= x._2), "LowerBoundsOnIntercepts should always be " + - "less than or equal to upperBoundsOnIntercepts, but found: " + - s"lowerBoundsOnIntercepts = $getLowerBoundsOnIntercepts, " + - s"upperBoundsOnIntercepts = $getUpperBoundsOnIntercepts.") - } - } - - private var optInitialModel: Option[LogisticRegressionModel] = None - - private[spark] def setInitialModel(model: LogisticRegressionModel): this.type = { - this.optInitialModel = Some(model) - this - } - - override protected[spark] def train(dataset: Dataset[_]): LogisticRegressionModel = { - val handlePersistence = dataset.storageLevel == StorageLevel.NONE - train(dataset, handlePersistence) - } - - protected[spark] def train( - dataset: Dataset[_], - handlePersistence: Boolean): LogisticRegressionModel = { - val w = if (!isDefined(weightCol) || $(weightCol).isEmpty) lit(1.0) else col($(weightCol)) - val instances: RDD[Instance] = - dataset.select(col($(labelCol)), w, col($(featuresCol))).rdd.map { - case Row(label: Double, weight: Double, features: Vector) => - Instance(label, weight + StaticUtils.ZERO_DOUBLE, 
features) - } - - if (handlePersistence) instances.persist(StorageLevel.MEMORY_AND_DISK) - - val instr = Instrumentation.create(this, instances) - instr.logParams(regParam, elasticNetParam, standardization, threshold, - maxIter, tol, fitIntercept) - - val (summarizer, labelSummarizer) = { - val seqOp = (c: (MultivariateOnlineSummarizer, MultiClassSummarizer), - instance: Instance) => - (c._1.add(instance.features, instance.weight), c._2.add(instance.label, instance.weight)) - - val combOp = (c1: (MultivariateOnlineSummarizer, MultiClassSummarizer), - c2: (MultivariateOnlineSummarizer, MultiClassSummarizer)) => - (c1._1.merge(c2._1), c1._2.merge(c2._2)) - - instances.treeAggregate( - (new MultivariateOnlineSummarizer, new MultiClassSummarizer) - )(seqOp, combOp, $(aggregationDepth)) - } - - val histogram = labelSummarizer.histogram - val numInvalid = labelSummarizer.countInvalid - val numFeatures = summarizer.mean.size - val numFeaturesPlusIntercept = if (getFitIntercept) numFeatures + 1 else numFeatures - - val numClasses = MetadataUtils.getNumClasses(dataset.schema($(labelCol))) match { - case Some(n: Int) => - require(n >= histogram.length, s"Specified number of classes $n was " + - s"less than the number of unique labels ${histogram.length}.") - n - case None => histogram.length - } - - val isMultinomial = getFamily.toLowerCase(Locale.ROOT) match { - case "binomial" => - require(numClasses == 1 || numClasses == 2, s"Binomial family only supports 1 or 2 " + - s"outcome classes but found $numClasses.") - false - case "multinomial" => true - case "auto" => numClasses > 2 - case other => throw new IllegalArgumentException(s"Unsupported family: $other") - } - val numCoefficientSets = if (isMultinomial) numClasses else 1 - - // Check params interaction is valid if fitting under bound constrained optimization. - if (usingBoundConstrainedOptimization) { - assertBoundConstrainedOptimizationParamsValid(numCoefficientSets, numFeatures) - } - - if (isDefined(thresholds)) { - require($(thresholds).length == numClasses, this.getClass.getSimpleName + - ".train() called with non-matching numClasses and thresholds.length." + - s" numClasses=$numClasses, but thresholds has length ${$(thresholds).length}") - } - - instr.logNumClasses(numClasses) - instr.logNumFeatures(numFeatures) - - val (coefficientMatrix, interceptVector, objectiveHistory) = { - if (numInvalid != 0) { - val msg = s"Classification labels should be in [0 to ${numClasses - 1}]. " + - s"Found $numInvalid invalid labels." - logError(msg) - throw new SparkException(msg) - } - - val isConstantLabel = histogram.count(_ != 0.0) == 1 - - if ($(fitIntercept) && isConstantLabel && !usingBoundConstrainedOptimization) { - logWarning(s"All labels are the same value and fitIntercept=true, so the coefficients " + - s"will be zeros. Training is not needed.") - val constantLabelIndex = Vectors.dense(histogram).argmax - val coefMatrix = new SparseMatrix(numCoefficientSets, numFeatures, - new Array[Int](numCoefficientSets + 1), Array.empty[Int], Array.empty[Double], - isTransposed = true).compressed - val interceptVec = if (isMultinomial) { - Vectors.sparse(numClasses, Seq((constantLabelIndex, Double.PositiveInfinity))) - } else { - Vectors.dense(if (numClasses == 2) Double.PositiveInfinity else Double.NegativeInfinity) - } - (coefMatrix, interceptVec, Array.empty[Double]) - } else { - if (!$(fitIntercept) && isConstantLabel) { - logWarning(s"All labels belong to a single class and fitIntercept=false. 
This is " +
-            s"dangerous ground, so the algorithm may not converge.")
-        }
-
-        val featuresMean = summarizer.mean.toArray
-        val featuresStd = summarizer.variance.toArray.map(math.sqrt)
-
-        if (!$(fitIntercept) && (0 until numFeatures).exists { i =>
-          featuresStd(i) == 0.0 && featuresMean(i) != 0.0 }) {
-          logWarning("Fitting LogisticRegressionModel without intercept on dataset with " +
-            "constant nonzero column, Spark MLlib outputs zero coefficients for constant " +
-            "nonzero columns. This behavior is the same as R glmnet but different from LIBSVM.")
-        }
-
-        val regParamL1 = $(elasticNetParam) * $(regParam)
-        val regParamL2 = (1.0 - $(elasticNetParam)) * $(regParam)
-
-        val bcFeaturesStd = instances.context.broadcast(featuresStd.map(t =>
-          if (t != 0d) 1d / t else 0d))
-        val getAggregatorFunc = new LogisticAggregatorX(bcFeaturesStd, numClasses, $(fitIntercept),
-          multinomial = isMultinomial)(_)
-        val getFeaturesStd = (j: Int) => if (j >= 0 && j < numCoefficientSets * numFeatures) {
-          featuresStd(j / numCoefficientSets)
-        } else {
-          0.0
-        }
-
-        val regularization = if (regParamL2 != 0.0) {
-          val shouldApply = (idx: Int) => idx >= 0 && idx < numFeatures * numCoefficientSets
-          Some(new L2Regularization(regParamL2, shouldApply,
-            if ($(standardization)) None else Some(getFeaturesStd)))
-        } else {
-          None
-        }
-
-        val costFun = new RDDLossFunctionX(instances, getAggregatorFunc, regularization,
-          $(aggregationDepth))
-
-        val numCoeffsPlusIntercepts = numFeaturesPlusIntercept * numCoefficientSets
-
-        val (lowerBounds, upperBounds): (Array[Double], Array[Double]) = {
-          if (usingBoundConstrainedOptimization) {
-            val lowerBounds = Array.fill[Double](numCoeffsPlusIntercepts)(Double.MinValue)
-            val upperBounds = Array.fill[Double](numCoeffsPlusIntercepts)(Double.MaxValue)
-            val isSetLowerBoundsOnCoefficients = isSet(lowerBoundsOnCoefficients)
-            val isSetUpperBoundsOnCoefficients = isSet(upperBoundsOnCoefficients)
-            val isSetLowerBoundsOnIntercepts = isSet(lowerBoundsOnIntercepts)
-            val isSetUpperBoundsOnIntercepts = isSet(upperBoundsOnIntercepts)
-
-            var i = 0
-            while (i < numCoeffsPlusIntercepts) {
-              val coefficientSetIndex = i % numCoefficientSets
-              val featureIndex = i / numCoefficientSets
-              if (featureIndex < numFeatures) {
-                if (isSetLowerBoundsOnCoefficients) {
-                  lowerBounds(i) = $(lowerBoundsOnCoefficients)(
-                    coefficientSetIndex, featureIndex) * featuresStd(featureIndex)
-                }
-                if (isSetUpperBoundsOnCoefficients) {
-                  upperBounds(i) = $(upperBoundsOnCoefficients)(
-                    coefficientSetIndex, featureIndex) * featuresStd(featureIndex)
-                }
-              } else {
-                if (isSetLowerBoundsOnIntercepts) {
-                  lowerBounds(i) = $(lowerBoundsOnIntercepts)(coefficientSetIndex)
-                }
-                if (isSetUpperBoundsOnIntercepts) {
-                  upperBounds(i) = $(upperBoundsOnIntercepts)(coefficientSetIndex)
-                }
-              }
-              i += 1
-            }
-            (lowerBounds, upperBounds)
-          } else {
-            (null, null)
-          }
-        }
-
-        val optimizer = if ($(elasticNetParam) == 0.0 || $(regParam) == 0.0) {
-          if (lowerBounds != null && upperBounds != null) {
-            new LBFGSL(BDV[Double](lowerBounds), BDV[Double](upperBounds), $(maxIter), 10, $(tol))
-          } else {
-            new LBFGSL($(maxIter), 10, $(tol))
-          }
-        } else {
-          val standardizationParam = $(standardization)
-          val effectiveL1Reg = Array.fill[Double](numCoeffsPlusIntercepts)(0d)
-            .zipWithIndex.map { case (_, index) =>
-              // Remove the L1 penalization on the intercept
-              val isIntercept = $(fitIntercept) && index >= numFeatures * numCoefficientSets
-              if (isIntercept) {
-                0.0
-              } else {
-                if (standardizationParam) {
-                  regParamL1
-                } else {
-                  val
featureIndex = index / numCoefficientSets
-                  // If `standardization` is false, we still standardize the data
-                  // to improve the rate of convergence; as a result, we have to
-                  // perform this reverse standardization by penalizing each component
-                  // differently to get effectively the same objective function when
-                  // the training dataset is not standardized.
-                  if (featuresStd(featureIndex) != 0.0) {
-                    regParamL1 / featuresStd(featureIndex)
-                  } else {
-                    0.0
-                  }
-                }
-            }
-          new OWLQNL($(maxIter), 10, $(tol), BDV[Double](effectiveL1Reg))
-        }
-
-        /*
-          The coefficients are laid out in column major order during training. Here we initialize
-          a column major matrix of initial coefficients.
-         */
-        val initialCoefWithInterceptMatrix =
-          Matrices.zeros(numCoefficientSets, numFeaturesPlusIntercept)
-
-        val initialModelIsValid = optInitialModel match {
-          case Some(_initialModel) =>
-            val providedCoefs = _initialModel.coefficientMatrix
-            val modelIsValid = (providedCoefs.numRows == numCoefficientSets) &&
-              (providedCoefs.numCols == numFeatures) &&
-              (_initialModel.interceptVector.size == numCoefficientSets) &&
-              (_initialModel.getFitIntercept == $(fitIntercept))
-            if (!modelIsValid) {
-              logWarning(s"Initial coefficients will be ignored! Their dimensions " +
-                s"(${providedCoefs.numRows}, ${providedCoefs.numCols}) did not match the " +
-                s"expected size ($numCoefficientSets, $numFeatures)")
-            }
-            modelIsValid
-          case None => false
-        }
-
-        if (initialModelIsValid) {
-          val providedCoef = optInitialModel.get.coefficientMatrix
-          providedCoef.foreachActive { (classIndex, featureIndex, value) =>
-            // We need to scale the coefficients since they will be trained in the scaled space
-            initialCoefWithInterceptMatrix.update(classIndex, featureIndex,
-              value * featuresStd(featureIndex))
-          }
-          if ($(fitIntercept)) {
-            optInitialModel.get.interceptVector.foreachActive { (classIndex, value) =>
-              initialCoefWithInterceptMatrix.update(classIndex, numFeatures, value)
-            }
-          }
-        } else if ($(fitIntercept) && isMultinomial) {
-          /*
-             For multinomial logistic regression, when we initialize the coefficients as zeros,
-             it will converge faster if we initialize the intercepts such that
-             they follow the distribution of the labels.
-             {{{
-               P(1) = \exp(b_1) / Z
-               ...
-               P(K) = \exp(b_K) / Z
-               where Z = \sum_{k=1}^{K} \exp(b_k)
-             }}}
-             Since this doesn't have a unique solution, one of the solutions that satisfies the
-             above equations is
-             {{{
-               \exp(b_k) = count_k * \exp(\lambda)
-               b_k = \log(count_k) + \lambda
-             }}}
-             \lambda is a free parameter, so choose \lambda such that the
-             mean is centered. This yields
-             {{{
-               b_k = \log(count_k)
-               b_k' = b_k - \mean(b_k)
-             }}}
-           */
-          val rawIntercepts = histogram.map(math.log1p) // add 1 for smoothing (log1p(x) = log(1+x))
-          val rawMean = rawIntercepts.sum / rawIntercepts.length
-          rawIntercepts.indices.foreach { i =>
-            initialCoefWithInterceptMatrix.update(i, numFeatures, rawIntercepts(i) - rawMean)
-          }
-        } else if ($(fitIntercept)) {
-          /*
-             For binary logistic regression, when we initialize the coefficients as zeros,
-             it will converge faster if we initialize the intercept such that
-             it follows the distribution of the labels.
- - {{{ - P(0) = 1 / (1 + \exp(b)), and - P(1) = \exp(b) / (1 + \exp(b)) - }}}, hence - {{{ - b = \log{P(1) / P(0)} = \log{count_1 / count_0} - }}} - */ - initialCoefWithInterceptMatrix.update(0, numFeatures, - math.log(histogram(1) / histogram(0))) - } - - if (usingBoundConstrainedOptimization) { - // Make sure all initial values locate in the corresponding bound. - var i = 0 - while (i < numCoeffsPlusIntercepts) { - val coefficientSetIndex = i % numCoefficientSets - val featureIndex = i / numCoefficientSets - if (initialCoefWithInterceptMatrix(coefficientSetIndex, featureIndex) < lowerBounds(i)) - { - initialCoefWithInterceptMatrix.update( - coefficientSetIndex, featureIndex, lowerBounds(i)) - } else if ( - initialCoefWithInterceptMatrix(coefficientSetIndex, featureIndex) > upperBounds(i)) - { - initialCoefWithInterceptMatrix.update( - coefficientSetIndex, featureIndex, upperBounds(i)) - } - i += 1 - } - } - - val states = optimizer.iterations(new CachedDiffFunction(costFun), - new BDV[Double](initialCoefWithInterceptMatrix.toArray)) - - /* - Note that in Logistic Regression, the objective history (loss + regularization) - is log-likelihood which is invariant under feature standardization. As a result, - the objective history from optimizer is the same as the one in the original space. - */ - val arrayBuilder = mutable.ArrayBuilder.make[Double] - var state: optimizer.State = null - while (states.hasNext) { - state = states.next() - arrayBuilder += state.adjustedValue - } - bcFeaturesStd.destroy(blocking = false) - - if (state == null) { - val msg = s"${optimizer.getClass.getName} failed." - logError(msg) - throw new SparkException(msg) - } - - /* - The coefficients are trained in the scaled space; we're converting them back to - the original space. - - Additionally, since the coefficients were laid out in column major order during training - to avoid extra computation, we convert them back to row major before passing them to the - model. - - Note that the intercept in scaled space and original space is the same; - as a result, no scaling is needed. - */ - val allCoefficients = state.x.toArray.clone() - val allCoefMatrix = new DenseMatrix(numCoefficientSets, numFeaturesPlusIntercept, - allCoefficients) - val denseCoefficientMatrix = new DenseMatrix(numCoefficientSets, numFeatures, - new Array[Double](numCoefficientSets * numFeatures), isTransposed = true) - val interceptVec = if ($(fitIntercept) || !isMultinomial) { - Vectors.zeros(numCoefficientSets) - } else { - Vectors.sparse(numCoefficientSets, Seq.empty) - } - // separate intercepts and coefficients from the combined matrix - allCoefMatrix.foreachActive { (classIndex, featureIndex, value) => - val isIntercept = $(fitIntercept) && (featureIndex == numFeatures) - if (!isIntercept && featuresStd(featureIndex) != 0.0) { - denseCoefficientMatrix.update(classIndex, featureIndex, - value / featuresStd(featureIndex)) - } - if (isIntercept) interceptVec.toArray(classIndex) = value - } - - if ($(regParam) == 0.0 && isMultinomial && !usingBoundConstrainedOptimization) { - /* - When no regularization is applied, the multinomial coefficients lack identifiability - because we do not use a pivot class. We can add any constant value to the coefficients - and get the same likelihood. So here, we choose the mean centered coefficients for - reproducibility. This method follows the approach in glmnet, described here: - - Friedman, et al. 
"Regularization Paths for Generalized Linear Models via - Coordinate Descent," https://core.ac.uk/download/files/153/6287975.pdf - */ - val centers = Array.fill(numFeatures)(0.0) - denseCoefficientMatrix.foreachActive { case (i, j, v) => - centers(j) += v - } - centers.transform(_ / numCoefficientSets) - denseCoefficientMatrix.foreachActive { case (i, j, v) => - denseCoefficientMatrix.update(i, j, v - centers(j)) - } - } - - // center the intercepts when using multinomial algorithm - if ($(fitIntercept) && isMultinomial && !usingBoundConstrainedOptimization) { - val interceptArray = interceptVec.toArray - val interceptMean = interceptArray.sum / interceptArray.length - (0 until interceptVec.size).foreach { i => interceptArray(i) -= interceptMean } - } - (denseCoefficientMatrix.compressed, interceptVec.compressed, arrayBuilder.result()) - } - } - - if (handlePersistence) instances.unpersist() - - val model = copyValues(new LogisticRegressionModel(uid, coefficientMatrix, interceptVector, - numClasses, isMultinomial)) - - val (summaryModel, probabilityColName, predictionColName) = model.findSummaryModel() - val logRegSummary = if (numClasses <= 2) { - new BinaryLogisticRegressionTrainingSummaryImpl( - summaryModel.transform(dataset), - probabilityColName, - predictionColName, - $(labelCol), - $(featuresCol), - objectiveHistory) - } else { - new LogisticRegressionTrainingSummaryImpl( - summaryModel.transform(dataset), - probabilityColName, - predictionColName, - $(labelCol), - $(featuresCol), - objectiveHistory) - } - model.setSummary(Some(logRegSummary)) - instr.logSuccess(model) - model - } - - @Since("1.4.0") - override def copy(extra: ParamMap): LogisticRegression = defaultCopy(extra) -} - -@Since("1.6.0") -object LogisticRegression extends DefaultParamsReadable[LogisticRegression] { - - @Since("1.6.0") - override def load(path: String): LogisticRegression = super.load(path) - - private[classification] val supportedFamilyNames = - Array("auto", "binomial", "multinomial").map(_.toLowerCase(Locale.ROOT)) -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/classification/RandomForestClassifier.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/classification/RandomForestClassifier.scala deleted file mode 100644 index d2dd1ec..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/classification/RandomForestClassifier.scala +++ /dev/null @@ -1,356 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.spark.ml.classification - -import org.json4s.{DefaultFormats, JObject} -import org.json4s.JsonDSL._ - -import org.apache.spark.annotation.Since -import org.apache.spark.ml.feature.LabeledPoint -import org.apache.spark.ml.linalg.{DenseVector, SparseVector, Vector, Vectors} -import org.apache.spark.ml.param.ParamMap -import org.apache.spark.ml.tree._ -import org.apache.spark.ml.tree.impl.RandomForest -import org.apache.spark.ml.util._ -import org.apache.spark.ml.util.DefaultParamsReader.Metadata -import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo} -import org.apache.spark.mllib.tree.model.{RandomForestModel => OldRandomForestModel} -import org.apache.spark.rdd.RDD -import org.apache.spark.sql.{DataFrame, Dataset} -import org.apache.spark.sql.functions._ - - -/** - * Random Forest learning algorithm for - * classification. - * It supports both binary and multiclass labels, as well as both continuous and categorical - * features. - */ -@Since("1.4.0") -class RandomForestClassifier @Since("1.4.0") ( - @Since("1.4.0") override val uid: String) - extends ProbabilisticClassifier[Vector, RandomForestClassifier, RandomForestClassificationModel] - with RandomForestClassifierParams with DefaultParamsWritable { - - @Since("1.4.0") - def this() = this(Identifiable.randomUID("rfc")) - - // Override parameter setters from parent trait for Java API compatibility. - - // Parameters from TreeClassifierParams: - - /** @group setParam */ - @Since("1.4.0") - override def setMaxDepth(value: Int): this.type = set(maxDepth, value) - - /** @group setParam */ - @Since("1.4.0") - override def setMaxBins(value: Int): this.type = set(maxBins, value) - - /** @group setParam */ - @Since("1.4.0") - override def setMinInstancesPerNode(value: Int): this.type = set(minInstancesPerNode, value) - - /** @group setParam */ - @Since("1.4.0") - override def setMinInfoGain(value: Double): this.type = set(minInfoGain, value) - - /** @group expertSetParam */ - @Since("1.4.0") - override def setMaxMemoryInMB(value: Int): this.type = set(maxMemoryInMB, value) - - /** @group expertSetParam */ - @Since("1.4.0") - override def setCacheNodeIds(value: Boolean): this.type = set(cacheNodeIds, value) - - /** - * Specifies how often to checkpoint the cached node IDs. - * E.g. 10 means that the cache will get checkpointed every 10 iterations. - * This is only used if cacheNodeIds is true and if the checkpoint directory is set in - * [[org.apache.spark.SparkContext]]. - * Must be at least 1. 
- * (default = 10) - * @group setParam - */ - @Since("1.4.0") - override def setCheckpointInterval(value: Int): this.type = set(checkpointInterval, value) - - /** @group setParam */ - @Since("1.4.0") - override def setImpurity(value: String): this.type = set(impurity, value) - - // Parameters from TreeEnsembleParams: - - /** @group setParam */ - @Since("1.4.0") - override def setSubsamplingRate(value: Double): this.type = set(subsamplingRate, value) - - /** @group setParam */ - @Since("1.4.0") - override def setSeed(value: Long): this.type = set(seed, value) - - // Parameters from RandomForestParams: - - /** @group setParam */ - @Since("1.4.0") - override def setNumTrees(value: Int): this.type = set(numTrees, value) - - /** @group setParam */ - @Since("1.4.0") - override def setFeatureSubsetStrategy(value: String): this.type = - set(featureSubsetStrategy, value) - - override protected def train(dataset: Dataset[_]): RandomForestClassificationModel = { - val categoricalFeatures: Map[Int, Int] = - MetadataUtils.getCategoricalFeatures(dataset.schema($(featuresCol))) - val numClasses: Int = getNumClasses(dataset) - - if (isDefined(thresholds)) { - require($(thresholds).length == numClasses, this.getClass.getSimpleName + - ".train() called with non-matching numClasses and thresholds.length." + - s" numClasses=$numClasses, but thresholds has length ${$(thresholds).length}") - } - - val oldDataset: RDD[LabeledPoint] = extractLabeledPoints(dataset, numClasses) - val strategy = - super.getOldStrategy(categoricalFeatures, numClasses, OldAlgo.Classification, getOldImpurity) - - val instr = Instrumentation.create(this, oldDataset) - instr.logParams(labelCol, featuresCol, predictionCol, probabilityCol, rawPredictionCol, - impurity, numTrees, featureSubsetStrategy, maxDepth, maxBins, maxMemoryInMB, minInfoGain, - minInstancesPerNode, seed, subsamplingRate, thresholds, cacheNodeIds, checkpointInterval) - - val trees = RandomForest - .run(oldDataset, strategy, getNumTrees, getFeatureSubsetStrategy, getSeed, Some(instr)) - .map(_.asInstanceOf[DecisionTreeClassificationModel]) - - val numFeatures = oldDataset.first().features.size - val m = new RandomForestClassificationModel(uid, trees, numFeatures, numClasses) - instr.logSuccess(m) - m - } - - @Since("1.4.1") - override def copy(extra: ParamMap): RandomForestClassifier = defaultCopy(extra) -} - -@Since("1.4.0") -object RandomForestClassifier extends DefaultParamsReadable[RandomForestClassifier] { - /** Accessor for supported impurity settings: entropy, gini */ - @Since("1.4.0") - final val supportedImpurities: Array[String] = TreeClassifierParams.supportedImpurities - - /** Accessor for supported featureSubsetStrategy settings: auto, all, onethird, sqrt, log2 */ - @Since("1.4.0") - final val supportedFeatureSubsetStrategies: Array[String] = - TreeEnsembleParams.supportedFeatureSubsetStrategies - - @Since("2.0.0") - override def load(path: String): RandomForestClassifier = super.load(path) -} - -/** - * Random Forest model for classification. - * It supports both binary and multiclass labels, as well as both continuous and categorical - * features. - * - * @param _trees Decision trees in the ensemble. - * Warning: These have null parents. 
- */ -@Since("1.4.0") -class RandomForestClassificationModel private[ml] ( - @Since("1.5.0") override val uid: String, - private val _trees: Array[DecisionTreeClassificationModel], - @Since("1.6.0") override val numFeatures: Int, - @Since("1.5.0") override val numClasses: Int) - extends ProbabilisticClassificationModel[Vector, RandomForestClassificationModel] - with RandomForestClassifierParams with TreeEnsembleModel[DecisionTreeClassificationModel] - with MLWritable with Serializable { - - require(_trees.nonEmpty, "RandomForestClassificationModel requires at least 1 tree.") - - /** - * Construct a random forest classification model, with all trees weighted equally. - * - * @param trees Component trees - */ - private[ml] def this( - trees: Array[DecisionTreeClassificationModel], - numFeatures: Int, - numClasses: Int) = - this(Identifiable.randomUID("rfc"), trees, numFeatures, numClasses) - - @Since("1.4.0") - override def trees: Array[DecisionTreeClassificationModel] = _trees - - // Note: We may add support for weights (based on tree performance) later on. - private lazy val _treeWeights: Array[Double] = Array.fill[Double](_trees.length)(1.0) - - @Since("1.4.0") - override def treeWeights: Array[Double] = _treeWeights - - override protected def transformImpl(dataset: Dataset[_]): DataFrame = { - val bcastModel = dataset.sparkSession.sparkContext.broadcast(this) - val predictUDF = udf { (features: Any) => - bcastModel.value.predict(features.asInstanceOf[Vector]) - } - dataset.withColumn($(predictionCol), predictUDF(col($(featuresCol)))) - } - - override protected def predictRaw(features: Vector): Vector = { - // TODO: When we add a generic Bagging class, handle transform there: SPARK-7128 - // Classifies using majority votes. - // Ignore the tree weights since all are 1.0 for now. - val votes = Array.fill[Double](numClasses)(0.0) - _trees.view.foreach { tree => - val classCounts: Array[Double] = tree.rootNode.predictImpl(features).impurityStats.stats - val total = classCounts.sum - if (total != 0) { - var i = 0 - while (i < numClasses) { - votes(i) += classCounts(i) / total - i += 1 - } - } - } - Vectors.dense(votes) - } - - override protected def raw2probabilityInPlace(rawPrediction: Vector): Vector = { - rawPrediction match { - case dv: DenseVector => - ProbabilisticClassificationModel.normalizeToProbabilitiesInPlace(dv) - dv - case sv: SparseVector => - throw new RuntimeException("Unexpected error in RandomForestClassificationModel:" + - " raw2probabilityInPlace encountered SparseVector") - } - } - - @Since("1.4.0") - override def copy(extra: ParamMap): RandomForestClassificationModel = { - copyValues(new RandomForestClassificationModel(uid, _trees, numFeatures, numClasses), extra) - .setParent(parent) - } - - @Since("1.4.0") - override def toString: String = { - s"RandomForestClassificationModel (uid=$uid) with $getNumTrees trees" - } - - /** - * Estimate of the importance of each feature. - * - * Each feature's importance is the average of its importance across all trees in the ensemble - * The importance vector is normalized to sum to 1. This method is suggested by Hastie et al. - * (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.) - * and follows the implementation from scikit-learn. 
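- *
- * For instance, assuming a fitted model `model` (an illustrative sketch, not part of the
- * original API):
- * {{{
- *   // index of the single most influential feature
- *   val topFeature = model.featureImportances.toArray.zipWithIndex.maxBy(_._1)._2
- * }}}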
- * - * @see `DecisionTreeClassificationModel.featureImportances` - */ - @Since("1.5.0") - lazy val featureImportances: Vector = TreeEnsembleModel.featureImportances(trees, numFeatures) - - /** (private[ml]) Convert to a model in the old API */ - private[ml] def toOld: OldRandomForestModel = { - new OldRandomForestModel(OldAlgo.Classification, _trees.map(_.toOld)) - } - - @Since("2.0.0") - override def write: MLWriter = - new RandomForestClassificationModel.RandomForestClassificationModelWriter(this) -} - -@Since("2.0.0") -object RandomForestClassificationModel extends MLReadable[RandomForestClassificationModel] { - - @Since("2.0.0") - override def read: MLReader[RandomForestClassificationModel] = - new RandomForestClassificationModelReader - - @Since("2.0.0") - override def load(path: String): RandomForestClassificationModel = super.load(path) - - private[RandomForestClassificationModel] - class RandomForestClassificationModelWriter(instance: RandomForestClassificationModel) - extends MLWriter { - - override protected def saveImpl(path: String): Unit = { - // Note: numTrees is not currently used, but could be nice to store for fast querying. - val extraMetadata: JObject = Map( - "numFeatures" -> instance.numFeatures, - "numClasses" -> instance.numClasses, - "numTrees" -> instance.getNumTrees) - EnsembleModelReadWrite.saveImpl(instance, path, sparkSession, extraMetadata) - } - } - - private class RandomForestClassificationModelReader - extends MLReader[RandomForestClassificationModel] { - - /** Checked against metadata when loading model */ - private val className = classOf[RandomForestClassificationModel].getName - private val treeClassName = classOf[DecisionTreeClassificationModel].getName - - override def load(path: String): RandomForestClassificationModel = { - implicit val format = DefaultFormats - val (metadata: Metadata, treesData: Array[(Metadata, Node)], _) = - EnsembleModelReadWrite.loadImpl(path, sparkSession, className, treeClassName) - val numFeatures = (metadata.metadata \ "numFeatures").extract[Int] - val numClasses = (metadata.metadata \ "numClasses").extract[Int] - val numTrees = (metadata.metadata \ "numTrees").extract[Int] - - val trees: Array[DecisionTreeClassificationModel] = treesData.map { - case (treeMetadata, root) => - val tree = - new DecisionTreeClassificationModel(treeMetadata.uid, root, numFeatures, numClasses) - DefaultParamsReader.getAndSetParams(tree, treeMetadata) - tree - } - require(numTrees == trees.length, s"RandomForestClassificationModel.load expected $numTrees" + - s" trees based on metadata but found ${trees.length} trees.") - - val model = new RandomForestClassificationModel(metadata.uid, trees, numFeatures, numClasses) - DefaultParamsReader.getAndSetParams(model, metadata) - model - } - } - - /** Convert a model from the old API */ - private[ml] def fromOld( - oldModel: OldRandomForestModel, - parent: RandomForestClassifier, - categoricalFeatures: Map[Int, Int], - numClasses: Int, - numFeatures: Int = -1): RandomForestClassificationModel = { - require(oldModel.algo == OldAlgo.Classification, "Cannot convert RandomForestModel" + - s" with algo=${oldModel.algo} (old API) to RandomForestClassificationModel (new API).") - val newTrees = oldModel.trees.map { tree => - // parent for each tree is null since there is no good way to set this. 
- DecisionTreeClassificationModel.fromOld(tree, null, categoricalFeatures) - } - val uid = if (parent != null) parent.uid else Identifiable.randomUID("rfc") - new RandomForestClassificationModel(uid, newTrees, numFeatures, numClasses) - } -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/feature/IDF.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/feature/IDF.scala deleted file mode 100644 index 46a0730..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/feature/IDF.scala +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.ml.feature - -import org.apache.hadoop.fs.Path - -import org.apache.spark.annotation.Since -import org.apache.spark.ml._ -import org.apache.spark.ml.linalg.{Vector, VectorUDT} -import org.apache.spark.ml.param._ -import org.apache.spark.ml.param.shared._ -import org.apache.spark.ml.util._ -import org.apache.spark.mllib.feature -import org.apache.spark.mllib.linalg.{Vector => OldVector, Vectors => OldVectors} -import org.apache.spark.mllib.util.MLUtils -import org.apache.spark.rdd.RDD -import org.apache.spark.sql._ -import org.apache.spark.sql.functions._ -import org.apache.spark.sql.types.StructType - -/** - * Params for [[IDF]] and [[IDFModel]]. - */ -private[feature] trait IDFBase extends Params with HasInputCol with HasOutputCol { - - /** - * The minimum number of documents in which a term should appear. - * Default: 0 - * @group param - */ - final val minDocFreq = new IntParam( - this, "minDocFreq", "minimum number of documents in which a term should appear for filtering" + - " (>= 0)", ParamValidators.gtEq(0)) - - setDefault(minDocFreq -> 0) - - /** @group getParam */ - def getMinDocFreq: Int = $(minDocFreq) - - /** - * Validate and transform the input schema. - */ - protected def validateAndTransformSchema(schema: StructType): StructType = { - SchemaUtils.checkColumnType(schema, $(inputCol), new VectorUDT) - SchemaUtils.appendColumn(schema, $(outputCol), new VectorUDT) - } -} - -/** - * Compute the Inverse Document Frequency (IDF) given a collection of documents. 
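- *
- * A minimal usage sketch (illustrative only; assumes a DataFrame `docs` with a Vector
- * column "tf" of term-frequency vectors):
- * {{{
- *   val idf = new IDF().setInputCol("tf").setOutputCol("tfidf").setMinDocFreq(2)
- *   val rescaled = idf.fit(docs).transform(docs)
- * }}}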
- */ -@Since("1.4.0") -final class IDF @Since("1.4.0") (@Since("1.4.0") override val uid: String) - extends Estimator[IDFModel] with IDFBase with DefaultParamsWritable { - - @Since("1.4.0") - def this() = this(Identifiable.randomUID("idf")) - - /** @group setParam */ - @Since("1.4.0") - def setInputCol(value: String): this.type = set(inputCol, value) - - /** @group setParam */ - @Since("1.4.0") - def setOutputCol(value: String): this.type = set(outputCol, value) - - /** @group setParam */ - @Since("1.4.0") - def setMinDocFreq(value: Int): this.type = set(minDocFreq, value) - - @Since("2.0.0") - override def fit(dataset: Dataset[_]): IDFModel = { - transformSchema(dataset.schema, logging = true) - val input: RDD[OldVector] = dataset.select($(inputCol)).rdd.map { - case Row(v: Vector) => OldVectors.fromML(v) - } - val idf = new feature.IDF($(minDocFreq)).fit(input) - copyValues(new IDFModel(uid, idf).setParent(this)) - } - - @Since("1.4.0") - override def transformSchema(schema: StructType): StructType = { - validateAndTransformSchema(schema) - } - - @Since("1.4.1") - override def copy(extra: ParamMap): IDF = defaultCopy(extra) -} - -@Since("1.6.0") -object IDF extends DefaultParamsReadable[IDF] { - - @Since("1.6.0") - override def load(path: String): IDF = super.load(path) -} - -/** - * Model fitted by [[IDF]]. - */ -@Since("1.4.0") -class IDFModel private[ml] ( - @Since("1.4.0") override val uid: String, - idfModel: feature.IDFModel) - extends Model[IDFModel] with IDFBase with MLWritable { - - import IDFModel._ - - /** @group setParam */ - @Since("1.4.0") - def setInputCol(value: String): this.type = set(inputCol, value) - - /** @group setParam */ - @Since("1.4.0") - def setOutputCol(value: String): this.type = set(outputCol, value) - - @Since("2.0.0") - override def transform(dataset: Dataset[_]): DataFrame = { - transformSchema(dataset.schema, logging = true) - // TODO: Make the idfModel.transform natively in ml framework to avoid extra conversion. - val idf = udf { vec: Vector => idfModel.transform(OldVectors.fromML(vec)).asML } - dataset.withColumn($(outputCol), idf(col($(inputCol)))) - } - - @Since("1.4.0") - override def transformSchema(schema: StructType): StructType = { - validateAndTransformSchema(schema) - } - - @Since("1.4.1") - override def copy(extra: ParamMap): IDFModel = { - val copied = new IDFModel(uid, idfModel) - copyValues(copied, extra).setParent(parent) - } - - /** Returns the IDF vector. 
*/ - @Since("2.0.0") - def idf: Vector = idfModel.idf.asML - - @Since("1.6.0") - override def write: MLWriter = new IDFModelWriter(this) -} - -@Since("1.6.0") -object IDFModel extends MLReadable[IDFModel] { - - private[IDFModel] class IDFModelWriter(instance: IDFModel) extends MLWriter { - - private case class Data(idf: Vector) - - override protected def saveImpl(path: String): Unit = { - DefaultParamsWriter.saveMetadata(instance, path, sc) - val data = Data(instance.idf) - val dataPath = new Path(path, "data").toString - sparkSession.createDataFrame(Seq(data)).repartition(1).write.parquet(dataPath) - } - } - - private class IDFModelReader extends MLReader[IDFModel] { - - private val className = classOf[IDFModel].getName - - override def load(path: String): IDFModel = { - val metadata = DefaultParamsReader.loadMetadata(path, sc, className) - val dataPath = new Path(path, "data").toString - val data = sparkSession.read.parquet(dataPath) - val Row(idf: Vector) = MLUtils.convertVectorColumnsToML(data, "idf") - .select("idf") - .head() - val model = new IDFModel(metadata.uid, new feature.IDFModel(OldVectors.fromML(idf))) - DefaultParamsReader.getAndSetParams(model, metadata) - model - } - } - - @Since("1.6.0") - override def read: MLReader[IDFModel] = new IDFModelReader - - @Since("1.6.0") - override def load(path: String): IDFModel = super.load(path) -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/fpm/PrefixSpan.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/fpm/PrefixSpan.scala new file mode 100644 index 0000000..10a569a --- /dev/null +++ b/ml-accelerator/src/main/scala/org/apache/spark/ml/fpm/PrefixSpan.scala @@ -0,0 +1,168 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.ml.fpm + +import org.apache.spark.annotation.Since +import org.apache.spark.ml.param._ +import org.apache.spark.ml.util.Identifiable +import org.apache.spark.ml.util.Instrumentation.instrumented +import org.apache.spark.mllib.fpm.{PrefixSpan => mllibPrefixSpan} +import org.apache.spark.sql.{DataFrame, Dataset, Row} +import org.apache.spark.sql.functions.col +import org.apache.spark.sql.types.{ArrayType, LongType, StructField, StructType} + +/** + * A parallel PrefixSpan algorithm to mine frequent sequential patterns. + * The PrefixSpan algorithm is described in J. Pei, et al., PrefixSpan: Mining Sequential Patterns + * Efficiently by Prefix-Projected Pattern Growth + * (see here). + * This class is not yet an Estimator/Transformer, use `findFrequentSequentialPatterns` method to + * run the PrefixSpan algorithm. 
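+ *
+ * A minimal usage sketch (illustrative only; assumes a SparkSession `spark` and
+ * `import spark.implicits._`):
+ * {{{
+ *   val sequences = Seq(
+ *     Seq(Seq(1, 2), Seq(3)),
+ *     Seq(Seq(1), Seq(3, 2), Seq(1, 2))).toDF("sequence")
+ *   val patterns = new PrefixSpan()
+ *     .setMinSupport(0.5)
+ *     .findFrequentSequentialPatterns(sequences)
+ * }}}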
+ * + * @see Sequential Pattern Mining + * (Wikipedia) + */ +@Since("2.4.0") +final class PrefixSpan(@Since("2.4.0") override val uid: String) extends Params { + + @Since("2.4.0") + def this() = this(Identifiable.randomUID("prefixSpan")) + + /** + * Param for the minimal support level (default: `0.1`). + * Sequential patterns that appear more than (minSupport * size-of-the-dataset) times are + * identified as frequent sequential patterns. + * @group param + */ + @Since("2.4.0") + val minSupport = new DoubleParam(this, "minSupport", "The minimal support level of the " + + "sequential pattern. Sequential pattern that appears more than " + + "(minSupport * size-of-the-dataset) " + + "times will be output.", ParamValidators.gtEq(0.0)) + + /** @group getParam */ + @Since("2.4.0") + def getMinSupport: Double = $(minSupport) + + /** @group setParam */ + @Since("2.4.0") + def setMinSupport(value: Double): this.type = set(minSupport, value) + + /** + * Param for the maximal pattern length (default: `10`). + * @group param + */ + @Since("2.4.0") + val maxPatternLength = new IntParam(this, "maxPatternLength", + "The maximal length of the sequential pattern.", + ParamValidators.gt(0)) + + /** @group getParam */ + @Since("2.4.0") + def getMaxPatternLength: Int = $(maxPatternLength) + + /** @group setParam */ + @Since("2.4.0") + def setMaxPatternLength(value: Int): this.type = set(maxPatternLength, value) + + /** + * Param for the maximum number of items (including delimiters used in the internal storage + * format) allowed in a projected database before local processing (default: `32000000`). + * If a projected database exceeds this size, another iteration of distributed prefix growth + * is run. + * @group param + */ + @Since("2.4.0") + val maxLocalProjDBSize = new LongParam(this, "maxLocalProjDBSize", + "The maximum number of items (including delimiters used in the internal storage format) " + + "allowed in a projected database before local processing. If a projected database exceeds " + + "this size, another iteration of distributed prefix growth is run.", + ParamValidators.gt(0)) + + /** @group getParam */ + @Since("2.4.0") + def getMaxLocalProjDBSize: Long = $(maxLocalProjDBSize) + + /** @group setParam */ + @Since("2.4.0") + def setMaxLocalProjDBSize(value: Long): this.type = set(maxLocalProjDBSize, value) + + /** + * Param for the name of the sequence column in dataset (default "sequence"), rows with + * nulls in this column are ignored. + * @group param + */ + @Since("2.4.0") + val sequenceCol = new Param[String](this, "sequenceCol", "The name of the sequence column in " + + "dataset, rows with nulls in this column are ignored.") + + /** @group getParam */ + @Since("2.4.0") + def getSequenceCol: String = $(sequenceCol) + + /** @group setParam */ + @Since("2.4.0") + def setSequenceCol(value: String): this.type = set(sequenceCol, value) + + setDefault(minSupport -> 0.1, maxPatternLength -> 10, maxLocalProjDBSize -> 32000000, + sequenceCol -> "sequence") + + /** + * Finds the complete set of frequent sequential patterns in the input sequences of itemsets. + * + * @param dataset A dataset or a dataframe containing a sequence column which is + * {{{ArrayType(ArrayType(T))}}} type, T is the item type for the input dataset. + * @return A `DataFrame` that contains columns of sequence and corresponding frequency. 
+ * The schema of it will be: + * - `sequence: ArrayType(ArrayType(T))` (T is the item type) + * - `freq: Long` + */ + @Since("2.4.0") + def findFrequentSequentialPatterns(dataset: Dataset[_]): DataFrame = instrumented { instr => + instr.logDataset(dataset) + instr.logParams(this, params: _*) + + val sequenceColParam = $(sequenceCol) + val inputType = dataset.schema(sequenceColParam).dataType + require(inputType.isInstanceOf[ArrayType] && + inputType.asInstanceOf[ArrayType].elementType.isInstanceOf[ArrayType], + s"The input column must be ArrayType and the array element type must also be ArrayType, " + + s"but got $inputType.") + + val data = dataset.select(sequenceColParam) + val sequences = data.where(col(sequenceColParam).isNotNull).rdd + .map(r => r.getSeq[scala.collection.Seq[Any]](0).map(_.toArray).toArray) + + val mllibPrefixSpan = new mllibPrefixSpan() + .setMinSupport($(minSupport)) + .setMaxPatternLength($(maxPatternLength)) + .setMaxLocalProjDBSize($(maxLocalProjDBSize)) + + val rows = mllibPrefixSpan.run(sequences).freqSequences.map(f => Row(f.sequence, f.freq)) + val schema = StructType(Seq( + StructField("sequence", dataset.schema(sequenceColParam).dataType, nullable = false), + StructField("freq", LongType, nullable = false))) + val freqSequences = dataset.sparkSession.createDataFrame(rows, schema) + + freqSequences + } + + @Since("2.4.0") + override def copy(extra: ParamMap): PrefixSpan = defaultCopy(extra) + +} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/optim/aggregator/DifferentiableLossAggregatorX.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/optim/aggregator/DifferentiableLossAggregatorX.scala deleted file mode 100644 index 48bc3b8..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/optim/aggregator/DifferentiableLossAggregatorX.scala +++ /dev/null @@ -1,98 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.spark.ml.optim.aggregator - -import it.unimi.dsi.fastutil.doubles.DoubleArrayList - -import org.apache.spark.ml.linalg.{BLAS, Vector, Vectors} - -/** - * A parent trait for aggregators used in fitting MLlib models. This parent trait implements - * some of the common code shared between concrete instances of aggregators. Subclasses of this - * aggregator need only implement the `add` method. - * - * @tparam Datum The type of the instances added to the aggregator to update the loss and gradient. - * @tparam Agg Specialization of [[DifferentiableLossAggregator]]. 
Classes that subclass this - * type need to use this parameter to specify the concrete type of the aggregator. - */ -private[ml] trait DifferentiableLossAggregatorX[ - Datum, - Agg <: DifferentiableLossAggregatorX[Datum, Agg]] extends Serializable { - - self: Agg => // enforce classes that extend this to be the same type as `Agg` - - protected var weightSum: Double = 0.0 - protected var lossSum: Double = 0.0 - - /** The dimension of the gradient array. */ - protected val dim: Int - - /** Array of gradient values that are mutated when new instances are added to the aggregator. */ - protected lazy val gradientSumArray: DoubleArrayList = - DoubleArrayList.wrap(Array.ofDim[Double](dim)) - - /** Add a single data point to this aggregator. */ - def add(instance: Datum): Agg - - /** Merge two aggregators. The `this` object will be modified in place and returned. */ - def merge(other: Agg): Agg = { - require(dim == other.dim, s"Dimensions mismatch when merging with another " + - s"${getClass.getSimpleName}. Expecting $dim but got ${other.dim}.") - - if (other.weightSum != 0) { - weightSum += other.weightSum - lossSum += other.lossSum - - var i = 0 - val localThisGradientSumArray = this.gradientSumArray - val localOtherGradientSumArray = other.gradientSumArray - while (i < dim) { - val e = localThisGradientSumArray.getDouble(i) - localThisGradientSumArray.set(i, e + localOtherGradientSumArray.getDouble(i)) - i += 1 - } - } - this - } - - /** The current weighted averaged gradient. */ - def gradient: Vector = { - require(weightSum > 0.0, s"The effective number of instances should be " + - s"greater than 0.0, but was $weightSum.") - val result = Vectors.dense(gradientSumArray.elements().clone()) - BLAS.scal(1.0 / weightSum, result) - result - } - - /** Weighted count of instances in this aggregator. */ - def weight: Double = weightSum - - /** The current loss value of this aggregator. */ - def loss: Double = { - require(weightSum > 0.0, s"The effective number of instances should be " + - s"greater than 0.0, but was $weightSum.") - lossSum / weightSum - } - -} - diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/optim/aggregator/HingeAggregatorX.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/optim/aggregator/HingeAggregatorX.scala deleted file mode 100644 index d4a4f1b..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/optim/aggregator/HingeAggregatorX.scala +++ /dev/null @@ -1,112 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.spark.ml.optim.aggregator - -import it.unimi.dsi.fastutil.doubles.DoubleArrayList - -import org.apache.spark.broadcast.Broadcast -import org.apache.spark.ml.feature.Instance -import org.apache.spark.ml.linalg._ - -/** - * HingeAggregator computes the gradient and loss for Hinge loss function as used in - * binary classification for instances in sparse or dense vector in an online fashion. - * - * Two HingeAggregators can be merged together to have a summary of loss and gradient of - * the corresponding joint dataset. - * - * This class standardizes feature values during computation using bcFeaturesStd. - * - * @param bcCoefficients The coefficients corresponding to the features. - * @param fitIntercept Whether to fit an intercept term. - * @param bcFeaturesStd The standard deviation values of the features. - */ -private[ml] class HingeAggregatorX( - bcFeaturesStd: Broadcast[Array[Double]], - fitIntercept: Boolean)(bcCoefficients: Broadcast[Vector]) - extends DifferentiableLossAggregatorX[Instance, HingeAggregatorX] { - - private val numFeatures: Int = bcFeaturesStd.value.length - private val numFeaturesPlusIntercept: Int = if (fitIntercept) numFeatures + 1 else numFeatures - @transient private lazy val coefficientsArray = bcCoefficients.value match { - case DenseVector(values) => values - case _ => throw new IllegalArgumentException(s"coefficients only supports dense vector" + - s" but got type ${bcCoefficients.value.getClass}.") - } - protected override val dim: Int = numFeaturesPlusIntercept - - /** - * Add a new training instance to this HingeAggregator, and update the loss and gradient - * of the objective function. - * - * @param instance The instance of data point to be added. - * @return This HingeAggregator object. - */ - def add(instance: Instance): this.type = { - instance match { case Instance(label, weight, features) => - require(numFeatures == features.size, s"Dimensions mismatch when adding new instance." 
+ - s" Expecting $numFeatures but got ${features.size}.") - require(weight >= 0.0, s"instance weight, $weight has to be >= 0.0") - - if (weight == 0.0) return this - val localFeaturesStd = DoubleArrayList.wrap(bcFeaturesStd.value) - val localCoefficients = DoubleArrayList.wrap(coefficientsArray) - val localGradientSumArray = gradientSumArray - - val dotProduct = { - var sum = 0.0 - features.foreachActive { (index, value) => - sum += localCoefficients.getDouble(index) * value * localFeaturesStd.getDouble(index) - } - if (fitIntercept) sum += localCoefficients.getDouble(numFeaturesPlusIntercept - 1) - sum - } - // Our loss function with {0, 1} labels is max(0, 1 - (2y - 1) (f_w(x))) - // Therefore the gradient is -(2y - 1)*x - val labelScaled = 2 * label - 1.0 - val loss = if (1.0 > labelScaled * dotProduct) { - (1.0 - labelScaled * dotProduct) * weight - } else { - 0.0 - } - - if (1.0 > labelScaled * dotProduct) { - val gradientScale = -labelScaled * weight - features.foreachActive { (index, value) => - val e = localGradientSumArray.getDouble(index) - localGradientSumArray.set(index, e + value * gradientScale - * localFeaturesStd.getDouble(index)) - } - if (fitIntercept) { - val e = localGradientSumArray.getDouble(localGradientSumArray.size() - 1) - localGradientSumArray.set(localGradientSumArray.size() - 1, e + gradientScale) - } - } - - lossSum += loss - weightSum += weight - this - } - } -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/optim/aggregator/HuberAggregatorX.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/optim/aggregator/HuberAggregatorX.scala deleted file mode 100644 index c35cdb5..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/optim/aggregator/HuberAggregatorX.scala +++ /dev/null @@ -1,163 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.spark.ml.optim.aggregator - -import it.unimi.dsi.fastutil.doubles.DoubleArrayList - -import org.apache.spark.broadcast.Broadcast -import org.apache.spark.ml.feature.Instance -import org.apache.spark.ml.linalg.{DenseVector, Vector} - -/** - * HuberAggregator computes the gradient and loss for a huber loss function, - * as used in robust regression for samples in sparse or dense vector in an online fashion. - * - * The huber loss function based on: - * Art B. Owen (2006), - * A robust hybrid of lasso and ridge regression. - * - * Two HuberAggregator can be merged together to have a summary of loss and gradient of - * the corresponding joint dataset. 
- * - * The huber loss function is given by - * - *
- * $$ - * \begin{align} - * \min_{w, \sigma}\frac{1}{2n}{\sum_{i=1}^n\left(\sigma + - * H_m\left(\frac{X_{i}w - y_{i}}{\sigma}\right)\sigma\right) + \frac{1}{2}\lambda {||w||_2}^2} - * \end{align} - * $$ - *
- * - * where - * - *
- * $$ - * \begin{align} - * H_m(z) = \begin{cases} - * z^2, & \text {if } |z| < \epsilon, \\ - * 2\epsilon|z| - \epsilon^2, & \text{otherwise} - * \end{cases} - * \end{align} - * $$ - *
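- *
- * Pointwise, $H_m$ can be sketched as (illustrative only, not part of this file's API):
- * {{{
- *   def huberLoss(z: Double, eps: Double): Double =
- *     if (math.abs(z) < eps) z * z else 2.0 * eps * math.abs(z) - eps * eps
- * }}}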
- * - * It is advised to set the parameter $\epsilon$ to 1.35 to achieve 95% statistical efficiency - * for normally distributed data. Please refer to chapter 2 of - * - * A robust hybrid of lasso and ridge regression for more detail. - * - * @param fitIntercept Whether to fit an intercept term. - * @param epsilon The shape parameter to control the amount of robustness. - * @param bcFeaturesStd The broadcast standard deviation values of the features. - * @param bcParameters including three parts: the regression coefficients corresponding - * to the features, the intercept (if fitIntercept is ture) - * and the scale parameter (sigma). - */ -private[ml] class HuberAggregatorX( - fitIntercept: Boolean, - epsilon: Double, - bcFeaturesStd: Broadcast[Array[Double]])(bcParameters: Broadcast[Vector]) - extends DifferentiableLossAggregatorX[Instance, HuberAggregatorX] { - - protected override val dim: Int = bcParameters.value.size - private val numFeatures: Int = if (fitIntercept) dim - 2 else dim - 1 - - @transient private lazy val parametersArray = DoubleArrayList.wrap(bcParameters.value match { - case DenseVector(values) => values - case _ => throw new IllegalArgumentException(s"coefficients only supports dense vector but " + - s"got type ${bcParameters.value.getClass}.)") - }) - @transient private lazy val featuresStd = DoubleArrayList.wrap(bcFeaturesStd.value) - @transient private lazy val sigma: Double = parametersArray.getDouble(dim - 1) - @transient private lazy val intercept: Double = if (fitIntercept) { - parametersArray.getDouble(dim - 2) - } else { - 0.0 - } - - /** - * Add a new training instance to this HuberAggregator, and update the loss and gradient - * of the objective function. - * - * @param instance The instance of data point to be added. - * @return This HuberAggregator object. - */ - def add(instance: Instance): HuberAggregatorX = { - instance match { case Instance(label, weight, features) => - require(numFeatures == features.size, s"Dimensions mismatch when adding new sample." 
+ - s" Expecting $numFeatures but got ${features.size}.") - require(weight >= 0.0, s"instance weight, $weight has to be >= 0.0") - - if (weight == 0.0) return this - val localFeaturesStd = featuresStd - val localCoefficients = parametersArray - val localGradientSumArray = gradientSumArray - - val margin = { - var sum = 0.0 - features.foreachActive { (index, value) => - sum += localCoefficients.getDouble(index) * value * localFeaturesStd.getDouble(index) - } - if (fitIntercept) sum += intercept - sum - } - val linearLoss = label - margin - - if (math.abs(linearLoss) <= sigma * epsilon) { - lossSum += 0.5 * weight * (sigma + math.pow(linearLoss, 2.0) / sigma) - val linearLossDivSigma = linearLoss / sigma - - features.foreachActive { (index, value) => - localGradientSumArray.set(index, localGradientSumArray.getDouble(index) - - 1.0 * weight * linearLossDivSigma * value * localFeaturesStd.getDouble(index)) - } - if (fitIntercept) { - localGradientSumArray.set(dim - 2, localGradientSumArray.getDouble(dim - 2) - - 1.0 * weight * linearLossDivSigma) - } - localGradientSumArray.set(dim - 1, localGradientSumArray.getDouble(dim - 1) - + 0.5 * weight * (1.0 - math.pow(linearLossDivSigma, 2.0))) - } else { - val sign = if (linearLoss >= 0) -1.0 else 1.0 - lossSum += 0.5 * weight * - (sigma + 2.0 * epsilon * math.abs(linearLoss) - sigma * epsilon * epsilon) - - features.foreachActive { (index, value) => - localGradientSumArray.set(index, localGradientSumArray.getDouble(index) - + weight * sign * epsilon * value * localFeaturesStd.getDouble(index)) - } - if (fitIntercept) { - localGradientSumArray.set(dim - 2, localGradientSumArray.getDouble(dim - 2) - + weight * sign * epsilon) - } - localGradientSumArray.set(dim - 1, localGradientSumArray.getDouble(dim - 1) - + 0.5 * weight * (1.0 - epsilon * epsilon)) - } - - weightSum += weight - this - } - } -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/optim/aggregator/LeastSquaresAggregatorX.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/optim/aggregator/LeastSquaresAggregatorX.scala deleted file mode 100644 index 9677969..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/optim/aggregator/LeastSquaresAggregatorX.scala +++ /dev/null @@ -1,226 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.spark.ml.optim.aggregator - -import it.unimi.dsi.fastutil.doubles.DoubleArrayList - -import org.apache.spark.broadcast.Broadcast -import org.apache.spark.ml.feature.Instance -import org.apache.spark.ml.linalg.{BLAS, Vector, Vectors} - -/** - * LeastSquaresAggregator computes the gradient and loss for a Least-squared loss function, - * as used in linear regression for samples in sparse or dense vector in an online fashion. - * - * Two LeastSquaresAggregator can be merged together to have a summary of loss and gradient of - * the corresponding joint dataset. - * - * For improving the convergence rate during the optimization process, and also preventing against - * features with very large variances exerting an overly large influence during model training, - * package like R's GLMNET performs the scaling to unit variance and removing the mean to reduce - * the condition number, and then trains the model in scaled space but returns the coefficients in - * the original scale. See page 9 in http://cran.r-project.org/web/packages/glmnet/glmnet.pdf - * - * However, we don't want to apply the `StandardScaler` on the training dataset, and then cache - * the standardized dataset since it will create a lot of overhead. As a result, we perform the - * scaling implicitly when we compute the objective function. The following is the mathematical - * derivation. - * - * Note that we don't deal with intercept by adding bias here, because the intercept - * can be computed using closed form after the coefficients are converged. - * See this discussion for detail. - * http://stats.stackexchange.com/questions/13617/how-is-the-intercept-computed-in-glmnet - * - * When training with intercept enabled, - * The objective function in the scaled space is given by - * - *
- * $$ - * L = 1/2n ||\sum_i w_i(x_i - \bar{x_i}) / \hat{x_i} - (y - \bar{y}) / \hat{y}||^2, - * $$ - *
- *
- * where $\bar{x_i}$ is the mean of $x_i$, $\hat{x_i}$ is the standard deviation of $x_i$,
- * $\bar{y}$ is the mean of the label, and $\hat{y}$ is the standard deviation of the label.
- *
- * If we fit with the intercept disabled (that is, forced through 0.0),
- * we can use the same equation, except that we set $\bar{y}$ and $\bar{x_i}$ to 0 instead
- * of the respective means.
- *
- * This can be rewritten as
- *
- * <blockquote>
- * $$ - * \begin{align} - * L &= 1/2n ||\sum_i (w_i/\hat{x_i})x_i - \sum_i (w_i/\hat{x_i})\bar{x_i} - y / \hat{y} - * + \bar{y} / \hat{y}||^2 \\ - * &= 1/2n ||\sum_i w_i^\prime x_i - y / \hat{y} + offset||^2 = 1/2n diff^2 - * \end{align} - * $$ - *
- * - * where $w_i^\prime$ is the effective coefficients defined by $w_i/\hat{x_i}$, offset is - * - *
- * $$ - * - \sum_i (w_i/\hat{x_i})\bar{x_i} + \bar{y} / \hat{y}. - * $$ - *
- * - * and diff is - * - *
- * $$ - * \sum_i w_i^\prime x_i - y / \hat{y} + offset - * $$ - *
- * - * Note that the effective coefficients and offset don't depend on training dataset, - * so they can be precomputed. - * - * Now, the first derivative of the objective function in scaled space is - * - *
- * $$ - * \frac{\partial L}{\partial w_i} = diff/N (x_i - \bar{x_i}) / \hat{x_i} - * $$ - *
- * - * However, $(x_i - \bar{x_i})$ will densify the computation, so it's not - * an ideal formula when the training dataset is sparse format. - * - * This can be addressed by adding the dense $\bar{x_i} / \hat{x_i}$ terms - * in the end by keeping the sum of diff. The first derivative of total - * objective function from all the samples is - * - * - *
- * $$ - * \begin{align} - * \frac{\partial L}{\partial w_i} &= - * 1/N \sum_j diff_j (x_{ij} - \bar{x_i}) / \hat{x_i} \\ - * &= 1/N ((\sum_j diff_j x_{ij} / \hat{x_i}) - diffSum \bar{x_i} / \hat{x_i}) \\ - * &= 1/N ((\sum_j diff_j x_{ij} / \hat{x_i}) + correction_i) - * \end{align} - * $$ - *
- *
- * where $correction_i = - diffSum \bar{x_i} / \hat{x_i}$
- *
- * Simple math shows that diffSum is actually zero, so we don't even
- * need to add the correction terms in the end. From the definition of diff,
- *
- * <blockquote>
- * $$ - * \begin{align} - * diffSum &= \sum_j (\sum_i w_i(x_{ij} - \bar{x_i}) - * / \hat{x_i} - (y_j - \bar{y}) / \hat{y}) \\ - * &= N * (\sum_i w_i(\bar{x_i} - \bar{x_i}) / \hat{x_i} - (\bar{y} - \bar{y}) / \hat{y}) \\ - * &= 0 - * \end{align} - * $$ - *
- * - * As a result, the first derivative of the total objective function only depends on - * the training dataset, which can be easily computed in distributed fashion, and is - * sparse format friendly. - * - *
- * $$
- * \frac{\partial L}{\partial w_i} = 1/N (\sum_j diff_j x_{ij} / \hat{x_i})
- * $$
- * </blockquote>
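- *
- * Per sample, the add() method below therefore reduces to (a compact restatement of the
- * code, shown here for readability):
- * {{{
- *   val diff = BLAS.dot(features, effectiveCoefficientsVector) - label / labelStd + offset
- *   lossSum += weight * diff * diff / 2.0
- * }}}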
- * - * @note The constructor is curried, since the cost function will repeatedly create new versions - * of this class for different coefficient vectors. - * - * @param labelStd The standard deviation value of the label. - * @param labelMean The mean value of the label. - * @param fitIntercept Whether to fit an intercept term. - * @param bcFeaturesStd The broadcast standard deviation values of the features. - * @param bcFeaturesMean The broadcast mean values of the features. - * @param bcCoefficients The broadcast coefficients corresponding to the features. - */ -private[ml] class LeastSquaresAggregatorX( - labelStd: Double, - labelMean: Double, - fitIntercept: Boolean, - bcFeaturesStd: Broadcast[Array[Double]], - bcFeaturesMean: Broadcast[Array[Double]])(bcCoefficients: Broadcast[Vector]) - extends DifferentiableLossAggregatorX[Instance, LeastSquaresAggregatorX] { - require(labelStd > 0.0, s"${this.getClass.getName} requires the label standard " + - s"deviation to be positive.") - - private val numFeatures = bcFeaturesStd.value.length - protected override val dim: Int = numFeatures - // make transient so we do not serialize between aggregation stages - @transient private lazy val featuresStd = DoubleArrayList.wrap(bcFeaturesStd.value) - @transient private lazy val effectiveCoefAndOffset = { - val coefficientsArray = bcCoefficients.value.toArray.clone() - val featuresMean = bcFeaturesMean.value - var sum = 0.0 - var i = 0 - val len = coefficientsArray.length - while (i < len) { - coefficientsArray(i) *= featuresStd.getDouble(i) - sum += coefficientsArray(i) * featuresMean(i) - i += 1 - } - val offset = if (fitIntercept) labelMean / labelStd - sum else 0.0 - (Vectors.dense(coefficientsArray), offset) - } - // do not use tuple assignment above because it will circumvent the @transient tag - @transient private lazy val effectiveCoefficientsVector = effectiveCoefAndOffset._1 - @transient private lazy val offset = effectiveCoefAndOffset._2 - - /** - * Add a new training instance to this LeastSquaresAggregator, and update the loss and gradient - * of the objective function. - * - * @param instance The instance of data point to be added. - * @return This LeastSquaresAggregator object. - */ - def add(instance: Instance): LeastSquaresAggregatorX = { - instance match { case Instance(label, weight, features) => - require(numFeatures == features.size, s"Dimensions mismatch when adding new sample." + - s" Expecting $numFeatures but got ${features.size}.") - require(weight >= 0.0, s"instance weight, $weight has to be >= 0.0") - - if (weight == 0.0) return this - - val diff = BLAS.dot(features, effectiveCoefficientsVector) - label / labelStd + offset - - if (diff != 0) { - val localGradientSumArray = gradientSumArray - val localFeaturesStd = featuresStd - features.foreachActive { (index, value) => - localGradientSumArray.set(index, localGradientSumArray.getDouble(index) - + weight * diff * value * localFeaturesStd.getDouble(index)) - } - lossSum += weight * diff * diff / 2.0 - } - weightSum += weight - this - } - } -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/optim/aggregator/LogisticAggregatorX.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/optim/aggregator/LogisticAggregatorX.scala deleted file mode 100644 index 6ac09f6..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/optim/aggregator/LogisticAggregatorX.scala +++ /dev/null @@ -1,379 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. 
-* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.spark.ml.optim.aggregator - -import it.unimi.dsi.fastutil.doubles.DoubleArrayList - -import org.apache.spark.broadcast.Broadcast -import org.apache.spark.internal.Logging -import org.apache.spark.ml.feature.Instance -import org.apache.spark.ml.linalg.{DenseVector, Vector} -import org.apache.spark.mllib.util.MLUtils - -/** - * LogisticAggregator computes the gradient and loss for binary or multinomial logistic (softmax) - * loss function, as used in classification for instances in sparse or dense vector in an online - * fashion. - * - * Two LogisticAggregators can be merged together to have a summary of loss and gradient of - * the corresponding joint dataset. - * - * For improving the convergence rate during the optimization process and also to prevent against - * features with very large variances exerting an overly large influence during model training, - * packages like R's GLMNET perform the scaling to unit variance and remove the mean in order to - * reduce the condition number. The model is then trained in this scaled space, but returns the - * coefficients in the original scale. See page 9 in - * http://cran.r-project.org/web/packages/glmnet/glmnet.pdf - * - * However, we don't want to apply the [[org.apache.spark.ml.feature.StandardScaler]] on the - * training dataset, and then cache the standardized dataset since it will create a lot of overhead. - * As a result, we perform the scaling implicitly when we compute the objective function (though - * we do not subtract the mean). - * - * Note that there is a difference between multinomial (softmax) and binary loss. The binary case - * uses one outcome class as a "pivot" and regresses the other class against the pivot. In the - * multinomial case, the softmax loss function is used to model each class probability - * independently. Using softmax loss produces `K` sets of coefficients, while using a pivot class - * produces `K - 1` sets of coefficients (a single coefficient vector in the binary case). In the - * binary case, we can say that the coefficients are shared between the positive and negative - * classes. When regularization is applied, multinomial (softmax) loss will produce a result - * different from binary loss since the positive and negative don't share the coefficients while the - * binary regression shares the coefficients between positive and negative. - * - * The following is a mathematical derivation for the multinomial (softmax) loss. 
- * - * The probability of the multinomial outcome $y$ taking on any of the K possible outcomes is: - * - *
- * $$ - * P(y_i=0|\vec{x}_i, \beta) = \frac{e^{\vec{x}_i^T \vec{\beta}_0}}{\sum_{k=0}^{K-1} - * e^{\vec{x}_i^T \vec{\beta}_k}} \\ - * P(y_i=1|\vec{x}_i, \beta) = \frac{e^{\vec{x}_i^T \vec{\beta}_1}}{\sum_{k=0}^{K-1} - * e^{\vec{x}_i^T \vec{\beta}_k}}\\ - * P(y_i=K-1|\vec{x}_i, \beta) = \frac{e^{\vec{x}_i^T \vec{\beta}_{K-1}}\,}{\sum_{k=0}^{K-1} - * e^{\vec{x}_i^T \vec{\beta}_k}} - * $$ - *
- * - * The model coefficients $\beta = (\beta_0, \beta_1, \beta_2, ..., \beta_{K-1})$ become a matrix - * which has dimension of $K \times (N+1)$ if the intercepts are added. If the intercepts are not - * added, the dimension will be $K \times N$. - * - * Note that the coefficients in the model above lack identifiability. That is, any constant scalar - * can be added to all of the coefficients and the probabilities remain the same. - * - *
- * $$ - * \begin{align} - * \frac{e^{\vec{x}_i^T \left(\vec{\beta}_0 + \vec{c}\right)}}{\sum_{k=0}^{K-1} - * e^{\vec{x}_i^T \left(\vec{\beta}_k + \vec{c}\right)}} - * = \frac{e^{\vec{x}_i^T \vec{\beta}_0}e^{\vec{x}_i^T \vec{c}}\,}{e^{\vec{x}_i^T \vec{c}} - * \sum_{k=0}^{K-1} e^{\vec{x}_i^T \vec{\beta}_k}} - * = \frac{e^{\vec{x}_i^T \vec{\beta}_0}}{\sum_{k=0}^{K-1} e^{\vec{x}_i^T \vec{\beta}_k}} - * \end{align} - * $$ - *
- * - * However, when regularization is added to the loss function, the coefficients are indeed - * identifiable because there is only one set of coefficients which minimizes the regularization - * term. When no regularization is applied, we choose the coefficients with the minimum L2 - * penalty for consistency and reproducibility. For further discussion see: - * - * Friedman, et al. "Regularization Paths for Generalized Linear Models via Coordinate Descent" - * - * The loss of objective function for a single instance of data (we do not include the - * regularization term here for simplicity) can be written as - * - *
- * $$ - * \begin{align} - * \ell\left(\beta, x_i\right) &= -log{P\left(y_i \middle| \vec{x}_i, \beta\right)} \\ - * &= log\left(\sum_{k=0}^{K-1}e^{\vec{x}_i^T \vec{\beta}_k}\right) - \vec{x}_i^T \vec{\beta}_y\\ - * &= log\left(\sum_{k=0}^{K-1} e^{margins_k}\right) - margins_y - * \end{align} - * $$ - *
- * - * where ${margins}_k = \vec{x}_i^T \vec{\beta}_k$. - * - * For optimization, we have to calculate the first derivative of the loss function, and a simple - * calculation shows that - * - *
- * $$ - * \begin{align} - * \frac{\partial \ell(\beta, \vec{x}_i, w_i)}{\partial \beta_{j, k}} - * &= x_{i,j} \cdot w_i \cdot \left(\frac{e^{\vec{x}_i \cdot \vec{\beta}_k}}{\sum_{k'=0}^{K-1} - * e^{\vec{x}_i \cdot \vec{\beta}_{k'}}\,} - I_{y=k}\right) \\ - * &= x_{i, j} \cdot w_i \cdot multiplier_k - * \end{align} - * $$ - *
- * - * where $w_i$ is the sample weight, $I_{y=k}$ is an indicator function - * - *
- * $$ - * I_{y=k} = \begin{cases} - * 1 & y = k \\ - * 0 & else - * \end{cases} - * $$ - *
- * - * and - * - *
- * $$ - * multiplier_k = \left(\frac{e^{\vec{x}_i \cdot \vec{\beta}_k}}{\sum_{k=0}^{K-1} - * e^{\vec{x}_i \cdot \vec{\beta}_k}} - I_{y=k}\right) - * $$ - *
- *
- * If any of the margins is larger than 709.78, the numerical computation of the multiplier
- * and the loss function will suffer from arithmetic overflow. This issue occurs when there
- * are outliers in the data which are far away from the hyperplane, and it will cause training
- * to fail once infinity is introduced. Note that this is only a concern when
- * max(margins) > 0.
- *
- * Fortunately, when max(margins) = maxMargin > 0, the loss function and the multiplier can
- * easily be rewritten into the following equivalent, numerically stable formula.
- *
- * <blockquote>
- * $$ - * \ell\left(\beta, x\right) = log\left(\sum_{k=0}^{K-1} e^{margins_k - maxMargin}\right) - - * margins_{y} + maxMargin - * $$ - *
- * - * Note that each term, $(margins_k - maxMargin)$ in the exponential is no greater than zero; as a - * result, overflow will not happen with this formula. - * - * For $multiplier$, a similar trick can be applied as the following, - * - *
- * $$ - * multiplier_k = \left(\frac{e^{\vec{x}_i \cdot \vec{\beta}_k - maxMargin}}{\sum_{k'=0}^{K-1} - * e^{\vec{x}_i \cdot \vec{\beta}_{k'} - maxMargin}} - I_{y=k}\right) - * $$ - *
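- *
- * In multinomialUpdateInPlace below, this stabilization amounts to (a compact restatement
- * of the code, shown here for readability):
- * {{{
- *   val maxMargin = margins.max
- *   val loss = math.log(margins.map(m => math.exp(m - maxMargin)).sum) -
- *     marginOfLabel + maxMargin
- * }}}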
- * - * - * @param bcCoefficients The broadcast coefficients corresponding to the features. - * @param bcFeaturesStd The broadcast standard deviation values of the features. - * @param numClasses the number of possible outcomes for k classes classification problem in - * Multinomial Logistic Regression. - * @param fitIntercept Whether to fit an intercept term. - * @param multinomial Whether to use multinomial (softmax) or binary loss - * @note In order to avoid unnecessary computation during calculation of the gradient updates - * we lay out the coefficients in column major order during training. This allows us to - * perform feature standardization once, while still retaining sequential memory access - * for speed. We convert back to row major order when we create the model, - * since this form is optimal for the matrix operations used for prediction. - */ -private[ml] class LogisticAggregatorX( - bcFeaturesStd: Broadcast[Array[Double]], - numClasses: Int, - fitIntercept: Boolean, - multinomial: Boolean)(bcCoefficients: Broadcast[Vector]) - extends DifferentiableLossAggregatorX[Instance, LogisticAggregatorX] with Logging { - - private val numFeatures = bcFeaturesStd.value.length - private val numFeaturesPlusIntercept = if (fitIntercept) numFeatures + 1 else numFeatures - private val coefficientSize = bcCoefficients.value.size - protected override val dim: Int = coefficientSize - if (multinomial) { - require(numClasses == coefficientSize / numFeaturesPlusIntercept, s"The number of " + - s"coefficients should be ${numClasses * numFeaturesPlusIntercept} but was $coefficientSize") - } else { - require(coefficientSize == numFeaturesPlusIntercept, s"Expected $numFeaturesPlusIntercept " + - s"coefficients but got $coefficientSize") - require(numClasses == 1 || numClasses == 2, s"Binary logistic aggregator requires numClasses " + - s"in {1, 2} but found $numClasses.") - } - - @transient private lazy val coefficientsArray = DoubleArrayList.wrap(bcCoefficients.value match { - case DenseVector(values) => values - case _ => throw new IllegalArgumentException(s"coefficients only supports dense vector but " + - s"got type ${bcCoefficients.value.getClass}.)") - }) - @transient private lazy val featuresStdArray = DoubleArrayList.wrap(bcFeaturesStd.value) - - if (multinomial && numClasses <= 2) { - logInfo(s"Multinomial logistic regression for binary classification yields separate " + - s"coefficients for positive and negative classes. When no regularization is applied, the" + - s"result will be effectively the same as binary logistic regression. When regularization" + - s"is applied, multinomial loss will produce a result different from binary loss.") - } - - /** Update gradient and loss using binary loss function. 
*/ - private def binaryUpdateInPlace(features: Vector, weight: Double, label: Double): Unit = { - - val localFeaturesStd = featuresStdArray - val localCoefficients = coefficientsArray - val localGradientArray = gradientSumArray - val margin = - { - var sum = 0.0 - features.foreachActive { (index, value) => - sum += localCoefficients.getDouble(index) * value * localFeaturesStd.getDouble(index) - } - if (fitIntercept) sum += localCoefficients.getDouble(numFeaturesPlusIntercept - 1) - sum - } - - val multiplier = weight * (1.0 / (1.0 + math.exp(margin)) - label) - - features.foreachActive { (index, value) => - localGradientArray.set(index, localGradientArray.getDouble(index) - + multiplier * value * localFeaturesStd.getDouble(index)) - } - - if (fitIntercept) { - localGradientArray.set(numFeaturesPlusIntercept - 1, - localGradientArray.getDouble(numFeaturesPlusIntercept - 1) + multiplier) - } - - if (label > 0) { - // The following is equivalent to log(1 + exp(margin)) but more numerically stable. - lossSum += weight * MLUtils.log1pExp(margin) - } else { - lossSum += weight * (MLUtils.log1pExp(margin) - margin) - } - } - - /** Update gradient and loss using multinomial (softmax) loss function. */ - private def multinomialUpdateInPlace(features: Vector, weight: Double, label: Double): Unit = { - // TODO: use level 2 BLAS operations - /* - Note: this can still be used when numClasses = 2 for binary - logistic regression without pivoting. - */ - val localFeaturesStd = featuresStdArray - val localCoefficients = coefficientsArray - val localGradientArray = gradientSumArray - - // marginOfLabel is margins(label) in the formula - var marginOfLabel = 0.0 - var maxMargin = Double.NegativeInfinity - - val margins = new Array[Double](numClasses) - features.foreachActive { (index, value) => - val localFeaturesStdValue = localFeaturesStd.getDouble(index) - if (localFeaturesStdValue != 0.0 && value != 0.0) { - val stdValue = value * localFeaturesStdValue - var j = 0 - while (j < numClasses) { - margins(j) += localCoefficients.getDouble(index * numClasses + j) * stdValue - j += 1 - } - } - } - var i = 0 - while (i < numClasses) { - if (fitIntercept) { - margins(i) += localCoefficients.getDouble(numClasses * numFeatures + i) - } - if (i == label.toInt) marginOfLabel = margins(i) - if (margins(i) > maxMargin) { - maxMargin = margins(i) - } - i += 1 - } - - /** - * When maxMargin is greater than 0, the original formula could cause overflow. - * We address this by subtracting maxMargin from all the margins, so it's guaranteed - * that all of the new margins will be smaller than zero to prevent arithmetic overflow. 
- */ - val multipliers = new Array[Double](numClasses) - val sum = { - var temp = 0.0 - var i = 0 - while (i < numClasses) { - if (maxMargin > 0) margins(i) -= maxMargin - val exp = math.exp(margins(i)) - temp += exp - multipliers(i) = exp - i += 1 - } - temp - } - - margins.indices.foreach { i => - multipliers(i) = multipliers(i) / sum - (if (label == i) 1.0 else 0.0) - } - features.foreachActive { (index, value) => - val localFeaturesStdValue = localFeaturesStd.getDouble(index) - if (localFeaturesStdValue != 0.0 && value != 0.0) { - val stdValue = value * localFeaturesStdValue - var j = 0 - while (j < numClasses) { - val id = index * numClasses + j - localGradientArray.set(id, localGradientArray.getDouble(id) - + weight * multipliers(j) * stdValue) - j += 1 - } - } - } - if (fitIntercept) { - var i = 0 - while (i < numClasses) { - val id = numFeatures * numClasses + i - localGradientArray.set(id, localGradientArray.getDouble(id) - + weight * multipliers(i)) - i += 1 - } - } - - val loss = if (maxMargin > 0) { - math.log(sum) - marginOfLabel + maxMargin - } else { - math.log(sum) - marginOfLabel - } - lossSum += weight * loss - } - - /** - * Add a new training instance to this LogisticAggregator, and update the loss and gradient - * of the objective function. - * - * @param instance The instance of data point to be added. - * @return This LogisticAggregator object. - */ - def add(instance: Instance): this.type = { - instance match { case Instance(label, weight, features) => - require(numFeatures == features.size, s"Dimensions mismatch when adding new instance." + - s" Expecting $numFeatures but got ${features.size}.") - require(weight >= 0.0, s"instance weight, $weight has to be >= 0.0") - - if (weight == 0.0) return this - - if (multinomial) { - multinomialUpdateInPlace(features, weight, label) - } else { - binaryUpdateInPlace(features, weight, label) - } - weightSum += weight - this - } - } -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/optim/loss/RDDLossFunctionX.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/optim/loss/RDDLossFunctionX.scala deleted file mode 100644 index c548b90..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/optim/loss/RDDLossFunctionX.scala +++ /dev/null @@ -1,77 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.spark.ml.optim.loss - -import scala.reflect.ClassTag - -import breeze.linalg.{DenseVector => BDV} -import breeze.optimize.DiffFunction - -import org.apache.spark.broadcast.Broadcast -import org.apache.spark.ml.linalg.{BLAS, Vector, Vectors} -import org.apache.spark.ml.optim.aggregator.DifferentiableLossAggregatorX -import org.apache.spark.rdd.RDD - -/** - * This class computes the gradient and loss of a differentiable loss function by mapping a - * [[DifferentiableLossAggregatorX]] over an [[RDD]]. The loss function is the - * sum, over all points in the RDD, of the loss computed on each single instance. Therefore, the actual - * analytical form of the loss function is specified by the aggregator, which computes each point's - * contribution to the overall loss. - * - * A differentiable regularization component can also be added by providing a - * [[DifferentiableRegularization]] loss function. - * - * @param instances RDD containing the data to compute the loss function over. - * @param getAggregator A function which gets a new loss aggregator in every tree aggregate step. - * @param regularization An option representing the regularization loss function to apply to the - * coefficients. - * @param aggregationDepth The aggregation depth of the tree aggregation step. - * @tparam Agg Specialization of [[DifferentiableLossAggregatorX]], representing the concrete type - * of the aggregator. - */ -private[ml] class RDDLossFunctionX[ - T: ClassTag, - Agg <: DifferentiableLossAggregatorX[T, Agg]: ClassTag]( - instances: RDD[T], - getAggregator: (Broadcast[Vector] => Agg), - regularization: Option[DifferentiableRegularization[Vector]], - aggregationDepth: Int = 2) - extends DiffFunction[BDV[Double]] { - - override def calculate(coefficients: BDV[Double]): (Double, BDV[Double]) = { - val bcCoefficients = instances.context.broadcast(Vectors.fromBreeze(coefficients)) - val thisAgg = getAggregator(bcCoefficients) - val seqOp = (agg: Agg, x: T) => agg.add(x) - val combOp = (agg1: Agg, agg2: Agg) => agg1.merge(agg2) - val newAgg = instances.treeAggregate(thisAgg)(seqOp, combOp, aggregationDepth) - val gradient = newAgg.gradient - val regLoss = regularization.map { regFun => - val (regLoss, regGradient) = regFun.calculate(Vectors.fromBreeze(coefficients)) - BLAS.axpy(1.0, regGradient, gradient) - regLoss - }.getOrElse(0.0) - bcCoefficients.destroy(blocking = false) - (newAgg.loss + regLoss, gradient.asBreeze.toDenseVector) - } -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala index 917bb52..e3c4bae 100644 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala +++ b/ml-accelerator/src/main/scala/org/apache/spark/ml/recommendation/ALS.scala @@ -1,9 +1,3 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements.
See the NOTICE file distributed with @@ -38,17 +32,18 @@ import org.apache.hadoop.fs.Path import org.json4s.DefaultFormats import org.json4s.JsonDSL._ -import org.apache.spark.{Dependency, Partitioner, ShuffleDependency, SparkContext} -import org.apache.spark.annotation.{DeveloperApi, Since} +import org.apache.spark.{Partitioner, SparkException} +import org.apache.spark.annotation.Since import org.apache.spark.internal.Logging -import org.apache.spark.ml.{Estimator, Model, StaticUtils} +import org.apache.spark.ml.{Estimator, Model} import org.apache.spark.ml.linalg.BLAS import org.apache.spark.ml.param._ import org.apache.spark.ml.param.shared._ import org.apache.spark.ml.util._ +import org.apache.spark.ml.util.Instrumentation.instrumented import org.apache.spark.mllib.linalg.CholeskyDecomposition import org.apache.spark.mllib.optimization.NNLS -import org.apache.spark.rdd.RDD +import org.apache.spark.rdd.{DeterministicLevel, RDD} import org.apache.spark.sql.{DataFrame, Dataset} import org.apache.spark.sql.functions._ import org.apache.spark.sql.types._ @@ -57,11 +52,11 @@ import org.apache.spark.util.{BoundedPriorityQueue, Utils} import org.apache.spark.util.collection.{OpenHashMap, OpenHashSet, SortDataFormat, Sorter} import org.apache.spark.util.random.XORShiftRandom - /** * Common params for ALS and ALSModel. */ -private[recommendation] trait ALSModelParams extends Params with HasPredictionCol { +private[recommendation] trait ALSModelParams extends Params with HasPredictionCol + with HasBlockSize { /** * Param for the column name for user ids. Ids must be integers. Other * numeric types are supported for this column, but will be cast to integers as long as they @@ -132,13 +127,15 @@ private[recommendation] trait ALSModelParams extends Params with HasPredictionCo /** @group expertGetParam */ def getColdStartStrategy: String = $(coldStartStrategy).toLowerCase(Locale.ROOT) + + setDefault(blockSize -> 4096) } /** * Common params for ALS. */ private[recommendation] trait ALSParams extends ALSModelParams with HasMaxIter with HasRegParam - with HasPredictionCol with HasCheckpointInterval with HasSeed { + with HasCheckpointInterval with HasSeed { /** * Param for rank of the matrix factorization (positive). @@ -295,6 +292,15 @@ class ALSModel private[ml] ( @Since("2.2.0") def setColdStartStrategy(value: String): this.type = set(coldStartStrategy, value) + /** + * Set block size for stacking input data in matrices. + * Default is 4096. + * + * @group expertSetParam + */ + @Since("3.0.0") + def setBlockSize(value: Int): this.type = set(blockSize, value) + private val predict = udf { (featuresA: Seq[Float], featuresB: Seq[Float]) => if (featuresA != null && featuresB != null) { var dotProduct = 0.0f @@ -345,6 +351,11 @@ class ALSModel private[ml] ( @Since("1.6.0") override def write: MLWriter = new ALSModel.ALSModelWriter(this) + @Since("3.0.0") + override def toString: String = { + s"ALSModel: uid=$uid, rank=$rank" + } + /** * Returns top `numItems` items recommended for each user, for all users. 
* @param numItems max number of recommendations for each user @@ -353,7 +364,7 @@ class ALSModel private[ml] ( */ @Since("2.2.0") def recommendForAllUsers(numItems: Int): DataFrame = { - recommendForAll(userFactors, itemFactors, $(userCol), $(itemCol), numItems) + recommendForAll(userFactors, itemFactors, $(userCol), $(itemCol), numItems, $(blockSize)) } /** @@ -368,7 +379,7 @@ class ALSModel private[ml] ( @Since("2.3.0") def recommendForUserSubset(dataset: Dataset[_], numItems: Int): DataFrame = { val srcFactorSubset = getSourceFactorSubset(dataset, userFactors, $(userCol)) - recommendForAll(srcFactorSubset, itemFactors, $(userCol), $(itemCol), numItems) + recommendForAll(srcFactorSubset, itemFactors, $(userCol), $(itemCol), numItems, $(blockSize)) } /** @@ -379,7 +390,7 @@ class ALSModel private[ml] ( */ @Since("2.2.0") def recommendForAllItems(numUsers: Int): DataFrame = { - recommendForAll(itemFactors, userFactors, $(itemCol), $(userCol), numUsers) + recommendForAll(itemFactors, userFactors, $(itemCol), $(userCol), numUsers, $(blockSize)) } /** @@ -394,7 +405,7 @@ class ALSModel private[ml] ( @Since("2.3.0") def recommendForItemSubset(dataset: Dataset[_], numUsers: Int): DataFrame = { val srcFactorSubset = getSourceFactorSubset(dataset, itemFactors, $(itemCol)) - recommendForAll(srcFactorSubset, userFactors, $(itemCol), $(userCol), numUsers) + recommendForAll(srcFactorSubset, userFactors, $(itemCol), $(userCol), numUsers, $(blockSize)) } /** @@ -443,11 +454,12 @@ class ALSModel private[ml] ( dstFactors: DataFrame, srcOutputColumn: String, dstOutputColumn: String, - num: Int): DataFrame = { + num: Int, + blockSize: Int): DataFrame = { import srcFactors.sparkSession.implicits._ - val srcFactorsBlocked = blockify(srcFactors.as[(Int, Array[Float])]) - val dstFactorsBlocked = blockify(dstFactors.as[(Int, Array[Float])]) + val srcFactorsBlocked = blockify(srcFactors.as[(Int, Array[Float])], blockSize) + val dstFactorsBlocked = blockify(dstFactors.as[(Int, Array[Float])], blockSize) val ratings = srcFactorsBlocked.crossJoin(dstFactorsBlocked) .as[(Seq[(Int, Array[Float])], Seq[(Int, Array[Float])])] .flatMap { case (srcIter, dstIter) => @@ -485,11 +497,10 @@ class ALSModel private[ml] ( /** * Blockifies factors to improve the efficiency of cross join - * TODO: SPARK-20443 - expose blockSize as a param? */ private def blockify( factors: Dataset[(Int, Array[Float])], - blockSize: Int = 4096): Dataset[Seq[(Int, Array[Float])]] = { + blockSize: Int): Dataset[Seq[(Int, Array[Float])]] = { import factors.sparkSession.implicits._ factors.mapPartitions(_.grouped(blockSize)) } @@ -537,7 +548,7 @@ object ALSModel extends MLReadable[ALSModel] { val model = new ALSModel(metadata.uid, rank, userFactors, itemFactors) - DefaultParamsReader.getAndSetParams(model, metadata) + metadata.getAndSetParams(model) model } } @@ -564,13 +575,20 @@ object ALSModel extends MLReadable[ALSModel] { * * For implicit preference data, the algorithm used is based on * "Collaborative Filtering for Implicit Feedback Datasets", available at - * http://dx.doi.org/10.1109/ICDM.2008.22, adapted for the blocked approach used here. + * https://doi.org/10.1109/ICDM.2008.22, adapted for the blocked approach used here. * * Essentially instead of finding the low-rank approximations to the rating matrix `R`, * this finds the approximations for a preference matrix `P` where the elements of `P` are 1 if * r is greater than 0 and 0 if r is less than or equal to 0. 
The ratings then act as 'confidence' * values related to strength of indicated user * preferences rather than explicit ratings given to items. + * + * Note: the input rating dataset to the ALS implementation should be deterministic. + * Nondeterministic data can cause failure during fitting ALS model. + * For example, an order-sensitive operation like sampling after a repartition makes dataset + * output nondeterministic, like `dataset.repartition(2).sample(false, 0.5, 1618)`. + * Checkpointing sampled dataset or adding a sort before sampling can help make the dataset + * deterministic. */ @Since("1.3.0") class ALS(@Since("1.4.0") override val uid: String) extends Estimator[ALSModel] with ALSParams @@ -649,6 +667,15 @@ class ALS(@Since("1.4.0") override val uid: String) extends Estimator[ALSModel] @Since("2.2.0") def setColdStartStrategy(value: String): this.type = set(coldStartStrategy, value) + /** + * Set block size for stacking input data in matrices. + * Default is 4096. + * + * @group expertSetParam + */ + @Since("3.0.0") + def setBlockSize(value: Int): this.type = set(blockSize, value) + /** * Sets both numUserBlocks and numItemBlocks to the specific value. * @@ -661,10 +688,8 @@ class ALS(@Since("1.4.0") override val uid: String) extends Estimator[ALSModel] this } - - @Since("2.0.0") - override def fit(dataset: Dataset[_]): ALSModel = { + override def fit(dataset: Dataset[_]): ALSModel = instrumented { instr => transformSchema(dataset.schema) import dataset.sparkSession.implicits._ @@ -676,11 +701,11 @@ class ALS(@Since("1.4.0") override val uid: String) extends Estimator[ALSModel] Rating(row.getInt(0), row.getInt(1), row.getFloat(2)) } - - val instr = Instrumentation.create(this, ratings) - instr.logParams(rank, numUserBlocks, numItemBlocks, implicitPrefs, alpha, userCol, + instr.logPipelineStage(this) + instr.logDataset(dataset) + instr.logParams(this, rank, numUserBlocks, numItemBlocks, implicitPrefs, alpha, userCol, itemCol, ratingCol, predictionCol, maxIter, regParam, nonnegative, checkpointInterval, - seed, intermediateStorageLevel, finalStorageLevel) + seed, intermediateStorageLevel, finalStorageLevel, blockSize) val (userFactors, itemFactors) = ALS.train(ratings, rank = $(rank), numUserBlocks = $(numUserBlocks), numItemBlocks = $(numItemBlocks), @@ -691,8 +716,8 @@ class ALS(@Since("1.4.0") override val uid: String) extends Estimator[ALSModel] checkpointInterval = $(checkpointInterval), seed = $(seed)) val userDF = userFactors.toDF("id", "features") val itemDF = itemFactors.toDF("id", "features") - val model = new ALSModel(uid, $(rank), userDF, itemDF).setParent(this) - instr.logSuccess(model) + val model = new ALSModel(uid, $(rank), userDF, itemDF).setBlockSize($(blockSize)) + .setParent(this) copyValues(model) } @@ -707,21 +732,17 @@ class ALS(@Since("1.4.0") override val uid: String) extends Estimator[ALSModel] /** - * :: DeveloperApi :: * An implementation of ALS that supports generic ID types, specialized for Int and Long. This is * exposed as a developer API for users who do need other ID types. But it is not recommended * because it increases the shuffle size and memory requirement during training. For simplicity, * users and items must have the same type. The number of distinct users/items should be smaller * than 2 billion. */ -@DeveloperApi object ALS extends DefaultParamsReadable[ALS] with Logging { /** - * :: DeveloperApi :: * Rating class for better code readability. 
*/ - @DeveloperApi case class Rating[@specialized(Int, Long) ID](user: ID, item: ID, rating: Float) @Since("1.6.0") @@ -804,7 +825,7 @@ object ALS extends DefaultParamsReadable[ALS] with Logging { * Given a triangular matrix in the order of fillXtX above, compute the full symmetric square * matrix that it represents, storing it into destMatrix. */ - private def fillAtA(triAtA: Array[Double], lambda: Double) { + private def fillAtA(triAtA: Array[Double], lambda: Double): Unit = { var i = 0 var pos = 0 var a = 0.0 @@ -862,7 +883,7 @@ object ALS extends DefaultParamsReadable[ALS] with Logging { } /** Adds an observation. */ - def add(a: Array[Float], b: Double, c: Double = 1.0): this.type = { + def add(a: Array[Float], b: Double, c: Double = 1.0): NormalEquation = { require(c >= 0.0) require(a.length == k) copyToDouble(a) @@ -874,7 +895,7 @@ object ALS extends DefaultParamsReadable[ALS] with Logging { } /** Merges another normal equation object. */ - def merge(other: NormalEquation): this.type = { + def merge(other: NormalEquation): NormalEquation = { require(other.k == k) blas.daxpy(ata.length, 1.0, other.ata, 1, ata, 1) blas.daxpy(atb.length, 1.0, other.atb, 1, atb, 1) @@ -892,7 +913,6 @@ object ALS extends DefaultParamsReadable[ALS] with Logging { val DEFAULT_UNPERSIST_CYCLE = 300 /** - * :: DeveloperApi :: * Implementation of the ALS algorithm. * * This implementation of the ALS factorization algorithm partitions the two sets of factors among @@ -917,7 +937,6 @@ object ALS extends DefaultParamsReadable[ALS] with Logging { * "block" as referring to a subset of an RDD containing the ratings rather than a contiguous * submatrix of the ratings matrix. */ - @DeveloperApi def train[ID: ClassTag]( // scalastyle:ignore ratings: RDD[Rating[ID]], rank: Int = 10, @@ -964,7 +983,6 @@ object ALS extends DefaultParamsReadable[ALS] with Logging { val joinIU = mergedIU.join(userInBlocks).persist() joinIU.foreachPartition(_) - // Encoders for storing each user/item's partition ID and index within its partition using a // single integer; used as an optimization val userLocalIndexEncoder = new LocalIndexEncoder(userPart.numPartitions) @@ -1017,6 +1035,7 @@ object ALS extends DefaultParamsReadable[ALS] with Logging { case x: Exception => throw new Exception("'spark.boostkit.ALS.blockMaxRow' value is invalid") } + if (implicitPrefs) { val dataIterI = new Array[RDD[(Int, ALS.FactorBlock)]](unpersistCycle) val dataIterU = new Array[RDD[(Int, ALS.FactorBlock)]](unpersistCycle) @@ -1068,14 +1087,16 @@ object ALS extends DefaultParamsReadable[ALS] with Logging { } } - val userIdAndFactors = userInBlocks .mapValues(_.srcIds) .join(userFactors) .mapPartitions({ items => items.flatMap { case (_, (ids, factors)) => - ids.view.zip(factors) + ids.iterator.zip(factors.iterator) } + // Preserve the partitioning because IDs + // are consistent with the partitioners in userInBlocks + // and userFactors. 
}, preservesPartitioning = true) .setName("userFactors") .persist(finalRDDStorageLevel) @@ -1084,20 +1105,20 @@ object ALS extends DefaultParamsReadable[ALS] with Logging { .join(itemFactors) .mapPartitions({ items => items.flatMap { case (_, (ids, factors)) => - ids.view.zip(factors) + ids.iterator.zip(factors.iterator) } }, preservesPartitioning = true) .setName("itemFactors") .persist(finalRDDStorageLevel) if (finalRDDStorageLevel != StorageLevel.NONE) { userIdAndFactors.count() - itemFactors.unpersist() - itemIdAndFactors.count() userInBlocks.unpersist() userOutBlocks.unpersist() - itemInBlocks.unpersist() itemOutBlocks.unpersist() blockRatings.unpersist() + itemIdAndFactors.count() + itemFactors.unpersist() + itemInBlocks.unpersist() } (userIdAndFactors, itemIdAndFactors) } @@ -1291,16 +1312,19 @@ object ALS extends DefaultParamsReadable[ALS] with Logging { // elements distributed as Normal(0,1) and taking the absolute value, and then normalizing. // This appears to create factorizations that have a slightly better reconstruction // (<1%) compared picking elements uniformly at random in [0,1]. - inBlocks.map { case (srcBlockId, inBlock) => - val random = new XORShiftRandom(byteswap64(seed ^ srcBlockId)) - val factors = Array.fill(inBlock.srcIds.length) { - val factor = Array.fill(rank)(random.nextGaussian().toFloat) - val nrm = blas.snrm2(rank, factor, 1) - blas.sscal(rank, 1.0f / nrm, factor, 1) - factor + inBlocks.mapPartitions({ iter => + iter.map { + case (srcBlockId, inBlock) => + val random = new XORShiftRandom(byteswap64(seed ^ srcBlockId)) + val factors = Array.fill(inBlock.srcIds.length) { + val factor = Array.fill(rank)(random.nextGaussian().toFloat) + val nrm = blas.snrm2(rank, factor, 1) + blas.sscal(rank, 1.0f / nrm, factor, 1) + factor + } + (srcBlockId, factors) } - (srcBlockId, factors) - } + }, preservesPartitioning = true) } /** @@ -1396,7 +1420,7 @@ object ALS extends DefaultParamsReadable[ALS] with Logging { Iterator.empty } } ++ { - builders.view.zipWithIndex.filter(_._1.size > 0).map { case (block, idx) => + builders.iterator.zipWithIndex.filter(_._1.size > 0).map { case (block, idx) => val srcBlockId = idx % srcPart.numPartitions val dstBlockId = idx / srcPart.numPartitions ((srcBlockId, dstBlockId), block.build()) @@ -1632,7 +1656,7 @@ object ALS extends DefaultParamsReadable[ALS] with Logging { val dstIdSet = new OpenHashSet[ID](1 << 20) dstIds.foreach(dstIdSet.add) val sortedDstIds = new Array[ID](dstIdSet.size) - var i = StaticUtils.ZERO_INT + var i = 0 var pos = dstIdSet.nextPos(0) while (pos != -1) { sortedDstIds(i) = dstIdSet.getValue(pos) @@ -1784,6 +1808,7 @@ object ALS extends DefaultParamsReadable[ALS] with Logging { ne.copyATA(d) } + /** * Encoder for storing (blockId, localIndex) into a single integer. * @@ -1827,31 +1852,4 @@ object ALS extends DefaultParamsReadable[ALS] with Logging { * satisfies this requirement, we simply use a type alias here. */ private[recommendation] type ALSPartitioner = org.apache.spark.HashPartitioner - - /** - * Private function to clean up all of the shuffles files from the dependencies and their parents. - */ - private[spark] def cleanShuffleDependencies[T]( - sc: SparkContext, - deps: Seq[Dependency[_]], - blocking: Boolean = false): Unit = { - // If there is no reference tracking we skip clean up. - sc.cleaner.foreach { cleaner => - /** - * Clean the shuffles & all of its parents. 
- */ - def cleanEagerly(dep: Dependency[_]): Unit = { - if (dep.isInstanceOf[ShuffleDependency[_, _, _]]) { - val shuffleId = dep.asInstanceOf[ShuffleDependency[_, _, _]].shuffleId - cleaner.doCleanupShuffle(shuffleId, blocking) - } - val rdd = dep.rdd - val rddDeps = rdd.dependencies - if (rdd.getStorageLevel == StorageLevel.NONE && rddDeps != null) { - rddDeps.foreach(cleanEagerly) - } - } - deps.foreach(cleanEagerly) - } - } -} +} \ No newline at end of file diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/regression/DecisionTreeRegressor.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/regression/DecisionTreeRegressor.scala deleted file mode 100644 index c6f1aa4..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/regression/DecisionTreeRegressor.scala +++ /dev/null @@ -1,184 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.ml.regression - -import it.unimi.dsi.fastutil.doubles.DoubleArrayList -import it.unimi.dsi.fastutil.ints.{Int2ObjectOpenHashMap, IntArrayList} -import it.unimi.dsi.fastutil.objects.ObjectArrayList - -import org.apache.spark.annotation.Since -import org.apache.spark.broadcast.Broadcast -import org.apache.spark.ml.Predictor -import org.apache.spark.ml.feature.LabeledPoint -import org.apache.spark.ml.linalg.Vector -import org.apache.spark.ml.param.ParamMap -import org.apache.spark.ml.tree._ -import org.apache.spark.ml.tree.impl.DecisionForest -import org.apache.spark.ml.tree.impl.DecisionTreeMetadata -import org.apache.spark.ml.tree.impl.RandomForest4GBDTX -import org.apache.spark.ml.tree.impl.TreePoint -import org.apache.spark.ml.util._ -import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo, Strategy => OldStrategy} -import org.apache.spark.rdd.RDD -import org.apache.spark.sql.Dataset - - -/** - * Decision tree - * learning algorithm for regression. - * It supports both continuous and categorical features. - */ -@Since("1.4.0") -class DecisionTreeRegressor @Since("1.4.0") (@Since("1.4.0") override val uid: String) - extends Predictor[Vector, DecisionTreeRegressor, DecisionTreeRegressionModel] - with DecisionTreeRegressorParams with DefaultParamsWritable { - - @Since("1.4.0") - def this() = this(Identifiable.randomUID("dtr")) - - // Override parameter setters from parent trait for Java API compatibility. 
- /** @group setParam */ - @Since("1.4.0") - override def setMaxDepth(value: Int): this.type = set(maxDepth, value) - - /** @group setParam */ - @Since("1.4.0") - override def setMaxBins(value: Int): this.type = set(maxBins, value) - - /** @group setParam */ - @Since("1.4.0") - override def setMinInstancesPerNode(value: Int): this.type = set(minInstancesPerNode, value) - - /** @group setParam */ - @Since("1.4.0") - override def setMinInfoGain(value: Double): this.type = set(minInfoGain, value) - - /** @group expertSetParam */ - @Since("1.4.0") - override def setMaxMemoryInMB(value: Int): this.type = set(maxMemoryInMB, value) - - /** @group expertSetParam */ - @Since("1.4.0") - override def setCacheNodeIds(value: Boolean): this.type = set(cacheNodeIds, value) - - /** - * Specifies how often to checkpoint the cached node IDs. - * E.g. 10 means that the cache will get checkpointed every 10 iterations. - * This is only used if cacheNodeIds is true and if the checkpoint directory is set in - * [[org.apache.spark.SparkContext]]. - * Must be at least 1. - * (default = 10) - * @group setParam - */ - @Since("1.4.0") - override def setCheckpointInterval(value: Int): this.type = set(checkpointInterval, value) - - /** @group setParam */ - @Since("1.4.0") - override def setImpurity(value: String): this.type = set(impurity, value) - - /** @group setParam */ - @Since("1.6.0") - override def setSeed(value: Long): this.type = set(seed, value) - - /** @group setParam */ - @Since("2.0.0") - def setVarianceCol(value: String): this.type = set(varianceCol, value) - - override protected def train(dataset: Dataset[_]): DecisionTreeRegressionModel = { - val categoricalFeatures: Map[Int, Int] = - MetadataUtils.getCategoricalFeatures(dataset.schema($(featuresCol))) - val oldDataset: RDD[LabeledPoint] = extractLabeledPoints(dataset) - val strategy = getOldStrategy(categoricalFeatures) - - val instr = Instrumentation.create(this, oldDataset) - instr.logParams(params: _*) - - val trees = DecisionForest.run(oldDataset, strategy, numTrees = 1, - featureSubsetStrategy = "all", seed = $(seed), instr = Some(instr), parentUID = Some(uid)) - - val m = trees.head.asInstanceOf[DecisionTreeRegressionModel] - instr.logSuccess(m) - m - } - - /** (private[ml]) Train a decision tree on an RDD */ - private[ml] def train( - data: RDD[LabeledPoint], - oldStrategy: OldStrategy, - featureSubsetStrategy: String): DecisionTreeRegressionModel = { - val instr = Instrumentation.create(this, data) - instr.logParams(params: _*) - - val trees = DecisionForest.run(data, oldStrategy, numTrees = 1, featureSubsetStrategy, - seed = $(seed), instr = Some(instr), parentUID = Some(uid)) - - val m = trees.head.asInstanceOf[DecisionTreeRegressionModel] - instr.logSuccess(m) - m - } - - /** (private[ml]) Train a decision tree on an RDD */ - private[ml] def train4GBDTX( - labelArrayBc: Broadcast[DoubleArrayList], - processedInput: RDD[(Int, (IntArrayList, ObjectArrayList[Split]))], - metadata: DecisionTreeMetadata, - splits: Array[Array[Split]], - oldStrategy: OldStrategy, - featureSubsetStrategy: String, - input: RDD[TreePoint], - rawPartInfoBc: Broadcast[Int2ObjectOpenHashMap[IntArrayList]]): - DecisionTreeRegressionModel = { - val instr = Instrumentation.create(this, processedInput) - instr.logParams(params: _*) - - val trees = RandomForest4GBDTX.runX(labelArrayBc, processedInput, metadata, - splits, oldStrategy, numTrees = 1, seed = $(seed), input, - rawPartInfoBc, parentUID = Some(uid)) - - val m = trees.head.asInstanceOf[DecisionTreeRegressionModel] - 
instr.logSuccess(m) - m - } - - - /** (private[ml]) Create a Strategy instance to use with the old API. */ - private[ml] def getOldStrategy(categoricalFeatures: Map[Int, Int]): OldStrategy = { - super.getOldStrategy(categoricalFeatures, numClasses = 0, OldAlgo.Regression, getOldImpurity, - subsamplingRate = 1.0) - } - - @Since("1.4.0") - override def copy(extra: ParamMap): DecisionTreeRegressor = defaultCopy(extra) -} - -@Since("1.4.0") -object DecisionTreeRegressor extends DefaultParamsReadable[DecisionTreeRegressor] { - /** Accessor for supported impurities: variance */ - final val supportedImpurities: Array[String] = TreeRegressorParams.supportedImpurities - - @Since("2.0.0") - override def load(path: String): DecisionTreeRegressor = super.load(path) -} - diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/regression/GBTRegressor.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/regression/GBTRegressor.scala deleted file mode 100644 index ac7c2ba..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/regression/GBTRegressor.scala +++ /dev/null @@ -1,354 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.ml.regression - -import com.github.fommil.netlib.BLAS.{getInstance => blas} -import org.json4s.{DefaultFormats, JObject} -import org.json4s.JsonDSL._ - -import org.apache.spark.annotation.Since -import org.apache.spark.internal.Logging -import org.apache.spark.ml.{PredictionModel, Predictor} -import org.apache.spark.ml.feature.LabeledPoint -import org.apache.spark.ml.linalg.Vector -import org.apache.spark.ml.param.ParamMap -import org.apache.spark.ml.tree._ -import org.apache.spark.ml.tree.impl.GradientBoostedTrees -import org.apache.spark.ml.util._ -import org.apache.spark.ml.util.DefaultParamsReader.Metadata -import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo} -import org.apache.spark.mllib.tree.model.{GradientBoostedTreesModel => OldGBTModel} -import org.apache.spark.rdd.RDD -import org.apache.spark.sql.{DataFrame, Dataset} -import org.apache.spark.sql.functions._ - -/** - * Gradient-Boosted Trees (GBTs) - * learning algorithm for regression. - * It supports both continuous and categorical features. - * - * The implementation is based upon: J.H. Friedman. "Stochastic Gradient Boosting." 1999. - * - * Notes on Gradient Boosting vs. TreeBoost: - * - This implementation is for Stochastic Gradient Boosting, not for TreeBoost. - * - Both algorithms learn tree ensembles by minimizing loss functions. 
- * - TreeBoost (Friedman, 1999) additionally modifies the outputs at tree leaf nodes - * based on the loss function, whereas the original gradient boosting method does not. - * - When the loss is SquaredError, these methods give the same result, but they could differ - * for other loss functions. - * - We expect to implement TreeBoost in the future: - * [https://issues.apache.org/jira/browse/SPARK-4240] - */ -@Since("1.4.0") -class GBTRegressor @Since("1.4.0") (@Since("1.4.0") override val uid: String) - extends Predictor[Vector, GBTRegressor, GBTRegressionModel] - with GBTRegressorParams with DefaultParamsWritable with Logging { - - @Since("1.4.0") - def this() = this(Identifiable.randomUID("gbtr")) - - // Override parameter setters from parent trait for Java API compatibility. - - // Parameters from TreeRegressorParams: - - /** @group setParam */ - @Since("1.4.0") - override def setMaxDepth(value: Int): this.type = set(maxDepth, value) - - /** @group setParam */ - @Since("1.4.0") - override def setMaxBins(value: Int): this.type = set(maxBins, value) - - /** @group setParam */ - @Since("1.4.0") - override def setMinInstancesPerNode(value: Int): this.type = set(minInstancesPerNode, value) - - /** @group setParam */ - @Since("1.4.0") - override def setMinInfoGain(value: Double): this.type = set(minInfoGain, value) - - /** @group expertSetParam */ - @Since("1.4.0") - override def setMaxMemoryInMB(value: Int): this.type = set(maxMemoryInMB, value) - - /** @group expertSetParam */ - @Since("1.4.0") - override def setCacheNodeIds(value: Boolean): this.type = set(cacheNodeIds, value) - - /** - * Specifies how often to checkpoint the cached node IDs. - * E.g. 10 means that the cache will get checkpointed every 10 iterations. - * This is only used if cacheNodeIds is true and if the checkpoint directory is set in - * [[org.apache.spark.SparkContext]]. - * Must be at least 1. - * (default = 10) - * @group setParam - */ - @Since("1.4.0") - override def setCheckpointInterval(value: Int): this.type = set(checkpointInterval, value) - - /** - * The impurity setting is ignored for GBT models. - * Individual trees are built using impurity "Variance." 
- * - * @group setParam - */ - @Since("1.4.0") - override def setImpurity(value: String): this.type = { - logWarning("GBTRegressor.setImpurity should NOT be used") - this - } - - // Parameters from TreeEnsembleParams: - - /** @group setParam */ - @Since("1.4.0") - override def setSubsamplingRate(value: Double): this.type = set(subsamplingRate, value) - - /** @group setParam */ - @Since("1.4.0") - override def setSeed(value: Long): this.type = set(seed, value) - - // Parameters from GBTParams: - - /** @group setParam */ - @Since("1.4.0") - override def setMaxIter(value: Int): this.type = set(maxIter, value) - - /** @group setParam */ - @Since("1.4.0") - override def setStepSize(value: Double): this.type = set(stepSize, value) - - // Parameters from GBTRegressorParams: - - /** @group setParam */ - @Since("1.4.0") - def setLossType(value: String): this.type = set(lossType, value) - - /** @group setParam */ - @Since("2.3.0") - override def setFeatureSubsetStrategy(value: String): this.type = - set(featureSubsetStrategy, value) - - override protected def train(dataset: Dataset[_]): GBTRegressionModel = { - val categoricalFeatures: Map[Int, Int] = - MetadataUtils.getCategoricalFeatures(dataset.schema($(featuresCol))) - val oldDataset: RDD[LabeledPoint] = extractLabeledPoints(dataset) - val numFeatures = oldDataset.first().features.size - val boostingStrategy = super.getOldBoostingStrategy(categoricalFeatures, OldAlgo.Regression) - - val instr = Instrumentation.create(this, oldDataset) - instr.logParams(labelCol, featuresCol, predictionCol, impurity, lossType, - maxDepth, maxBins, maxIter, maxMemoryInMB, minInfoGain, minInstancesPerNode, - seed, stepSize, subsamplingRate, cacheNodeIds, checkpointInterval, featureSubsetStrategy) - instr.logNumFeatures(numFeatures) - - val (doUseAcc, setUseAccFlag) = super.getDoUseAcc - val (baseLearners, learnerWeights) = if (setUseAccFlag) { - GradientBoostedTrees.run(oldDataset, boostingStrategy, - $(seed), $(featureSubsetStrategy), doUseAcc) - } else { - GradientBoostedTrees.run(oldDataset, boostingStrategy, - $(seed), $(featureSubsetStrategy)) - } - val m = new GBTRegressionModel(uid, baseLearners, learnerWeights, numFeatures) - instr.logSuccess(m) - m - } - - @Since("1.4.0") - override def copy(extra: ParamMap): GBTRegressor = defaultCopy(extra) -} - -@Since("1.4.0") -object GBTRegressor extends DefaultParamsReadable[GBTRegressor] { - - /** Accessor for supported loss settings: squared (L2), absolute (L1) */ - @Since("1.4.0") - final val supportedLossTypes: Array[String] = GBTRegressorParams.supportedLossTypes - - @Since("2.0.0") - override def load(path: String): GBTRegressor = super.load(path) -} - -/** - * Gradient-Boosted Trees (GBTs) - * model for regression. - * It supports both continuous and categorical features. - * @param _trees Decision trees in the ensemble. - * @param _treeWeights Weights for the decision trees in the ensemble. 
- */ -@Since("1.4.0") -class GBTRegressionModel private[ml]( - override val uid: String, - private val _trees: Array[DecisionTreeRegressionModel], - private val _treeWeights: Array[Double], - override val numFeatures: Int) - extends PredictionModel[Vector, GBTRegressionModel] - with GBTRegressorParams with TreeEnsembleModel[DecisionTreeRegressionModel] - with MLWritable with Serializable { - - require(_trees.nonEmpty, "GBTRegressionModel requires at least 1 tree.") - require(_trees.length == _treeWeights.length, "GBTRegressionModel given trees, treeWeights of" + - s" non-matching lengths (${_trees.length}, ${_treeWeights.length}, respectively).") - - /** - * Construct a GBTRegressionModel - * @param _trees Decision trees in the ensemble. - * @param _treeWeights Weights for the decision trees in the ensemble. - */ - @Since("1.4.0") - def this(uid: String, _trees: Array[DecisionTreeRegressionModel], _treeWeights: Array[Double]) = - this(uid, _trees, _treeWeights, -1) - - @Since("1.4.0") - override def trees: Array[DecisionTreeRegressionModel] = _trees - - /** - * Number of trees in ensemble - */ - @Since("2.0.0") - val getNumTrees: Int = trees.length - - @Since("1.4.0") - override def treeWeights: Array[Double] = _treeWeights - - override protected def transformImpl(dataset: Dataset[_]): DataFrame = { - val bcastModel = dataset.sparkSession.sparkContext.broadcast(this) - val predictUDF = udf { (features: Any) => - bcastModel.value.predict(features.asInstanceOf[Vector]) - } - dataset.withColumn($(predictionCol), predictUDF(col($(featuresCol)))) - } - - override protected def predict(features: Vector): Double = { - // TODO: When we add a generic Boosting class, handle transform there? SPARK-7129 - // Classifies by thresholding sum of weighted tree predictions - val treePredictions = _trees.map(_.rootNode.predictImpl(features).prediction) - blas.ddot(numTrees, treePredictions, 1, _treeWeights, 1) - } - - /** Number of trees in ensemble */ - val numTrees: Int = trees.length - - @Since("1.4.0") - override def copy(extra: ParamMap): GBTRegressionModel = { - copyValues(new GBTRegressionModel(uid, _trees, _treeWeights, numFeatures), - extra).setParent(parent) - } - - @Since("1.4.0") - override def toString: String = { - s"GBTRegressionModel (uid=$uid) with $numTrees trees" - } - - /** - * Estimate of the importance of each feature. - * - * Each feature's importance is the average of its importance across all trees in the ensemble - * The importance vector is normalized to sum to 1. This method is suggested by Hastie et al. - * (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.) - * and follows the implementation from scikit-learn. 
- * - * @see `DecisionTreeRegressionModel.featureImportances` - */ - @Since("2.0.0") - lazy val featureImportances: Vector = TreeEnsembleModel.featureImportances(trees, numFeatures) - - /** (private[ml]) Convert to a model in the old API */ - private[ml] def toOld: OldGBTModel = { - new OldGBTModel(OldAlgo.Regression, _trees.map(_.toOld), _treeWeights) - } - - @Since("2.0.0") - override def write: MLWriter = new GBTRegressionModel.GBTRegressionModelWriter(this) -} - -@Since("2.0.0") -object GBTRegressionModel extends MLReadable[GBTRegressionModel] { - - @Since("2.0.0") - override def read: MLReader[GBTRegressionModel] = new GBTRegressionModelReader - - @Since("2.0.0") - override def load(path: String): GBTRegressionModel = super.load(path) - - private[GBTRegressionModel] - class GBTRegressionModelWriter(instance: GBTRegressionModel) extends MLWriter { - - override protected def saveImpl(path: String): Unit = { - val extraMetadata: JObject = Map( - "numFeatures" -> instance.numFeatures, - "numTrees" -> instance.getNumTrees) - EnsembleModelReadWrite.saveImpl(instance, path, sparkSession, extraMetadata) - } - } - - private class GBTRegressionModelReader extends MLReader[GBTRegressionModel] { - - /** Checked against metadata when loading model */ - private val className = classOf[GBTRegressionModel].getName - private val treeClassName = classOf[DecisionTreeRegressionModel].getName - - override def load(path: String): GBTRegressionModel = { - implicit val format = DefaultFormats - val (metadata: Metadata, treesData: Array[(Metadata, Node)], treeWeights: Array[Double]) = - EnsembleModelReadWrite.loadImpl(path, sparkSession, className, treeClassName) - - val numFeatures = (metadata.metadata \ "numFeatures").extract[Int] - val numTrees = (metadata.metadata \ "numTrees").extract[Int] - - val trees: Array[DecisionTreeRegressionModel] = treesData.map { - case (treeMetadata, root) => - val tree = - new DecisionTreeRegressionModel(treeMetadata.uid, root, numFeatures) - DefaultParamsReader.getAndSetParams(tree, treeMetadata) - tree - } - - require(numTrees == trees.length, s"GBTRegressionModel.load expected $numTrees" + - s" trees based on metadata but found ${trees.length} trees.") - - val model = new GBTRegressionModel(metadata.uid, trees, treeWeights, numFeatures) - DefaultParamsReader.getAndSetParams(model, metadata) - model - } - } - - /** Convert a model from the old API */ - private[ml] def fromOld( - oldModel: OldGBTModel, - parent: GBTRegressor, - categoricalFeatures: Map[Int, Int], - numFeatures: Int = -1): GBTRegressionModel = { - require(oldModel.algo == OldAlgo.Regression, "Cannot convert GradientBoostedTreesModel" + - s" with algo=${oldModel.algo} (old API) to GBTRegressionModel (new API).") - val newTrees = oldModel.trees.map { tree => - // parent for each tree is null since there is no good way to set this. - DecisionTreeRegressionModel.fromOld(tree, null, categoricalFeatures) - } - val uid = if (parent != null) parent.uid else Identifiable.randomUID("gbtr") - new GBTRegressionModel(uid, newTrees, oldModel.treeWeights, numFeatures) - } -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala deleted file mode 100644 index 0f00bce..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala +++ /dev/null @@ -1,565 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. 
-* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.ml.regression - -import scala.collection.mutable - -import breeze.linalg.{DenseVector => BDV} -import breeze.optimize.{CachedDiffFunction, LBFGSL, OWLQNL} - -import org.apache.spark.SparkException -import org.apache.spark.annotation.Since -import org.apache.spark.internal.Logging -import org.apache.spark.ml.StaticUtils -import org.apache.spark.ml.feature.Instance -import org.apache.spark.ml.linalg.{Vector, Vectors} -import org.apache.spark.ml.linalg.BLAS._ -import org.apache.spark.ml.optim.WeightedLeastSquares -import org.apache.spark.ml.optim.aggregator.{HuberAggregatorX, LeastSquaresAggregatorX} -import org.apache.spark.ml.optim.loss.{L2Regularization, RDDLossFunctionX} -import org.apache.spark.ml.param.ParamMap -import org.apache.spark.ml.util._ -import org.apache.spark.mllib.linalg.VectorImplicits._ -import org.apache.spark.mllib.stat.MultivariateOnlineSummarizer -import org.apache.spark.rdd.RDD -import org.apache.spark.sql.{Dataset, Row} -import org.apache.spark.sql.functions._ -import org.apache.spark.storage.StorageLevel - -/** - * Linear regression. - * - * The learning objective is to minimize the specified loss function, with regularization. - * This supports two kinds of loss: - * - squaredError (a.k.a squared loss) - * - huber (a hybrid of squared error for relatively small errors and absolute error for - * relatively large ones, and we estimate the scale parameter from training data) - * - * This supports multiple types of regularization: - * - none (a.k.a. ordinary least squares) - * - L2 (ridge regression) - * - L1 (Lasso) - * - L2 + L1 (elastic net) - * - * The squared error objective function is: - * - *
- * $$ - * \begin{align} - * \min_{w}\frac{1}{2n}{\sum_{i=1}^n(X_{i}w - y_{i})^{2} + - * \lambda\left[\frac{1-\alpha}{2}{||w||_{2}}^{2} + \alpha{||w||_{1}}\right]} - * \end{align} - * $$ - *
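(To make the bracketed penalty term concrete, a small hedged Scala sketch follows; it is not part of this patch and the helper name is hypothetical. Here `lambda` corresponds to regParam and `alpha` to elasticNetParam.)

    // lambda * ((1 - alpha) / 2 * ||w||_2^2 + alpha * ||w||_1)
    def elasticNetPenalty(w: Array[Double], lambda: Double, alpha: Double): Double = {
      val l1Norm = w.map(math.abs).sum
      val l2NormSquared = w.map(v => v * v).sum
      lambda * ((1.0 - alpha) / 2.0 * l2NormSquared + alpha * l1Norm)
    }

Setting alpha = 0 recovers the pure L2 (ridge) penalty, and alpha = 1 the pure L1 (lasso) penalty.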
- * - * The huber objective function is: - * - *
- * $$ - * \begin{align} - * \min_{w, \sigma}\frac{1}{2n}{\sum_{i=1}^n\left(\sigma + - * H_m\left(\frac{X_{i}w - y_{i}}{\sigma}\right)\sigma\right) + \frac{1}{2}\lambda {||w||_2}^2} - * \end{align} - * $$ - *
- * - * where - * - *
- * $$ - * \begin{align} - * H_m(z) = \begin{cases} - * z^2, & \text {if } |z| < \epsilon, \\ - * 2\epsilon|z| - \epsilon^2, & \text{otherwise} - * \end{cases} - * \end{align} - * $$ - *
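(As a quick illustration of $H_m$, a hedged sketch, not part of this patch; `epsilon` plays the role of the threshold $m$, matching the epsilon param used later in this class.)

    // z^2 when |z| < epsilon, else the linear branch 2 * epsilon * |z| - epsilon^2;
    // both the value and the slope of the two branches agree at |z| = epsilon.
    def huberLoss(z: Double, epsilon: Double): Double =
      if (math.abs(z) < epsilon) z * z
      else 2.0 * epsilon * math.abs(z) - epsilon * epsilon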
- * - * Note: Fitting with huber loss only supports none and L2 regularization. - */ -@Since("1.3.0") -class LinearRegression @Since("1.3.0") (@Since("1.3.0") override val uid: String) - extends Regressor[Vector, LinearRegression, LinearRegressionModel] - with LinearRegressionParams with DefaultParamsWritable with Logging { - - import LinearRegression._ - - @Since("1.4.0") - def this() = this(Identifiable.randomUID("linReg")) - - /** - * Set the regularization parameter. - * Default is 0.0. - * - * @group setParam - */ - @Since("1.3.0") - def setRegParam(value: Double): this.type = set(regParam, value) - setDefault(regParam -> 0.0) - - /** - * Set if we should fit the intercept. - * Default is true. - * - * @group setParam - */ - @Since("1.5.0") - def setFitIntercept(value: Boolean): this.type = set(fitIntercept, value) - setDefault(fitIntercept -> true) - - /** - * Whether to standardize the training features before fitting the model. - * The coefficients of models will be always returned on the original scale, - * so it will be transparent for users. - * Default is true. - * - * @note With/without standardization, the models should be always converged - * to the same solution when no regularization is applied. In R's GLMNET package, - * the default behavior is true as well. - * - * @group setParam - */ - @Since("1.5.0") - def setStandardization(value: Boolean): this.type = set(standardization, value) - setDefault(standardization -> true) - - /** - * Set the ElasticNet mixing parameter. - * For alpha = 0, the penalty is an L2 penalty. - * For alpha = 1, it is an L1 penalty. - * For alpha in (0,1), the penalty is a combination of L1 and L2. - * Default is 0.0 which is an L2 penalty. - * - * Note: Fitting with huber loss only supports None and L2 regularization, - * so throws exception if this param is non-zero value. - * - * @group setParam - */ - @Since("1.4.0") - def setElasticNetParam(value: Double): this.type = set(elasticNetParam, value) - setDefault(elasticNetParam -> 0.0) - - /** - * Set the maximum number of iterations. - * Default is 100. - * - * @group setParam - */ - @Since("1.3.0") - def setMaxIter(value: Int): this.type = set(maxIter, value) - setDefault(maxIter -> 100) - - /** - * Set the convergence tolerance of iterations. - * Smaller value will lead to higher accuracy with the cost of more iterations. - * Default is 1E-6. - * - * @group setParam - */ - @Since("1.4.0") - def setTol(value: Double): this.type = set(tol, value) - setDefault(tol -> 1E-6) - - /** - * Whether to over-/under-sample training instances according to the given weights in weightCol. - * If not set or empty, all instances are treated equally (weight 1.0). - * Default is not set, so all instances have weight one. - * - * @group setParam - */ - @Since("1.6.0") - def setWeightCol(value: String): this.type = set(weightCol, value) - - /** - * Set the solver algorithm used for optimization. - * In case of linear regression, this can be "l-bfgs", "normal" and "auto". - * - "l-bfgs" denotes Limited-memory BFGS which is a limited-memory quasi-Newton - * optimization method. - * - "normal" denotes using Normal Equation as an analytical solution to the linear regression - * problem. This solver is limited to `LinearRegression.MAX_FEATURES_FOR_NORMAL_SOLVER`. - * - "auto" (default) means that the solver algorithm is selected automatically. - * The Normal Equations solver will be used when possible, but this will automatically fall - * back to iterative optimization methods when needed. 
- * - * Note: Fitting with huber loss doesn't support normal solver, - * so throws exception if this param was set with "normal". - * @group setParam - */ - @Since("1.6.0") - def setSolver(value: String): this.type = set(solver, value) - setDefault(solver -> Auto) - - /** - * Suggested depth for treeAggregate (greater than or equal to 2). - * If the dimensions of features or the number of partitions are large, - * this param could be adjusted to a larger size. - * Default is 2. - * - * @group expertSetParam - */ - @Since("2.1.0") - def setAggregationDepth(value: Int): this.type = set(aggregationDepth, value) - setDefault(aggregationDepth -> 2) - - /** - * Sets the value of param [[loss]]. - * Default is "squaredError". - * - * @group setParam - */ - @Since("2.3.0") - def setLoss(value: String): this.type = set(loss, value) - setDefault(loss -> SquaredError) - - /** - * Sets the value of param [[epsilon]]. - * Default is 1.35. - * - * @group setExpertParam - */ - @Since("2.3.0") - def setEpsilon(value: Double): this.type = set(epsilon, value) - setDefault(epsilon -> 1.35) - - override protected def train(dataset: Dataset[_]): LinearRegressionModel = { - // Extract the number of features before deciding optimization solver. - val numFeatures = dataset.select(col($(featuresCol))).first() - .getAs[Vector](StaticUtils.ZERO_INT).size - val w = if (!isDefined(weightCol) || $(weightCol).isEmpty) lit(1.0) else col($(weightCol)) - - val instances: RDD[Instance] = dataset.select( - col($(labelCol)), w, col($(featuresCol))).rdd.map { - case Row(label: Double, weight: Double, features: Vector) => - Instance(label, weight + StaticUtils.ZERO_DOUBLE, features) - } - - val instr = Instrumentation.create(this, dataset) - instr.logParams(labelCol, featuresCol, weightCol, predictionCol, solver, tol, elasticNetParam, - fitIntercept, maxIter, regParam, standardization, aggregationDepth, loss, epsilon) - instr.logNumFeatures(numFeatures) - - if ($(loss) == SquaredError && (($(solver) == Auto && - numFeatures <= WeightedLeastSquares.MAX_NUM_FEATURES) || $(solver) == Normal)) { - // For low dimensional data, WeightedLeastSquares is more efficient since the - // training algorithm only requires one pass through the data. (SPARK-10668) - - val optimizer = new WeightedLeastSquares($(fitIntercept), $(regParam), - elasticNetParam = $(elasticNetParam), $(standardization), true, - solverType = WeightedLeastSquares.Auto, maxIter = $(maxIter), tol = $(tol)) - val model = optimizer.fit(instances) - // When it is trained by WeightedLeastSquares, training summary does not - // attach returned model. 
- val lrModel = copyValues(new LinearRegressionModel(uid, model.coefficients, model.intercept)) - val (summaryModel, predictionColName) = lrModel.findSummaryModelAndPredictionCol() - val trainingSummary = new LinearRegressionTrainingSummary( - summaryModel.transform(dataset), - predictionColName, - $(labelCol), - $(featuresCol), - summaryModel, - model.diagInvAtWA.toArray, - model.objectiveHistory) - - lrModel.setSummary(Some(trainingSummary)) - instr.logSuccess(lrModel) - return lrModel - } - - val handlePersistence = dataset.storageLevel == StorageLevel.NONE - if (handlePersistence) instances.persist(StorageLevel.MEMORY_AND_DISK) - - val (featuresSummarizer, ySummarizer) = { - val seqOp = (c: (MultivariateOnlineSummarizer, MultivariateOnlineSummarizer), - instance: Instance) => - (c._1.add(instance.features, instance.weight), - c._2.add(Vectors.dense(instance.label), instance.weight)) - - val combOp = (c1: (MultivariateOnlineSummarizer, MultivariateOnlineSummarizer), - c2: (MultivariateOnlineSummarizer, MultivariateOnlineSummarizer)) => - (c1._1.merge(c2._1), c1._2.merge(c2._2)) - - instances.treeAggregate( - (new MultivariateOnlineSummarizer, new MultivariateOnlineSummarizer) - )(seqOp, combOp, $(aggregationDepth)) - } - - val yMean = ySummarizer.mean(0) - val rawYStd = math.sqrt(ySummarizer.variance(0)) - if (rawYStd == 0.0) { - if ($(fitIntercept) || yMean == 0.0) { - // If the rawYStd==0 and fitIntercept==true, then the intercept is yMean with - // zero coefficient; as a result, training is not needed. - // Also, if yMean==0 and rawYStd==0, all the coefficients are zero regardless of - // the fitIntercept. - if (yMean == 0.0) { - logWarning(s"Mean and standard deviation of the label are zero, so the coefficients " + - s"and the intercept will all be zero; as a result, training is not needed.") - } else { - logWarning(s"The standard deviation of the label is zero, so the coefficients will be " + - s"zeros and the intercept will be the mean of the label; as a result, " + - s"training is not needed.") - } - if (handlePersistence) instances.unpersist() - val coefficients = Vectors.sparse(numFeatures, Seq.empty) - val intercept = yMean - - val model = copyValues(new LinearRegressionModel(uid, coefficients, intercept)) - // Handle possible missing or invalid prediction columns - val (summaryModel, predictionColName) = model.findSummaryModelAndPredictionCol() - - val trainingSummary = new LinearRegressionTrainingSummary( - summaryModel.transform(dataset), - predictionColName, - $(labelCol), - $(featuresCol), - model, - Array(0D), - Array(0D)) - - model.setSummary(Some(trainingSummary)) - instr.logSuccess(model) - return model - } else { - require($(regParam) == 0.0, "The standard deviation of the label is zero. " + - "Model cannot be regularized.") - logWarning(s"The standard deviation of the label is zero. " + - "Consider setting fitIntercept=true.") - } - } - - // if y is constant (rawYStd is zero), then y cannot be scaled. In this case - // setting yStd=abs(yMean) ensures that y is not scaled anymore in l-bfgs algorithm. 
- val yStd = if (rawYStd > 0) rawYStd else math.abs(yMean)
- val featuresMean = featuresSummarizer.mean.toArray
- val featuresStd = featuresSummarizer.variance.toArray.map(math.sqrt)
- val bcFeaturesMean = instances.context.broadcast(featuresMean)
- val bcFeaturesStd = instances.context.broadcast(featuresStd.map(t =>
- if (t != 0d) 1d / t else 0d))
-
- if (!$(fitIntercept) && (0 until numFeatures).exists { i =>
- featuresStd(i) == 0.0 && featuresMean(i) != 0.0 }) {
- logWarning("Fitting LinearRegressionModel without intercept on a dataset with a " +
- "constant nonzero column; Spark MLlib outputs zero coefficients for constant nonzero " +
- "columns. This behavior is the same as R glmnet but different from LIBSVM.")
- }
-
- // Since we implicitly scale the features when computing the cost function
- // (to improve convergence), the effective regParam changes accordingly.
- val effectiveRegParam = $(loss) match {
- case SquaredError => $(regParam) / yStd
- case Huber => $(regParam)
- }
- val effectiveL1RegParam = $(elasticNetParam) * effectiveRegParam
- val effectiveL2RegParam = (1.0 - $(elasticNetParam)) * effectiveRegParam
-
- val getFeaturesStd = (j: Int) => if (j >= 0 && j < numFeatures) featuresStd(j) else 0.0
- val regularization = if (effectiveL2RegParam != 0.0) {
- val shouldApply = (idx: Int) => idx >= 0 && idx < numFeatures
- Some(new L2Regularization(effectiveL2RegParam, shouldApply,
- if ($(standardization)) None else Some(getFeaturesStd)))
- } else {
- None
- }
-
- val costFun = $(loss) match {
- case SquaredError =>
- val getAggregatorFunc = new LeastSquaresAggregatorX(yStd, yMean, $(fitIntercept),
- bcFeaturesStd, bcFeaturesMean)(_)
- new RDDLossFunctionX(instances, getAggregatorFunc, regularization, $(aggregationDepth))
- case Huber =>
- val getAggregatorFunc = new HuberAggregatorX($(fitIntercept), $(epsilon), bcFeaturesStd)(_)
- new RDDLossFunctionX(instances, getAggregatorFunc, regularization, $(aggregationDepth))
- }
-
- val optimizer = $(loss) match {
- case SquaredError =>
- val dim = numFeatures
- if ($(elasticNetParam) == 0.0 || effectiveRegParam == 0.0) {
- new LBFGSL($(maxIter), 10, $(tol))
- } else {
- val standardizationParam = $(standardization)
- val effectiveL1Reg =
- if (standardizationParam) {
- BDV[Double](Array.fill(dim)(effectiveL1RegParam))
- } else {
- // If `standardization` is false, we still standardize the data
- // to improve the rate of convergence; as a result, we have to
- // perform this reverse standardization by penalizing each component
- // differently to get effectively the same objective function when
- // the training dataset is not standardized.
- BDV[Double](featuresStd.map(x => if (x != 0.0) effectiveL1RegParam / x else 0.0)) - } - new OWLQNL($(maxIter), 10, $(tol), effectiveL1Reg) - } - case Huber => - val dim = if ($(fitIntercept)) numFeatures + 2 else numFeatures + 1 - val lowerBounds = BDV[Double](Array.fill(dim)(Double.MinValue)) - // Optimize huber loss in space "\sigma > 0" - lowerBounds(dim - 1) = Double.MinPositiveValue - val upperBounds = BDV[Double](Array.fill(dim)(Double.MaxValue)) - new LBFGSL(lowerBounds, upperBounds, $(maxIter), 10, $(tol)) - } - - val initialValues = $(loss) match { - case SquaredError => - Vectors.zeros(numFeatures) - case Huber => - val dim = if ($(fitIntercept)) numFeatures + 2 else numFeatures + 1 - Vectors.dense(Array.fill(dim)(1.0)) - } - - val states = optimizer.iterations(new CachedDiffFunction(costFun), - initialValues.asBreeze.toDenseVector) - - val (coefficients, intercept, scale, objectiveHistory) = { - /* - Note that in Linear Regression, the objective history (loss + regularization) returned - from optimizer is computed in the scaled space given by the following formula. -
- $$
- L = \frac{1}{2n} \left\| \sum_i w_i (x_i - \bar{x_i}) / \hat{x_i} - (y - \bar{y}) / \hat{y} \right\|^2 + \text{regTerms}
- $$
-
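- Here $\bar{x_i}$ and $\hat{x_i}$ are the mean and standard deviation of feature $i$,
- $\bar{y}$ and $\hat{y}$ those of the label, and $w_i$ the coefficients in the scaled space.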
- */ - val arrayBuilder = mutable.ArrayBuilder.make[Double] - var state: optimizer.State = null - while (states.hasNext) { - state = states.next() - arrayBuilder += state.adjustedValue - } - if (state == null) { - val msg = s"${optimizer.getClass.getName} failed." - logError(msg) - throw new SparkException(msg) - } - - bcFeaturesMean.destroy(blocking = false) - bcFeaturesStd.destroy(blocking = false) - - val parameters = state.x.toArray.clone() - - /* - The coefficients are trained in the scaled space; we're converting them back to - the original space. - */ - val rawCoefficients: Array[Double] = $(loss) match { - case SquaredError => parameters - case Huber => parameters.slice(0, numFeatures) - } - - var i = 0 - val len = rawCoefficients.length - val multiplier = $(loss) match { - case SquaredError => yStd - case Huber => 1.0 - } - while (i < len) { - rawCoefficients(i) *= { if (featuresStd(i) != 0.0) multiplier / featuresStd(i) else 0.0 } - i += 1 - } - - val interceptValue: Double = if ($(fitIntercept)) { - $(loss) match { - case SquaredError => - /* - The intercept of squared error in R's GLMNET is computed using closed form - after the coefficients are converged. See the following discussion for detail. - http://stats.stackexchange.com/questions/13617/how-is-the-intercept-computed-in-glmnet - */ - yMean - dot(Vectors.dense(rawCoefficients), Vectors.dense(featuresMean)) - case Huber => parameters(numFeatures) - } - } else { - 0.0 - } - - val scaleValue: Double = $(loss) match { - case SquaredError => 1.0 - case Huber => parameters.last - } - - (Vectors.dense(rawCoefficients).compressed, interceptValue, scaleValue, arrayBuilder.result()) - } - - if (handlePersistence) instances.unpersist() - - val model = copyValues(new LinearRegressionModel(uid, coefficients, intercept, scale)) - // Handle possible missing or invalid prediction columns - val (summaryModel, predictionColName) = model.findSummaryModelAndPredictionCol() - - val trainingSummary = new LinearRegressionTrainingSummary( - summaryModel.transform(dataset), - predictionColName, - $(labelCol), - $(featuresCol), - model, - Array(0D), - objectiveHistory) - - model.setSummary(Some(trainingSummary)) - instr.logSuccess(model) - model - } - - @Since("1.4.0") - override def copy(extra: ParamMap): LinearRegression = defaultCopy(extra) -} - -@Since("1.6.0") -object LinearRegression extends DefaultParamsReadable[LinearRegression] { - - @Since("1.6.0") - override def load(path: String): LinearRegression = super.load(path) - - /** - * When using `LinearRegression.solver` == "normal", the solver must limit the number of - * features to at most this number. The entire covariance matrix X^T^X will be collected - * to the driver. This limit helps prevent memory overflow errors. - */ - @Since("2.1.0") - val MAX_FEATURES_FOR_NORMAL_SOLVER: Int = WeightedLeastSquares.MAX_NUM_FEATURES - - /** String name for "auto". */ - private[regression] val Auto = "auto" - - /** String name for "normal". */ - private[regression] val Normal = "normal" - - /** String name for "l-bfgs". */ - private[regression] val LBFGS = "l-bfgs" - - /** Set of solvers that LinearRegression supports. */ - private[regression] val supportedSolvers = Array(Auto, Normal, LBFGS) - - /** String name for "squaredError". */ - private[regression] val SquaredError = "squaredError" - - /** String name for "huber". */ - private[regression] val Huber = "huber" - - /** Set of loss function names that LinearRegression supports. 
*/ - private[regression] val supportedLosses = Array(SquaredError, Huber) -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/regression/RandomForestRegressor.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/regression/RandomForestRegressor.scala deleted file mode 100644 index ada1b5e..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/regression/RandomForestRegressor.scala +++ /dev/null @@ -1,312 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.ml.regression - -import org.json4s.{DefaultFormats, JObject} -import org.json4s.JsonDSL._ - -import org.apache.spark.annotation.Since -import org.apache.spark.ml.{PredictionModel, Predictor} -import org.apache.spark.ml.feature.LabeledPoint -import org.apache.spark.ml.linalg.Vector -import org.apache.spark.ml.param.ParamMap -import org.apache.spark.ml.tree._ -import org.apache.spark.ml.tree.impl.RandomForest -import org.apache.spark.ml.util._ -import org.apache.spark.ml.util.DefaultParamsReader.Metadata -import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo} -import org.apache.spark.mllib.tree.model.{RandomForestModel => OldRandomForestModel} -import org.apache.spark.rdd.RDD -import org.apache.spark.sql.{DataFrame, Dataset} -import org.apache.spark.sql.functions._ - - -/** - * Random Forest - * learning algorithm for regression. - * It supports both continuous and categorical features. - */ -@Since("1.4.0") -class RandomForestRegressor @Since("1.4.0") (@Since("1.4.0") override val uid: String) - extends Predictor[Vector, RandomForestRegressor, RandomForestRegressionModel] - with RandomForestRegressorParams with DefaultParamsWritable { - - @Since("1.4.0") - def this() = this(Identifiable.randomUID("rfr")) - - // Override parameter setters from parent trait for Java API compatibility. 
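- // Usage sketch (hypothetical values, for illustration only):
- //   val rf = new RandomForestRegressor().setNumTrees(20).setMaxDepth(5)
- //   val model = rf.fit(trainingDf) // trainingDf: "label" and "features" columns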
- - // Parameters from TreeRegressorParams: - - /** @group setParam */ - @Since("1.4.0") - override def setMaxDepth(value: Int): this.type = set(maxDepth, value) - - /** @group setParam */ - @Since("1.4.0") - override def setMaxBins(value: Int): this.type = set(maxBins, value) - - /** @group setParam */ - @Since("1.4.0") - override def setMinInstancesPerNode(value: Int): this.type = set(minInstancesPerNode, value) - - /** @group setParam */ - @Since("1.4.0") - override def setMinInfoGain(value: Double): this.type = set(minInfoGain, value) - - /** @group expertSetParam */ - @Since("1.4.0") - override def setMaxMemoryInMB(value: Int): this.type = set(maxMemoryInMB, value) - - /** @group expertSetParam */ - @Since("1.4.0") - override def setCacheNodeIds(value: Boolean): this.type = set(cacheNodeIds, value) - - /** - * Specifies how often to checkpoint the cached node IDs. - * E.g. 10 means that the cache will get checkpointed every 10 iterations. - * This is only used if cacheNodeIds is true and if the checkpoint directory is set in - * [[org.apache.spark.SparkContext]]. - * Must be at least 1. - * (default = 10) - * @group setParam - */ - @Since("1.4.0") - override def setCheckpointInterval(value: Int): this.type = set(checkpointInterval, value) - - /** @group setParam */ - @Since("1.4.0") - override def setImpurity(value: String): this.type = set(impurity, value) - - // Parameters from TreeEnsembleParams: - - /** @group setParam */ - @Since("1.4.0") - override def setSubsamplingRate(value: Double): this.type = set(subsamplingRate, value) - - /** @group setParam */ - @Since("1.4.0") - override def setSeed(value: Long): this.type = set(seed, value) - - // Parameters from RandomForestParams: - - /** @group setParam */ - @Since("1.4.0") - override def setNumTrees(value: Int): this.type = set(numTrees, value) - - /** @group setParam */ - @Since("1.4.0") - override def setFeatureSubsetStrategy(value: String): this.type = - set(featureSubsetStrategy, value) - - override protected def train(dataset: Dataset[_]): RandomForestRegressionModel = { - val categoricalFeatures: Map[Int, Int] = - MetadataUtils.getCategoricalFeatures(dataset.schema($(featuresCol))) - val oldDataset: RDD[LabeledPoint] = extractLabeledPoints(dataset) - val strategy = - super.getOldStrategy(categoricalFeatures, numClasses = 0, OldAlgo.Regression, getOldImpurity) - - val instr = Instrumentation.create(this, oldDataset) - instr.logParams(labelCol, featuresCol, predictionCol, impurity, numTrees, - featureSubsetStrategy, maxDepth, maxBins, maxMemoryInMB, minInfoGain, - minInstancesPerNode, seed, subsamplingRate, cacheNodeIds, checkpointInterval) - - val trees = RandomForest - .run(oldDataset, strategy, getNumTrees, getFeatureSubsetStrategy, getSeed, Some(instr)) - .map(_.asInstanceOf[DecisionTreeRegressionModel]) - - val numFeatures = oldDataset.first().features.size - val m = new RandomForestRegressionModel(uid, trees, numFeatures) - instr.logSuccess(m) - m - } - - @Since("1.4.0") - override def copy(extra: ParamMap): RandomForestRegressor = defaultCopy(extra) -} - -@Since("1.4.0") -object RandomForestRegressor extends DefaultParamsReadable[RandomForestRegressor]{ - /** Accessor for supported impurity settings: variance */ - @Since("1.4.0") - final val supportedImpurities: Array[String] = TreeRegressorParams.supportedImpurities - - /** Accessor for supported featureSubsetStrategy settings: auto, all, onethird, sqrt, log2 */ - @Since("1.4.0") - final val supportedFeatureSubsetStrategies: Array[String] = - 
TreeEnsembleParams.supportedFeatureSubsetStrategies
-
- @Since("2.0.0")
- override def load(path: String): RandomForestRegressor = super.load(path)
-
-}
-
-/**
- * Random Forest model for regression.
- * It supports both continuous and categorical features.
- *
- * @param _trees Decision trees in the ensemble.
- * @param numFeatures Number of features used by this model
- */
-@Since("1.4.0")
-class RandomForestRegressionModel private[ml] (
- override val uid: String,
- private val _trees: Array[DecisionTreeRegressionModel],
- override val numFeatures: Int)
- extends PredictionModel[Vector, RandomForestRegressionModel]
- with RandomForestRegressorParams with TreeEnsembleModel[DecisionTreeRegressionModel]
- with MLWritable with Serializable {
-
- require(_trees.nonEmpty, "RandomForestRegressionModel requires at least 1 tree.")
-
- /**
- * Construct a random forest regression model, with all trees weighted equally.
- *
- * @param trees Component trees
- */
- private[ml] def this(trees: Array[DecisionTreeRegressionModel], numFeatures: Int) =
- this(Identifiable.randomUID("rfr"), trees, numFeatures)
-
- @Since("1.4.0")
- override def trees: Array[DecisionTreeRegressionModel] = _trees
-
- // Note: We may add support for weights (based on tree performance) later on.
- private lazy val _treeWeights: Array[Double] = Array.fill[Double](_trees.length)(1.0)
-
- @Since("1.4.0")
- override def treeWeights: Array[Double] = _treeWeights
-
- override protected def transformImpl(dataset: Dataset[_]): DataFrame = {
- val bcastModel = dataset.sparkSession.sparkContext.broadcast(this)
- val predictUDF = udf { (features: Any) =>
- bcastModel.value.predict(features.asInstanceOf[Vector])
- }
- dataset.withColumn($(predictionCol), predictUDF(col($(featuresCol))))
- }
-
- override protected def predict(features: Vector): Double = {
- // TODO: When we add a generic Bagging class, handle transform there. SPARK-7128
- // Predict average of tree predictions.
- // Ignore the weights since all are 1.0 for now.
- _trees.map(_.rootNode.predictImpl(features).prediction).sum / getNumTrees
- }
-
- @Since("1.4.0")
- override def copy(extra: ParamMap): RandomForestRegressionModel = {
- copyValues(new RandomForestRegressionModel(uid, _trees, numFeatures), extra).setParent(parent)
- }
-
- @Since("1.4.0")
- override def toString: String = {
- s"RandomForestRegressionModel (uid=$uid) with $getNumTrees trees"
- }
-
- /**
- * Estimate of the importance of each feature.
- *
- * Each feature's importance is the average of its importance across all trees in the ensemble.
- * The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
- * (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
- * and follows the implementation from scikit-learn.
- * - * @see `DecisionTreeRegressionModel.featureImportances` - */ - @Since("1.5.0") - lazy val featureImportances: Vector = TreeEnsembleModel.featureImportances(trees, numFeatures) - - /** (private[ml]) Convert to a model in the old API */ - private[ml] def toOld: OldRandomForestModel = { - new OldRandomForestModel(OldAlgo.Regression, _trees.map(_.toOld)) - } - - @Since("2.0.0") - override def write: MLWriter = - new RandomForestRegressionModel.RandomForestRegressionModelWriter(this) -} - -@Since("2.0.0") -object RandomForestRegressionModel extends MLReadable[RandomForestRegressionModel] { - - @Since("2.0.0") - override def read: MLReader[RandomForestRegressionModel] = new RandomForestRegressionModelReader - - @Since("2.0.0") - override def load(path: String): RandomForestRegressionModel = super.load(path) - - private[RandomForestRegressionModel] - class RandomForestRegressionModelWriter(instance: RandomForestRegressionModel) - extends MLWriter { - - override protected def saveImpl(path: String): Unit = { - val extraMetadata: JObject = Map( - "numFeatures" -> instance.numFeatures, - "numTrees" -> instance.getNumTrees) - EnsembleModelReadWrite.saveImpl(instance, path, sparkSession, extraMetadata) - } - } - - private class RandomForestRegressionModelReader extends MLReader[RandomForestRegressionModel] { - - /** Checked against metadata when loading model */ - private val className = classOf[RandomForestRegressionModel].getName - private val treeClassName = classOf[DecisionTreeRegressionModel].getName - - override def load(path: String): RandomForestRegressionModel = { - implicit val format = DefaultFormats - val (metadata: Metadata, treesData: Array[(Metadata, Node)], treeWeights: Array[Double]) = - EnsembleModelReadWrite.loadImpl(path, sparkSession, className, treeClassName) - val numFeatures = (metadata.metadata \ "numFeatures").extract[Int] - val numTrees = (metadata.metadata \ "numTrees").extract[Int] - - val trees: Array[DecisionTreeRegressionModel] = treesData.map { case (treeMetadata, root) => - val tree = - new DecisionTreeRegressionModel(treeMetadata.uid, root, numFeatures) - DefaultParamsReader.getAndSetParams(tree, treeMetadata) - tree - } - require(numTrees == trees.length, s"RandomForestRegressionModel.load expected $numTrees" + - s" trees based on metadata but found ${trees.length} trees.") - - val model = new RandomForestRegressionModel(metadata.uid, trees, numFeatures) - DefaultParamsReader.getAndSetParams(model, metadata) - model - } - } - - /** Convert a model from the old API */ - private[ml] def fromOld( - oldModel: OldRandomForestModel, - parent: RandomForestRegressor, - categoricalFeatures: Map[Int, Int], - numFeatures: Int = -1): RandomForestRegressionModel = { - require(oldModel.algo == OldAlgo.Regression, "Cannot convert RandomForestModel" + - s" with algo=${oldModel.algo} (old API) to RandomForestRegressionModel (new API).") - val newTrees = oldModel.trees.map { tree => - // parent for each tree is null since there is no good way to set this. 
- DecisionTreeRegressionModel.fromOld(tree, null, categoricalFeatures) - } - val uid = if (parent != null) parent.uid else Identifiable.randomUID("rfr") - new RandomForestRegressionModel(uid, newTrees, numFeatures) - } -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/stat/Correlation.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/stat/Correlation.scala deleted file mode 100644 index d84f291..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/stat/Correlation.scala +++ /dev/null @@ -1,93 +0,0 @@ -// scalastyle:off header.matches -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.ml.stat - -import scala.collection.JavaConverters._ - -import org.apache.spark.annotation.{Experimental, Since} -import org.apache.spark.ml.linalg.{SQLDataTypes, Vector} -import org.apache.spark.mllib.linalg.{Vectors => OldVectors} -import org.apache.spark.mllib.stat.{Statistics => OldStatistics} -import org.apache.spark.sql.{DataFrame, Dataset, Row} -import org.apache.spark.sql.types.{StructField, StructType} - -/** - * API for correlation functions in MLlib, compatible with DataFrames and Datasets. - * - * The functions in this package generalize the functions in [[org.apache.spark.sql.Dataset#stat]] - * to spark.ml's Vector types. - */ -@Since("2.2.0") -@Experimental -object Correlation { - - /** - * :: Experimental :: - * Compute the correlation matrix for the input Dataset of Vectors using the specified method. - * Methods currently supported: `pearson` (default), `spearman`. - * - * @param dataset A dataset or a dataframe - * @param column The name of the column of vectors for which the correlation coefficient needs - * to be computed. This must be a column of the dataset, and it must contain - * Vector objects. - * @param method String specifying the method to use for computing correlation. - * Supported: `pearson` (default), `spearman` - * @return A dataframe that contains the correlation matrix of the column of vectors. This - * dataframe contains a single row and a single column of name - * '$METHODNAME($COLUMN)'. - * @throws IllegalArgumentException if the column is not a valid column in the dataset, or if - * the content of this column is not of type Vector. - * - * Here is how to access the correlation coefficient: - * {{{ - * val data: Dataset[Vector] = ... - * val Row(coeff: Matrix) = Correlation.corr(data, "value").head - * // coeff now contains the Pearson correlation matrix. 
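- *   // The same API with an explicit method name (shown for illustration):
- *   val Row(rho: Matrix) = Correlation.corr(data, "value", "spearman").head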
- * }}} - * - * @note For Spearman, a rank correlation, we need to create an RDD[Double] for each column - * and sort it in order to retrieve the ranks and then join the columns back into an RDD[Vector], - * which is fairly costly. Cache the input Dataset before calling corr with `method = "spearman"` - * to avoid recomputing the common lineage. - */ - @Since("2.2.0") - def corr(dataset: Dataset[_], column: String, method: String): DataFrame = { - val rdd = dataset.select(column).rdd.map { - case Row(v: Vector) => OldVectors.fromML(v) - } - val oldM = OldStatistics.corr(rdd, method) - val name = s"$method($column)" - val schema = StructType(Array(StructField(name, SQLDataTypes.MatrixType, nullable = false))) - dataset.sparkSession.createDataFrame(Seq(Row(oldM.asML)).asJava, schema) - } - - /** - * Compute the Pearson correlation matrix for the input Dataset of Vectors. - */ - @Since("2.2.0") - def corr(dataset: Dataset[_], column: String): DataFrame = { - corr(dataset, column, "pearson") - } -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/DecisionForest.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/DecisionForest.scala deleted file mode 100644 index ebce220..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/DecisionForest.scala +++ /dev/null @@ -1,1275 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.ml.tree.impl - -import java.io.IOException - -import scala.collection.mutable -import scala.util.Random - -import org.apache.spark.internal.Logging -import org.apache.spark.ml.classification.DecisionTreeClassificationModel -import org.apache.spark.ml.feature.LabeledPoint -import org.apache.spark.ml.regression.DecisionTreeRegressionModel -import org.apache.spark.ml.tree._ -import org.apache.spark.ml.tree.impl.RandomForest.NodeIndexInfo -import org.apache.spark.ml.util.Instrumentation -import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo, Strategy => OldStrategy} -import org.apache.spark.mllib.tree.impurity.ImpurityCalculator -import org.apache.spark.mllib.tree.model.ImpurityStats -import org.apache.spark.rdd.RDD -import org.apache.spark.util.random.{SamplingUtils, XORShiftRandom} - -/** - * ALGORITHM - * - * This is a sketch of the algorithm to help new developers. - * - * The algorithm partitions data by instances (rows). - * On each iteration, the algorithm splits a set of nodes. 
In order to choose the best split
- * for a given node, sufficient statistics are collected from the distributed data.
- * For each node, the statistics are collected to some worker node, and that worker selects
- * the best split.
- *
- * This setup requires discretization of continuous features. This binning is done in the
- * findSplits() method during initialization, after which each continuous feature becomes
- * an ordered discretized feature with at most maxBins possible values.
- *
- * The main loop in the algorithm operates on a queue of nodes (nodeStack). These nodes
- * lie at the periphery of the tree being trained. If multiple trees are being trained at once,
- * then this queue contains nodes from all of them. Each iteration works roughly as follows:
- * On the master node:
- * - Some number of nodes are pulled off the queue (based on the amount of memory
- * required for their sufficient statistics).
- * - For random forests, if featureSubsetStrategy is not "all," then a subset of candidate
- * features is chosen for each node. See method selectNodesToSplit().
- * On worker nodes, via method findBestSplits():
- * - The worker makes one pass over its subset of instances.
- * - For each (tree, node, feature, split) tuple, the worker collects statistics about
- * splitting. Note that the set of (tree, node) pairs is limited to the nodes selected
- * from the queue for this iteration. The set of features considered can also be limited
- * based on featureSubsetStrategy.
- * - For each node, the statistics for that node are aggregated to a particular worker
- * via reduceByKey(). The designated worker chooses the best (feature, split) pair,
- * or chooses to stop splitting if the stopping criteria are met.
- * On the master node:
- * - The master collects all decisions about splitting nodes and updates the model.
- * - The updated model is passed to the workers on the next iteration.
- * This process continues until the node queue is empty.
- *
- * Most of the methods in this implementation support the statistics aggregation, which is
- * the heaviest part of the computation. In general, this implementation is bound by either
- * the cost of statistics computation on workers or by communicating the sufficient statistics.
- */
-private[spark] object DecisionForest extends Logging {
-
- /**
- * Train a random forest.
- *
- * @param input Training data: RDD of `LabeledPoint`
- * @return an unweighted set of trees
- */
- def run(
- input: RDD[LabeledPoint],
- strategy: OldStrategy,
- numTrees: Int,
- featureSubsetStrategy: String,
- seed: Long,
- instr: Option[Instrumentation[_]],
- parentUID: Option[String] = None): Array[DecisionTreeModel] = {
- val exParams = DTUtils.parseExtraParams(input, strategy)
- runX(input, strategy, numTrees, featureSubsetStrategy, seed, instr, exParams, parentUID)
- }
-
- /**
- * Train a random forest.
- * - * @param input Training data: RDD of `LabeledPoint` - * @return an unweighted set of trees - */ - def runX( - input: RDD[LabeledPoint], - strategy: OldStrategy, - numTrees: Int, - featureSubsetStrategy: String, - seed: Long, - instr: Option[Instrumentation[_]], - extraParams: DFExtraParams, - parentUID: Option[String] = None): Array[DecisionTreeModel] = { - - DecisionForestInfo.timerResult = "" - val timer = new TimeTracker() - - timer.start("total") - - timer.start("init") - - val binnedFeaturesType = BinnedFeaturesDataType.withName(extraParams.rfParams.featuresDataType) - val retaggedInput = input.retag(classOf[LabeledPoint]) - // featureSubsetStrategy: The number of features to consider for splits at each tree node. - // featureSubsetStrategy: default value is "auto" for random forest. - // impurity: default value is "gini" for random forest. - val metadata = - DecisionTreeMetadata.buildMetadata(retaggedInput, strategy, numTrees, featureSubsetStrategy) - logWarning(s"decisionTreeMetadata details: ${metadata.numFeatures}," + - s" ${metadata.numExamples}, ${metadata.numClasses: Int}, ${metadata.maxBins: Int}," + - s" ${metadata.featureArity}, ${metadata.unorderedFeatures.mkString("[", ";", "]")}," + - s" ${metadata.impurity}, ${metadata.quantileStrategy}, ${metadata.maxDepth: Int}," + - s" ${metadata.minInstancesPerNode: Int}, ${metadata.minInfoGain: Double}," + - s" ${metadata.numTrees: Int}, ${metadata.numFeaturesPerNode: Int},${binnedFeaturesType}") - instr match { - case Some(instrumentation) => - instrumentation.logNumFeatures(metadata.numFeatures) - instrumentation.logNumClasses(metadata.numClasses) - case None => - logInfo("numFeatures: " + metadata.numFeatures) - logInfo("numClasses: " + metadata.numClasses) - } - - // Find the splits and the corresponding bins (interval between the splits) using a sample - // of the input data. - timer.start("findSplits") - val splits = findSplits(retaggedInput, metadata, seed, extraParams.numFeaturesOptFindSplits) - timer.stop("findSplits") - logDebug("numBins: feature: number of bins") - logDebug(Range(0, metadata.numFeatures).map { featureIndex => - s"\t$featureIndex\t${metadata.numBins(featureIndex)}" - }.mkString("\n")) - - // Bin feature values (TreePoint representation). - // Cache input RDD for speedup during multiple passes. - val treeInput = TreePointY.convertToTreeRDD(retaggedInput, splits, metadata, binnedFeaturesType) - - val withReplacement = numTrees > 1 - - // Default value of subsamplingRate is 1 for random forest. - val baggedInputOri = BaggedPoint.convertToBaggedRDD(treeInput, strategy.subsamplingRate, - numTrees, withReplacement, seed) - - val baggedInput = DTUtils.transformBaggedRDD(baggedInputOri, extraParams) - - // depth of the decision tree - val maxDepth = strategy.maxDepth - require(maxDepth <= 30, - s"DecisionTree currently only supports maxDepth <= 30, but was given maxDepth = $maxDepth.") - - // Max memory usage for aggregates - // TODO: Calculate memory usage more precisely. - val maxMemoryUsage: Long = strategy.maxMemoryInMB * 1024L * 1024L - logDebug("max memory usage for aggregates = " + maxMemoryUsage + " bytes.") - - /* - * The main idea here is to perform group-wise training of the decision tree nodes thus - * reducing the passes over the data from (# nodes) to (# nodes / maxNumberOfNodesPerGroup). - * Each data sample is handled by a particular node (or it reaches a leaf and is not used - * in lower levels). - */ - - // Create an RDD of node Id cache. 
- // At first, all the rows belong to the root nodes (node Id == 1). - // Default value of useNodeIdCache is false for random forest. - val nodeIdCache = if (strategy.useNodeIdCache) { - Some(NodeIdCache.init( - data = baggedInput, - numTrees = numTrees, - checkpointInterval = strategy.checkpointInterval, - initVal = 1)) - } else { - None - } - - /* - Stack of nodes to train: (treeIndex, node) - The reason this is a stack is that we train many trees at once, but we want to focus on - completing trees, rather than training all simultaneously. If we are splitting nodes from - 1 tree, then the new nodes to split will be put at the top of this stack, so we will continue - training the same tree in the next iteration. This focus allows us to send fewer trees to - workers on each iteration; see topNodesForGroup below. - */ - val nodeStack = new mutable.ArrayStack[(Int, LearningNode)] - val localTrainingStack = new mutable.ListBuffer[LocalTrainingTask] - - val rng = new Random() - rng.setSeed(seed) - - // Allocate and queue root nodes. - val topNodes = Array.fill[LearningNode](numTrees)(LearningNode.emptyNode(nodeIndex = 1)) - Range(0, numTrees).foreach(treeIndex => nodeStack.push((treeIndex, topNodes(treeIndex)))) - - timer.stop("init") - - while (nodeStack.nonEmpty) { - // Collect some nodes to split, and choose features for each node (if subsampling). - // Each group of nodes may come from one or multiple trees, and at multiple levels. - // nodesForGroup: treeIndex --> learningNodes in tree - // treeToNodeToIndexInfo: treeIndex --> (global) learningNodes index in tree - // --> (node index in group, feature indices). - val (nodesForGroup, treeToNodeToIndexInfo) = - DecisionForest.selectNodesToSplit(nodeStack, maxMemoryUsage, metadata, rng) - // Sanity check (should never occur): - assert(nodesForGroup.nonEmpty, - s"DecisionForest selected empty nodesForGroup. Error for unknown reason.") - - // Only send trees to worker if they contain nodes being split this iteration. - // topNodesForGroup: treeIndex --> top node in tree - val topNodesForGroup: Map[Int, LearningNode] = - nodesForGroup.keys.map(treeIdx => treeIdx -> topNodes(treeIdx)).toMap - - // Choose node splits, and enqueue new nodes as needed. - timer.start("findBestSplits") - DecisionForest.findBestSplits(baggedInput, metadata, topNodesForGroup, nodesForGroup, - treeToNodeToIndexInfo, splits, (nodeStack, localTrainingStack), - extraParams, timer, nodeIdCache) - timer.stop("findBestSplits") - } - - baggedInput.unpersist() - - timer.stop("total") - - logInfo("Internal timing for DecisionTree:") - logInfo(s"$timer") - DecisionForestInfo.timerResult = timer.toString() - - // Delete any remaining checkpoints used for node Id cache. - if (nodeIdCache.nonEmpty) { - try { - nodeIdCache.get.deleteAllCheckpoints() - } catch { - case e: IOException => - logWarning(s"delete all checkpoints failed. 
Error reason: ${e.getMessage}") - } - } - - val numFeatures = metadata.numFeatures - - parentUID match { - case Some(uid) => - if (strategy.algo == OldAlgo.Classification) { - topNodes.map { rootNode => - new DecisionTreeClassificationModel(uid, rootNode.toNode, numFeatures, - strategy.getNumClasses) - } - } else { - topNodes.map { rootNode => - new DecisionTreeRegressionModel(uid, rootNode.toNode, numFeatures) - } - } - case None => - if (strategy.algo == OldAlgo.Classification) { - topNodes.map { rootNode => - new DecisionTreeClassificationModel(rootNode.toNode, numFeatures, - strategy.getNumClasses) - } - } else { - topNodes.map { rootNode => - new DecisionTreeRegressionModel(rootNode.toNode, numFeatures) - } - } - } - } - - /** - * Helper for binSeqOp, for data which can contain a mix of ordered and unordered features. - * - * For ordered features, a single bin is updated. - * For unordered features, bins correspond to subsets of categories; either the left or right bin - * for each subset is updated. - * - * @param agg Array storing aggregate calculation, with a set of sufficient statistics for - * each (feature, bin). - * @param treePoint Data point being aggregated. - * @param splits possible splits indexed (numFeatures)(numSplits) - * @param unorderedFeatures Set of indices of unordered features. - * @param instanceWeight Weight (importance) of instance in dataset. - */ - private def mixedBinSeqOp( - agg: DTStatsAggregator, - treePoint: TreePointY, - splits: Array[Array[Split]], - unorderedFeatures: Set[Int], - instanceWeight: Int, - featuresForNode: Option[Array[Int]]): Unit = { - val numFeaturesPerNode = if (featuresForNode.nonEmpty) { - // Use subsampled features - featuresForNode.get.length - } else { - // Use all features - agg.metadata.numFeatures - } - // Iterate over features. - var featureIndexIdx = 0 - while (featureIndexIdx < numFeaturesPerNode) { - val featureIndex = if (featuresForNode.nonEmpty) { - featuresForNode.get.apply(featureIndexIdx) - } else { - featureIndexIdx - } - // TODO: we can use AggUpdateUtils to update histogram. - if (unorderedFeatures.contains(featureIndex)) { - // Unordered feature - val featureValue = treePoint.binnedFeatures.get(featureIndex) - val leftNodeFeatureOffset = agg.getFeatureOffset(featureIndexIdx) - // Update the left or right bin for each split. - val numSplits = agg.metadata.numSplits(featureIndex) - val featureSplits = splits(featureIndex) - var splitIndex = 0 - while (splitIndex < numSplits) { - if (featureSplits(splitIndex).shouldGoLeft(featureValue, featureSplits)) { - agg.featureUpdate(leftNodeFeatureOffset, splitIndex, treePoint.label, instanceWeight) - } - splitIndex += 1 - } - } else { - // Ordered feature - val binIndex = treePoint.binnedFeatures.get(featureIndex) - agg.update(featureIndexIdx, binIndex, treePoint.label, instanceWeight) - } - featureIndexIdx += 1 - } - } - - /** - * Helper for binSeqOp, for regression and for classification with only ordered features. - * - * For each feature, the sufficient statistics of one bin are updated. - * - * @param agg Array storing aggregate calculation, with a set of sufficient statistics for - * each (feature, bin). - * @param treePoint Data point being aggregated. - * @param instanceWeight Weight (importance) of instance in dataset. - */ - private def orderedBinSeqOp( - agg: DTStatsAggregator, - treePoint: TreePointY, - instanceWeight: Int, - featuresForNode: Option[Array[Int]]): Unit = { - val label = treePoint.label - - // Iterate over features. 
- if (featuresForNode.nonEmpty) { - // Use subsampled features - var featureIndexIdx = 0 - while (featureIndexIdx < featuresForNode.get.length) { - val binIndex = treePoint.binnedFeatures.get(featuresForNode.get.apply(featureIndexIdx)) - agg.update(featureIndexIdx, binIndex, label, instanceWeight) - featureIndexIdx += 1 - } - } else { - // Use all features - val numFeatures = agg.metadata.numFeatures - var featureIndex = 0 - while (featureIndex < numFeatures) { - val binIndex = treePoint.binnedFeatures.get(featureIndex) - agg.update(featureIndex, binIndex, label, instanceWeight) - featureIndex += 1 - } - } - } - - /** - * Given a group of nodes, this finds the best split for each node. - * - * @param input Training data: RDD of [[TreePointX]] - * @param metadata Learning and dataset metadata - * @param topNodesForGroup For each tree in group, tree index -> root node. - * Used for matching instances with nodes. - * @param nodesForGroup Mapping: treeIndex --> nodes to be split in tree - * @param treeToNodeToIndexInfo Mapping: treeIndex --> (global) learningNodes index in tree - * --> (node index in group, feature indices) - * feature indices: probably parts of full features. - * Mapping: treeIndex --> nodeIndex --> nodeIndexInfo, - * where nodeIndexInfo stores the index in the group and the - * feature subsets (if using feature subsets). - * @param splits possible splits for all features, indexed (numFeatures)(numSplits) - * @param stacks Queue of nodes to split, with values (treeIndex, node). - * Updated with new non-leaf nodes which are created. - * @param nodeIdCache Node Id cache containing an RDD of Array[Int] where - * each value in the array is the data point's node Id - * for a corresponding tree. This is used to prevent the need - * to pass the entire tree to the executors during - * the node stat aggregation phase. - * - */ - private[tree] def findBestSplits( - input: RDD[BaggedPoint[TreePointY]], - metadata: DecisionTreeMetadata, - topNodesForGroup: Map[Int, LearningNode], - nodesForGroup: Map[Int, Array[LearningNode]], - treeToNodeToIndexInfo: Map[Int, Map[Int, NodeIndexInfo]], - splits: Array[Array[Split]], - stacks: (mutable.ArrayStack[(Int, LearningNode)], mutable.ListBuffer[LocalTrainingTask]), - extraParams: DFExtraParams, - timer: TimeTracker = new TimeTracker, - nodeIdCache: Option[NodeIdCache] = None): Unit = { - - /* - * The high-level descriptions of the best split optimizations are noted here. - * - * *Group-wise training* - * We perform bin calculations for groups of nodes to reduce the number of - * passes over the data. Each iteration requires more computation and storage, - * but saves several iterations over the data. - * - * *Bin-wise computation* - * We use a bin-wise best split computation strategy instead of a straightforward best split - * computation strategy. Instead of analyzing each sample for contribution to the left/right - * child node impurity of every split, we first categorize each feature of a sample into a - * bin. We exploit this structure to calculate aggregates for bins and then use these aggregates - * to calculate information gain for each split. - * - * *Aggregation over partitions* - * Instead of performing a flatMap/reduceByKey operation, we exploit the fact that we know - * the number of splits in advance. Thus, we store the aggregates (at the appropriate - * indices) in a single array for all bins and rely upon the RDD aggregate method to - * drastically reduce the communication overhead. 
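- *
- * As a worked illustration (hypothetical sizes, not taken from this code): with
- * 3 features, 4 bins per feature and statsSize 2, each node's aggregator would be
- * one flat Array[Double] of 3 * 4 * 2 = 24 entries, addressed by (feature, bin) offsets.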
- */ - - val bcVariables = if (null == extraParams.rfParams) false else extraParams.rfParams.bcVariables - val (nodeStack, _) = stacks - /** numNodes: Number of nodes in this group */ - val numNodes = nodesForGroup.values.map(_.length).sum - logDebug("numNodes = " + numNodes) - logDebug("numFeatures = " + metadata.numFeatures) - logDebug("numClasses = " + metadata.numClasses) - logDebug("isMulticlass = " + metadata.isMulticlass) - logDebug("isMulticlassWithCategoricalFeatures = " + - metadata.isMulticlassWithCategoricalFeatures) - logDebug("using nodeIdCache = " + nodeIdCache.nonEmpty.toString) - - val groupInfo = - DTUtils.getGroupInfo(numNodes, treeToNodeToIndexInfo, extraParams, nodesForGroup) - - val splitsBc = if (bcVariables) Some(input.sparkContext.broadcast(splits)) else Option.empty - val splitsOption = if (bcVariables) Option.empty else Some(splits) - - /** - * Performs a sequential aggregation over a partition for a particular tree and node. - * - * For each feature, the aggregate sufficient statistics are updated for the relevant - * bins. - * - * @param treeIndex Index of the tree that we want to perform aggregation for. - * @param nodeInfo The node info for the tree node. - * @param agg Array storing aggregate calculation, with a set of sufficient statistics - * for each (node, feature, bin). - * @param baggedPoint Data point being aggregated. - */ - def nodeBinSeqOp( - treeIndex: Int, - nodeInfo: NodeIndexInfo, - agg: Array[DTStatsAggregator], - splitsBcv: Array[Array[Split]], - baggedPoint: BaggedPoint[TreePointY], - sampleId: Short = 0): Unit = { - if (DTUtils.isValidNodeInfo(nodeInfo, agg, groupInfo, baggedPoint, sampleId)) { - val aggNodeIndex = nodeInfo.nodeIndexInGroup - val featuresForNode = nodeInfo.featureSubset - val instanceWeight = baggedPoint.subsampleWeights(treeIndex) - if (metadata.unorderedFeatures.isEmpty) { - orderedBinSeqOp(agg(aggNodeIndex), baggedPoint.datum, instanceWeight, featuresForNode) - } else { - mixedBinSeqOp(agg(aggNodeIndex), baggedPoint.datum, splitsBcv, - metadata.unorderedFeatures, instanceWeight, featuresForNode) - } - agg(aggNodeIndex).updateParent(baggedPoint.datum.label, instanceWeight) - } - } - - /** - * Performs a sequential aggregation over a partition. - * - * Each data point contributes to one node. For each feature, - * the aggregate sufficient statistics are updated for the relevant bins. - * - * @param agg Array storing aggregate calculation, with a set of sufficient statistics for - * each (node, feature, bin). - * @param baggedPoint Data point being aggregated. - * @return agg - */ - def binSeqOp( - agg: Array[DTStatsAggregator], - baggedPoint: BaggedPoint[TreePointY], - splitsBcv: Array[Array[Split]], - sampleId: Short): Array[DTStatsAggregator] = { - // TODO: treeToNodeToIndexInfo and topNodesForGroup(include sub-nodes) weren't broadcast. - treeToNodeToIndexInfo.foreach { case (treeIndex, nodeIndexToInfo) => - if (DTUtils.isSubSampled(baggedPoint, groupInfo, treeIndex, sampleId)) { - val nodeIndex = - topNodesForGroup(treeIndex).predictImpl(baggedPoint.datum.binnedFeatures, splitsBcv) - nodeBinSeqOp(treeIndex, nodeIndexToInfo.getOrElse(nodeIndex, null), - agg, splitsBcv, baggedPoint, sampleId) - } - } - agg - } - - /** - * Do the same thing as binSeqOp, but with nodeIdCache. 
- */
- def binSeqOpWithNodeIdCache(
- agg: Array[DTStatsAggregator],
- splitsBcv: Array[Array[Split]],
- dataPoint: (BaggedPoint[TreePointY], Array[Int])): Array[DTStatsAggregator] = {
- treeToNodeToIndexInfo.foreach { case (treeIndex, nodeIndexToInfo) =>
- val baggedPoint = dataPoint._1
- val nodeIdCache = dataPoint._2
- val nodeIndex = nodeIdCache(treeIndex)
- nodeBinSeqOp(treeIndex, nodeIndexToInfo.getOrElse(nodeIndex, null),
- agg, splitsBcv, baggedPoint)
- }
-
- agg
- }
-
- /**
- * Get the (node index in group) --> (feature indices) map,
- * a shortcut for finding the feature indices of a node given its index in the group.
- */
- def getNodeToFeatures(
- treeToNodeToIndexInfo: Map[Int, Map[Int, NodeIndexInfo]]): Option[Map[Int, Array[Int]]] = {
- if (!metadata.subsamplingFeatures) {
- None
- } else {
- val mutableNodeToFeatures = new mutable.HashMap[Int, Array[Int]]()
- treeToNodeToIndexInfo.values.foreach { nodeIdToNodeInfo =>
- nodeIdToNodeInfo.values.foreach { nodeIndexInfo =>
- assert(nodeIndexInfo.featureSubset.isDefined)
- mutableNodeToFeatures(nodeIndexInfo.nodeIndexInGroup) = nodeIndexInfo.featureSubset.get
- }
- }
- Some(mutableNodeToFeatures.toMap)
- }
- }
-
- /** array of nodes to train indexed by node index in group */
- val nodes = new Array[LearningNode](numNodes)
- nodesForGroup.foreach { case (treeIndex, nodesForTree) =>
- nodesForTree.foreach { node =>
- nodes(treeToNodeToIndexInfo(treeIndex)(node.id).nodeIndexInGroup) = node
- }
- }
-
- // Calculate best splits for all nodes in the group
- timer.start("chooseSplits")
-
- // In each partition, iterate over all instances and compute aggregate stats for each node,
- // yielding a (nodeIndex, nodeAggregateStats) pair for each node.
- // After a `reduceByKey` operation,
- // the stats of a node are shuffled to a particular partition and combined together;
- // the best splits for the nodes are then found there.
- // Finally, only the best splits are collected to the driver to construct the decision trees.
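- // Minimal sketch of the pattern below (statsPerNode / bestSplit are hypothetical
- // helper names, not defined in this file):
- //   input.mapPartitions(pts => statsPerNode(pts))   // (nodeIdxInGroup, DTStatsAggregator)
- //     .reduceByKey((a, b) => a.merge(b))            // combine partial stats per node
- //     .map { case (i, agg) => (i, bestSplit(agg)) } // pick split where the stats live
- //     .collectAsMap()                               // ship only decisions to the driver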
- // nodeToFeatures: node index in group -> selected feature indexes
- val nodeToFeatures = getNodeToFeatures(treeToNodeToIndexInfo)
- val nodeToFeaturesBc = input.sparkContext.broadcast(nodeToFeatures)
-
- /** partitionAggregates RDD: node index in group --> nodeStats */
- val partitionAggregates: RDD[(Int, DTStatsAggregator)] = if (nodeIdCache.nonEmpty) {
- input.zip(nodeIdCache.get.nodeIdsForInstances).mapPartitions { points =>
- // Construct a nodeStatsAggregators array to hold node aggregate stats;
- // each node gets its own nodeStatsAggregator
- val nodeStatsAggregators = Array.tabulate(numNodes) { nodeIndex =>
- val featuresForNode = nodeToFeaturesBc.value.map { nodeToFeatures =>
- nodeToFeatures(nodeIndex)
- }
- new DTStatsAggregator(metadata, featuresForNode)
- }
-
- val splitsBcv = if (bcVariables) splitsBc.get.value else splitsOption.get
- // iterate over all instances in the current partition and update aggregate stats
- points.foreach(binSeqOpWithNodeIdCache(nodeStatsAggregators, splitsBcv, _))
-
- // transform nodeStatsAggregators array to (nodeIndex, nodeAggregateStats) pairs,
- // which can be combined with those from other partitions using `reduceByKey`
- nodeStatsAggregators.view.zipWithIndex.map(_.swap).iterator
- }
- } else {
- input.mapPartitions { points =>
- val (firstPointOption, nodeStatsAggregators) =
- DTUtils.initNodeStatsAgg(numNodes, nodeToFeaturesBc, metadata, points, groupInfo)
- if (firstPointOption.isEmpty) {
- Iterator.empty
- } else {
- val firstPoint = firstPointOption.get
- val sampleId = firstPoint.sampleId
-
- val splitsBcv = if (bcVariables) splitsBc.get.value else splitsOption.get
- binSeqOp(nodeStatsAggregators, firstPoint, splitsBcv, sampleId)
- // iterate over all instances in the current partition and update aggregate stats
- points.foreach(binSeqOp(nodeStatsAggregators, _, splitsBcv, sampleId))
-
- // transform nodeStatsAggregators array to (nodeIndex, nodeAggregateStats) pairs,
- // which can be combined with those from other partitions using `reduceByKey`
- nodeStatsAggregators.view.zipWithIndex
- .filter(v => RFUtils.isValidAgg(v._1)).map(_.swap).iterator
- }
- }
- }
-
- val reducedAggregates = if (extraParams.useDFCollPtner) {
- val partitioner = new DFCollectionPartitioner(input.partitions.length,
- DTUtils.maxNumParallelThreads(), metadata.numFeatures)
- partitionAggregates.reduceByKey(partitioner, (a, b) => a.merge(b))
- } else {
- logInfo("DFCollectionPartitioner discarded.")
- partitionAggregates.reduceByKey((a, b) => a.merge(b))
- }
- val nodeToBestSplits = reducedAggregates.map {
- case (nodeIndex, aggStats) =>
- val featuresForNode = nodeToFeaturesBc.value.flatMap { nodeToFeatures =>
- Some(nodeToFeatures(nodeIndex))
- }
-
- val splitsBcv = if (bcVariables) splitsBc.get.value else splitsOption.get
- // find best split for each node
- val (split: Split, stats: ImpurityStats) =
- binsToBestSplit(aggStats, splitsBcv, featuresForNode, nodes(nodeIndex))
- (nodeIndex, (split, stats))
- }.collectAsMap()
-
- timer.stop("chooseSplits")
-
- val nodeIdUpdaters = if (nodeIdCache.nonEmpty) {
- Array.fill[mutable.Map[Int, NodeIndexUpdaterRaw]](
- metadata.numTrees)(mutable.Map[Int, NodeIndexUpdaterRaw]())
- } else {
- null
- }
- // Iterate over all nodes in this group.
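- // (Recall that LearningNode ids follow a binary-heap layout: the root has id 1
- // and the children of node i are 2 * i and 2 * i + 1, so indexToLevel(i) is
- // floor(log2(i)).)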
- nodesForGroup.foreach { case (treeIndex, nodesForTree) => - nodesForTree.foreach { node => - val nodeIndex = node.id - val nodeLevel = LearningNode.indexToLevel(nodeIndex) - val nodeInfo = treeToNodeToIndexInfo(treeIndex)(nodeIndex) - val aggNodeIndex = nodeInfo.nodeIndexInGroup - val (split: Split, stats: ImpurityStats) = - nodeToBestSplits(aggNodeIndex) - logDebug("best split = " + split) - - // Extract info for this node. Create children if not leaf. - val isLeaf = - (stats.gain <= 0) || (nodeLevel == metadata.maxDepth) - node.isLeaf = isLeaf - node.stats = stats - logDebug("Node = " + node) - - if (!isLeaf) { - node.split = Some(split) - val childIsLeaf = (nodeLevel + 1) == metadata.maxDepth - val leftChildIsLeaf = childIsLeaf || (stats.leftImpurity == 0.0) - val rightChildIsLeaf = childIsLeaf || (stats.rightImpurity == 0.0) - node.leftChild = Some(LearningNode(LearningNode.leftChildIndex(nodeIndex), - leftChildIsLeaf, ImpurityStats.getEmptyImpurityStats(stats.leftImpurityCalculator))) - node.rightChild = Some(LearningNode(LearningNode.rightChildIndex(nodeIndex), - rightChildIsLeaf, ImpurityStats.getEmptyImpurityStats(stats.rightImpurityCalculator))) - - if (nodeIdCache.nonEmpty) { - val nodeIndexUpdater = NodeIndexUpdaterRaw( - split = split, - nodeIndex = nodeIndex) - nodeIdUpdaters(treeIndex).put(nodeIndex, nodeIndexUpdater) - } - - // enqueue left child and right child if they are not leaves - if (!leftChildIsLeaf) { - nodeStack.push((treeIndex, node.leftChild.get)) - } - if (!rightChildIsLeaf) { - nodeStack.push((treeIndex, node.rightChild.get)) - } - - logDebug("leftChildIndex = " + node.leftChild.get.id + - ", impurity = " + stats.leftImpurity) - logDebug("rightChildIndex = " + node.rightChild.get.id + - ", impurity = " + stats.rightImpurity) - } - } - } - - if (nodeIdCache.nonEmpty) { - // Update the cache if needed. - nodeIdCache.get.updateNodeIndicesY(input, nodeIdUpdaters, splits) - } - } - - /** - * Calculate the impurity statistics for a given (feature, split) based upon left/right - * aggregates. - * - * @param stats the recycle impurity statistics for this feature's all splits, - * only 'impurity' and 'impurityCalculator' are valid between each iteration - * @param leftImpurityCalculator left node aggregates for this (feature, split) - * @param rightImpurityCalculator right node aggregate for this (feature, split) - * @param metadata learning and dataset metadata for DecisionTree - * @return Impurity statistics for this (feature, split) - */ - private def calculateImpurityStats( - stats: ImpurityStats, - leftImpurityCalculator: ImpurityCalculator, - rightImpurityCalculator: ImpurityCalculator, - metadata: DecisionTreeMetadata): ImpurityStats = { - - val parentImpurityCalculator: ImpurityCalculator = if (stats == null) { - leftImpurityCalculator.copy.add(rightImpurityCalculator) - } else { - stats.impurityCalculator - } - - val impurity: Double = if (stats == null) { - parentImpurityCalculator.calculate() - } else { - stats.impurity - } - - val leftCount = leftImpurityCalculator.count - val rightCount = rightImpurityCalculator.count - - val totalCount = leftCount + rightCount - - // If left child or right child doesn't satisfy minimum instances per node, - // then this split is invalid, return invalid information gain stats. 
- if ((leftCount < metadata.minInstancesPerNode) || - (rightCount < metadata.minInstancesPerNode)) { - return ImpurityStats.getInvalidImpurityStats(parentImpurityCalculator) - } - - val leftImpurity = leftImpurityCalculator.calculate() // Note: This equals 0 if count = 0 - val rightImpurity = rightImpurityCalculator.calculate() - - val leftWeight = leftCount / totalCount.toDouble - val rightWeight = rightCount / totalCount.toDouble - - val gain = impurity - leftWeight * leftImpurity - rightWeight * rightImpurity - - // if information gain doesn't satisfy minimum information gain, - // then this split is invalid, return invalid information gain stats. - if (gain < metadata.minInfoGain) { - return ImpurityStats.getInvalidImpurityStats(parentImpurityCalculator) - } - - new ImpurityStats(gain, impurity, parentImpurityCalculator, - leftImpurityCalculator, rightImpurityCalculator) - } - - /** - * Find the best split for a node. - * - * @param binAggregates Bin statistics. - * @return tuple for best split: (Split, information gain, prediction at node) - */ - private[tree] def binsToBestSplit( - binAggregates: DTStatsAggregator, - splits: Array[Array[Split]], - featuresForNode: Option[Array[Int]], - node: LearningNode): (Split, ImpurityStats) = { - - // Calculate InformationGain and ImpurityStats if current node is top node - val level = LearningNode.indexToLevel(node.id) - var gainAndImpurityStats: ImpurityStats = if (level == 0) { - null - } else { - node.stats - } - - val validFeatureSplits = - Range(0, binAggregates.metadata.numFeaturesPerNode).view.map { featureIndexIdx => - featuresForNode.map(features => (featureIndexIdx, features(featureIndexIdx))) - .getOrElse((featureIndexIdx, featureIndexIdx)) - }.withFilter { case (_, featureIndex) => - binAggregates.metadata.numSplits(featureIndex) != 0 - } - - // For each (feature, split), calculate the gain, and select the best (feature, split). - val splitsAndImpurityInfo = - validFeatureSplits.map { case (featureIndexIdx, featureIndex) => - val numSplits = binAggregates.metadata.numSplits(featureIndex) - if (binAggregates.metadata.isContinuous(featureIndex)) { - // Cumulative sum (scanLeft) of bin statistics. - // Afterwards, binAggregates for a bin is the sum of aggregates for - // that bin + all preceding bins. - val nodeFeatureOffset = binAggregates.getFeatureOffset(featureIndexIdx) - var splitIndex = 0 - while (splitIndex < numSplits) { - binAggregates.mergeForFeature(nodeFeatureOffset, splitIndex + 1, splitIndex) - splitIndex += 1 - } - // Find best split. 
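- // (After the in-place merges above, bin k holds the sum over bins 0..k, so the
- // right-child stats for split k are the totals at index numSplits minus the
- // prefix at k.)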
- val (bestFeatureSplitIndex, bestFeatureGainStats) = - Range(0, numSplits).map { case splitIdx => - val leftChildStats = binAggregates.getImpurityCalculator(nodeFeatureOffset, splitIdx) - val rightChildStats = - binAggregates.getImpurityCalculator(nodeFeatureOffset, numSplits) - rightChildStats.subtract(leftChildStats) - gainAndImpurityStats = calculateImpurityStats(gainAndImpurityStats, - leftChildStats, rightChildStats, binAggregates.metadata) - (splitIdx, gainAndImpurityStats) - }.maxBy(_._2.gain) - (splits(featureIndex)(bestFeatureSplitIndex), bestFeatureGainStats) - } else if (binAggregates.metadata.isUnordered(featureIndex)) { - // Unordered categorical feature - val leftChildOffset = binAggregates.getFeatureOffset(featureIndexIdx) - val (bestFeatureSplitIndex, bestFeatureGainStats) = - Range(0, numSplits).map { splitIndex => - val leftChildStats = binAggregates.getImpurityCalculator(leftChildOffset, splitIndex) - val rightChildStats = binAggregates.getParentImpurityCalculator() - .subtract(leftChildStats) - gainAndImpurityStats = calculateImpurityStats(gainAndImpurityStats, - leftChildStats, rightChildStats, binAggregates.metadata) - (splitIndex, gainAndImpurityStats) - }.maxBy(_._2.gain) - (splits(featureIndex)(bestFeatureSplitIndex), bestFeatureGainStats) - } else { - // Ordered categorical feature - val nodeFeatureOffset = binAggregates.getFeatureOffset(featureIndexIdx) - val numCategories = binAggregates.metadata.numBins(featureIndex) - - /* Each bin is one category (feature value). - * The bins are ordered based on centroidForCategories, and this ordering determines which - * splits are considered. (With K categories, we consider K - 1 possible splits.) - * - * centroidForCategories is a list: (category, centroid) - */ - val centroidForCategories = Range(0, numCategories).map { case featureValue => - val categoryStats = - binAggregates.getImpurityCalculator(nodeFeatureOffset, featureValue) - val centroid = if (categoryStats.count != 0) { - if (binAggregates.metadata.isMulticlass) { - // multiclass classification - // For categorical variables in multiclass classification, - // the bins are ordered by the impurity of their corresponding labels. - categoryStats.calculate() - } else if (binAggregates.metadata.isClassification) { - // binary classification - // For categorical variables in binary classification, - // the bins are ordered by the count of class 1. - categoryStats.stats(1) - } else { - // regression - // For categorical variables in regression and binary classification, - // the bins are ordered by the prediction. - categoryStats.predict - } - } else { - Double.MaxValue - } - (featureValue, centroid) - } - - logDebug("Centroids for categorical variable: " + centroidForCategories.mkString(",")) - - // bins sorted by centroids - val categoriesSortedByCentroid = centroidForCategories.toList.sortBy(_._2) - - logDebug("Sorted centroids for categorical variable = " + - categoriesSortedByCentroid.mkString(",")) - - // Cumulative sum (scanLeft) of bin statistics. - // Afterwards, binAggregates for a bin is the sum of aggregates for - // that bin + all preceding bins. 
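The centroid ordering computed above is what keeps the ordered categorical search linear: sorting categories by centroid reduces the subset search to the K - 1 contiguous splits examined by the loop that follows. A toy version of the binary-classification branch, with hypothetical names:

    // Sort categories by the fraction of class-1 labels; empty categories sink to the end.
    def orderCategories(classOneCounts: Array[Double], totalCounts: Array[Double]): Seq[Int] =
      classOneCounts.indices
        .map { cat =>
          val centroid =
            if (totalCounts(cat) > 0) classOneCounts(cat) / totalCounts(cat) else Double.MaxValue
          (cat, centroid)
        }
        .sortBy(_._2)
        .map(_._1)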
- var splitIndex = 0 - while (splitIndex < numSplits) { - val currentCategory = categoriesSortedByCentroid(splitIndex)._1 - val nextCategory = categoriesSortedByCentroid(splitIndex + 1)._1 - binAggregates.mergeForFeature(nodeFeatureOffset, nextCategory, currentCategory) - splitIndex += 1 - } - // lastCategory = index of bin with total aggregates for this (node, feature) - val lastCategory = categoriesSortedByCentroid.last._1 - // Find best split. - val (bestFeatureSplitIndex, bestFeatureGainStats) = - Range(0, numSplits).map { splitIndex => - val featureValue = categoriesSortedByCentroid(splitIndex)._1 - val leftChildStats = - binAggregates.getImpurityCalculator(nodeFeatureOffset, featureValue) - val rightChildStats = - binAggregates.getImpurityCalculator(nodeFeatureOffset, lastCategory) - rightChildStats.subtract(leftChildStats) - gainAndImpurityStats = calculateImpurityStats(gainAndImpurityStats, - leftChildStats, rightChildStats, binAggregates.metadata) - (splitIndex, gainAndImpurityStats) - }.maxBy(_._2.gain) - val categoriesForSplit = - categoriesSortedByCentroid.map(_._1.toDouble).slice(0, bestFeatureSplitIndex + 1) - val bestFeatureSplit = - new CategoricalSplit(featureIndex, categoriesForSplit.toArray, numCategories) - (bestFeatureSplit, bestFeatureGainStats) - } - } - - val (bestSplit, bestSplitStats) = - if (splitsAndImpurityInfo.isEmpty) { - // If no valid splits for features, then this split is invalid, - // return invalid information gain stats. Take any split and continue. - // Splits is empty, so arbitrarily choose to split on any threshold - val dummyFeatureIndex = featuresForNode.map(_.head).getOrElse(0) - val parentImpurityCalculator = binAggregates.getParentImpurityCalculator() - if (binAggregates.metadata.isContinuous(dummyFeatureIndex)) { - (new ContinuousSplit(dummyFeatureIndex, 0), - ImpurityStats.getInvalidImpurityStats(parentImpurityCalculator)) - } else { - val numCategories = binAggregates.metadata.featureArity(dummyFeatureIndex) - (new CategoricalSplit(dummyFeatureIndex, Array(), numCategories), - ImpurityStats.getInvalidImpurityStats(parentImpurityCalculator)) - } - } else { - splitsAndImpurityInfo.maxBy(_._2.gain) - } - (bestSplit, bestSplitStats) - } - - /** - * Returns splits for decision tree calculation. - * Continuous and categorical features are handled differently. - * - * Continuous features: - * For each feature, there are numBins - 1 possible splits representing the possible binary - * decisions at each node in the tree. - * This finds locations (feature values) for splits using a subsample of the data. - * - * Categorical features: - * For each feature, there is 1 bin per split. - * Splits and bins are handled in 2 ways: - * (a) "unordered features" - * For multiclass classification with a low-arity feature - * (i.e., if isMulticlass && isSpaceSufficientForAllCategoricalSplits), - * the feature is split based on subsets of categories. - * (b) "ordered features" - * For regression and binary classification, - * and for multiclass classification with a high-arity feature, - * there is one bin per category. 
- * - * @param input Training data: RDD of [[LabeledPoint]] - * @param metadata Learning and dataset metadata - * @param seed random seed - * @return Splits, an Array of [[Split]] - * of size (numFeatures, numSplits) - */ - protected[tree] def findSplits( - input: RDD[LabeledPoint], - metadata: DecisionTreeMetadata, - seed: Long, - numFeaturesOptFindSplits: Int = 8192): Array[Array[Split]] = { - - logDebug("isMulticlass = " + metadata.isMulticlass) - - val numFeatures = metadata.numFeatures - - // Sample the input only if there are continuous features. - val continuousFeatures = Range(0, numFeatures).filter(metadata.isContinuous) - val sampledInput = if (continuousFeatures.nonEmpty) { - // Calculate the number of samples for approximate quantile calculation. - val requiredSamples = math.max(metadata.maxBins * metadata.maxBins, 10000) - val fraction = if (requiredSamples < metadata.numExamples) { - requiredSamples.toDouble / metadata.numExamples - } else { - 1.0 - } - logDebug("fraction of data used for calculating quantiles = " + fraction) - input.sample(withReplacement = false, fraction, new XORShiftRandom(seed).nextInt()) - } else { - input.sparkContext.emptyRDD[LabeledPoint] - } - - findSplitsBySorting(sampledInput, metadata, continuousFeatures, numFeaturesOptFindSplits) - } - - private def findSplitsBySorting( - input: RDD[LabeledPoint], - metadata: DecisionTreeMetadata, - continuousFeatures: IndexedSeq[Int], - numFeaturesOptFindSplits: Int = 8192): Array[Array[Split]] = { - - val continuousSplits: scala.collection.Map[Int, Array[Split]] = { - // reduce the parallelism for split computations when there are less - // continuous features than input partitions. this prevents tasks from - // being spun up that will definitely do no work. - val numPartitions = math.min(continuousFeatures.length, input.partitions.length) - - if (continuousFeatures.length < numFeaturesOptFindSplits) { - input - .flatMap(point => continuousFeatures.map(idx => (idx, point.features(idx)))) - .groupByKey(numPartitions) - .map { case (idx, samples) => - val thresholds = findSplitsForContinuousFeature(samples, metadata, idx) - val splits: Array[Split] = thresholds.map(thresh => new ContinuousSplit(idx, thresh)) - logDebug(s"featureIndex = $idx, numSplits = ${splits.length}") - (idx, splits) - }.collectAsMap() - } else { - val numSamples = input.count - require(numSamples < Int.MaxValue) - input.mapPartitions { points => - val partialRes = points.foldLeft(Array.empty[(Int, Array[Double])]) { case (res, point) => - var resNew = res - continuousFeatures.foreach { idx => - val featureValue = point.features(idx) - if (featureValue != 0.0) { - resNew = resNew :+ (idx, Array(featureValue)) - } - } - resNew - } - val restRes = continuousFeatures.indices.diff(partialRes.map(_._1).distinct) - .toArray.map(idx => (idx, Array.empty[Double])) - (partialRes ++ restRes).iterator - }.reduceByKey(_ ++ _).map { case (idx, partialSamples) => - val thresholds = findSplitsForContinuousFeature(partialSamples, metadata, idx, - numSamples.toInt - partialSamples.length) - val splits: Array[Split] = thresholds.map(thresh => new ContinuousSplit(idx, thresh)) - logDebug(s"featureIndex = $idx, numSplits = ${splits.length}") - (idx, splits) - }.collectAsMap() - } - } - - val numFeatures = metadata.numFeatures - val splits: Array[Array[Split]] = Array.tabulate(numFeatures) { - case i if metadata.isContinuous(i) => - val split = continuousSplits(i) - metadata.setNumSplits(i, split.length) - split - - case i if metadata.isCategorical(i) && 
metadata.isUnordered(i) => - // Unordered features - // 2^(maxFeatureValue - 1) - 1 combinations - val featureArity = metadata.featureArity(i) - Array.tabulate[Split](metadata.numSplits(i)) { splitIndex => - val categories = extractMultiClassCategories(splitIndex + 1, featureArity) - new CategoricalSplit(i, categories.toArray, featureArity) - } - - case i if metadata.isCategorical(i) => - // Ordered features - // Splits are constructed as needed during training. - Array.empty[Split] - } - splits - } - - /** - * Method to extract the list of eligible categories given an index. It extracts the - * positions of ones in the binary representation of the input. If the binary - * representation of a number is 01101 (13), the output list should be (3.0, 2.0, - * 0.0). maxFeatureValue specifies the number of rightmost digits that will be tested for ones. - */ - private[tree] def extractMultiClassCategories( - input: Int, - maxFeatureValue: Int): List[Double] = { - var categories = List[Double]() - var j = 0 - var bitShiftedInput = input - while (j < maxFeatureValue) { - if (bitShiftedInput % 2 != 0) { - // Update the list of categories. - categories = j.toDouble :: categories - } - // Right shift by one - bitShiftedInput = bitShiftedInput >> 1 - j += 1 - } - categories - } - - /** - * Find splits for a continuous feature. - * NOTE: The returned number of splits is set based on `featureSamples` and - * could be different from the specified `numSplits`. - * The `numSplits` attribute in the `DecisionTreeMetadata` class will be set accordingly. - * - * @param featureSamples feature values of each sample - * @param metadata decision tree metadata - * NOTE: `metadata.numBins` will be changed accordingly - * if there are not enough splits to be found - * @param featureIndex feature index to find splits - * @return array of split thresholds - */ - private[tree] def findSplitsForContinuousFeature( - featureSamples: Iterable[Double], - metadata: DecisionTreeMetadata, - featureIndex: Int, - numSamplesOfZeroFeature: Int = 0): Array[Double] = { - require(metadata.isContinuous(featureIndex), - "findSplitsForContinuousFeature can only be used to find splits for a continuous feature.") - - val splits: Array[Double] = if (featureSamples.isEmpty && 0 == numSamplesOfZeroFeature) { - Array.empty[Double] - } else { - val numSplits = metadata.numSplits(featureIndex) - - // get count for each distinct value - val startValueCountPair = if (0 == numSamplesOfZeroFeature) { - (Map.empty[Double, Int], 0) - } else { - (Map(0.0 -> numSamplesOfZeroFeature), numSamplesOfZeroFeature) - } - val (valueCountMap, numSamples) = featureSamples.foldLeft(startValueCountPair) { - case ((m, cnt), x) => - (m + ((x, m.getOrElse(x, 0) + 1)), cnt + 1) - } - // sort distinct values - val valueCounts = valueCountMap.toSeq.sortBy(_._1).toArray - - val possibleSplits = valueCounts.length - 1 - if (possibleSplits == 0) { - // constant feature - Array.empty[Double] - } else if (possibleSplits <= numSplits) { - // if the number of possible splits is at most numSplits, return all possible splits - (1 to possibleSplits) - .map(index => (valueCounts(index - 1)._1 + valueCounts(index)._1) / 2.0) - .toArray - } else { - // stride between splits - val stride: Double = numSamples.toDouble / (numSplits + 1) - logDebug("stride = " + stride) - - // iterate over `valueCounts` to find splits - val splitsBuilder = mutable.ArrayBuilder.make[Double] - var index = 1 - // currentCount: sum of counts of values that have been visited - var currentCount = valueCounts(0)._2 - // 
targetCount: target value for `currentCount`. - // If `currentCount` is closest value to `targetCount`, - // then current value is a split threshold. - // After finding a split threshold, `targetCount` is added by stride. - var targetCount = stride - while (index < valueCounts.length) { - val previousCount = currentCount - currentCount += valueCounts(index)._2 - val previousGap = math.abs(previousCount - targetCount) - val currentGap = math.abs(currentCount - targetCount) - // If adding count of current value to currentCount - // makes the gap between currentCount and targetCount smaller, - // previous value is a split threshold. - if (previousGap < currentGap) { - splitsBuilder += (valueCounts(index - 1)._1 + valueCounts(index)._1) / 2.0 - targetCount += stride - } - index += 1 - } - - splitsBuilder.result() - } - } - splits - } - - /** - * Pull nodes off of the queue, and collect a group of nodes to be split on this iteration. - * This tracks the memory usage for aggregates and stops adding nodes when too much memory - * will be needed; this allows an adaptive number of nodes since different nodes may require - * different amounts of memory (if featureSubsetStrategy is not "all"). - * - * @param nodeStack Queue of nodes to split. - * @param maxMemoryUsage Bound on size of aggregate statistics. - * @return (nodesForGroup, treeToNodeToIndexInfo). - * nodesForGroup holds the nodes to split: treeIndex --> nodes in tree. - * - * treeToNodeToIndexInfo holds indices selected features for each node: - * treeIndex --> (global) node index --> (node index in group, feature indices). - * The (global) node index is the index in the tree; the node index in group is the - * index in [0, numNodesInGroup) of the node in this group. - * The feature indices are None if not subsampling features. - */ - private[tree] def selectNodesToSplit( - nodeStack: mutable.ArrayStack[(Int, LearningNode)], - maxMemoryUsage: Long, - metadata: DecisionTreeMetadata, - rng: Random): (Map[Int, Array[LearningNode]], Map[Int, Map[Int, NodeIndexInfo]]) = { - // Collect some nodes to split: - // nodesForGroup(treeIndex) = nodes to split - val mutableNodesForGroup = new mutable.HashMap[Int, mutable.ArrayBuffer[LearningNode]]() - val mutableTreeToNodeToIndexInfo = - new mutable.HashMap[Int, mutable.HashMap[Int, NodeIndexInfo]]() - var memUsage: Long = 0L - var numNodesInGroup = 0 - // If maxMemoryInMB is set very small, we want to still try to split 1 node, - // so we allow one iteration if memUsage == 0. - var groupDone = false - while (nodeStack.nonEmpty && !groupDone) { - val (treeIndex, node) = nodeStack.top - // Choose subset of features for node (if subsampling). - val featureSubset: Option[Array[Int]] = if (metadata.subsamplingFeatures) { - Some(SamplingUtils.reservoirSampleAndCount(Range(0, - metadata.numFeatures).iterator, metadata.numFeaturesPerNode, rng.nextLong())._1) - } else { - None - } - // Check if enough memory remains to add this node to the group. 
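The loop that continues below amounts to a greedy, memory-bounded packing policy: keep admitting frontier nodes while their estimated aggregate size fits the budget, and always admit at least one node so the algorithm makes progress. A sketch with hypothetical signatures (integer node ids and a sizeOf estimator stand in for LearningNode and aggregateSizeForNode):

    import scala.collection.mutable

    def selectGroup(stack: mutable.Stack[Int], sizeOf: Int => Long, maxBytes: Long): Seq[Int] = {
      val group = mutable.ArrayBuffer.empty[Int]
      var used = 0L
      var done = false
      while (stack.nonEmpty && !done) {
        val cost = sizeOf(stack.top)
        if (used + cost <= maxBytes || used == 0L) { // the first node is always admitted
          group += stack.pop()
          used += cost
        } else {
          done = true
        }
      }
      group.toSeq
    }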
- val nodeMemUsage = DecisionForest.aggregateSizeForNode(metadata, featureSubset) * 8L - if (memUsage + nodeMemUsage <= maxMemoryUsage || memUsage == 0) { - nodeStack.pop() - mutableNodesForGroup.getOrElseUpdate(treeIndex, new mutable.ArrayBuffer[LearningNode]()) += - node - mutableTreeToNodeToIndexInfo - .getOrElseUpdate(treeIndex, new mutable.HashMap[Int, NodeIndexInfo]())(node.id) - = new NodeIndexInfo(numNodesInGroup, featureSubset) - numNodesInGroup += 1 - memUsage += nodeMemUsage - } else { - groupDone = true - } - } - if (memUsage > maxMemoryUsage) { - // If maxMemoryUsage is 0, we should still allow splitting 1 node. - logWarning(s"Tree learning is using approximately $memUsage bytes per iteration, which" + - s" exceeds requested limit maxMemoryUsage=$maxMemoryUsage. This allows splitting" + - s" $numNodesInGroup nodes in this iteration.") - } - logWarning(f"[this group] actualMemUsage: ${memUsage/(1024d*1024d)}%.2f MB," + - f" maxMemoryUsage: ${maxMemoryUsage/(1024d*1024d)}%.2f MB.") - // Convert mutable maps to immutable ones. - val nodesForGroup: Map[Int, Array[LearningNode]] = - mutableNodesForGroup.mapValues(_.toArray).toMap - val treeToNodeToIndexInfo = mutableTreeToNodeToIndexInfo.mapValues(_.toMap).toMap - (nodesForGroup, treeToNodeToIndexInfo) - } - - /** - * Get the number of values to be stored for this node in the bin aggregates. - * - * @param featureSubset Indices of features which may be split at this node. - * If None, then use all features. - */ - private def aggregateSizeForNode( - metadata: DecisionTreeMetadata, - featureSubset: Option[Array[Int]]): Long = { - val totalBins = if (featureSubset.nonEmpty) { - featureSubset.get.map(featureIndex => metadata.numBins(featureIndex).toLong).sum - } else { - metadata.numBins.map(_.toLong).sum - } - if (metadata.isClassification) { - metadata.numClasses * totalBins - } else { - 3 * totalBins - } - } -} - -object DecisionForestInfo { - var timerResult: String = "" -} - -case class LocalTrainingTask(node: LearningNode) diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/GradientBoostedTrees.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/GradientBoostedTrees.scala deleted file mode 100644 index ddb6925..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/GradientBoostedTrees.scala +++ /dev/null @@ -1,663 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.spark.ml.tree.impl - -import it.unimi.dsi.fastutil.doubles.DoubleArrayList - -import org.apache.spark.SparkContext -import org.apache.spark.internal.Logging -import org.apache.spark.ml.feature.LabeledPoint -import org.apache.spark.ml.linalg.Vector -import org.apache.spark.ml.regression.{DecisionTreeRegressionModel, DecisionTreeRegressor} -import org.apache.spark.ml.tree.Split -import org.apache.spark.ml.tree.impl.RandomForest4GBDTX.findSplits -import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo} -import org.apache.spark.mllib.tree.configuration.{BoostingStrategy => OldBoostingStrategy} -import org.apache.spark.mllib.tree.impurity.{Variance => OldVariance} -import org.apache.spark.mllib.tree.loss.{Loss => OldLoss} -import org.apache.spark.rdd.RDD -import org.apache.spark.rdd.util.PeriodicRDDCheckpointer -import org.apache.spark.storage.StorageLevel - - -private[spark] object GradientBoostedTrees extends Logging { - - /** - * Method to train a gradient boosting model - * @param input Training dataset: RDD of `LabeledPoint`. - * @param seed Random seed. - * @return tuple of ensemble models and weights: - * (array of decision tree models, array of model weights) - */ - def run( - input: RDD[LabeledPoint], - boostingStrategy: OldBoostingStrategy, - seed: Long, - featureSubsetStrategy: String): (Array[DecisionTreeRegressionModel], Array[Double]) = { - val doUseAcc = getDoUseAccFromSparkConf(input.sparkContext) - run(input, boostingStrategy, seed, featureSubsetStrategy, doUseAcc) - } - - /** Run with extended parameters */ - def run( - input: RDD[LabeledPoint], - boostingStrategy: OldBoostingStrategy, - seed: Long, - featureSubsetStrategy: String, - doUseAcc: Boolean): (Array[DecisionTreeRegressionModel], Array[Double]) = { - val algo = boostingStrategy.treeStrategy.algo - algo match { - case OldAlgo.Regression => - if (doUseAcc) { - GradientBoostedTrees.boostX(input, input, boostingStrategy, validate = false, - seed, featureSubsetStrategy) - } else { - GradientBoostedTrees.boost(input, input, boostingStrategy, validate = false, - seed, featureSubsetStrategy) - } - case OldAlgo.Classification => - // Map labels to -1, +1 so binary classification can be treated as regression. - val remappedInput = input.map(x => new LabeledPoint((x.label * 2) - 1, x.features)) - if (doUseAcc) { - GradientBoostedTrees.boostX(remappedInput, remappedInput, - boostingStrategy, validate = false, seed, featureSubsetStrategy) - } else { - GradientBoostedTrees.boost(remappedInput, remappedInput, - boostingStrategy, validate = false, seed, featureSubsetStrategy) - } - // algo is enumerate value, this case may be unreachable - case _ => - throw new IllegalArgumentException(s"$algo is not supported by gradient boosting.") - } - } - - /** - * Method to validate a gradient boosting model - * @param input Training dataset: RDD of `LabeledPoint`. - * @param validationInput Validation dataset. - * This dataset should be different from the training dataset, - * but it should follow the same distribution. - * E.g., these two datasets could be created from an original dataset - * by using `org.apache.spark.rdd.RDD.randomSplit()` - * @param seed Random seed. 
- * @return tuple of ensemble models and weights: - * (array of decision tree models, array of model weights) - */ - def runWithValidation( - input: RDD[LabeledPoint], - validationInput: RDD[LabeledPoint], - boostingStrategy: OldBoostingStrategy, - seed: Long, - featureSubsetStrategy: String): (Array[DecisionTreeRegressionModel], Array[Double]) = { - val doUseAcc = getDoUseAccFromSparkConf(input.sparkContext) - runWithValidation(input, validationInput, boostingStrategy, seed, featureSubsetStrategy, - doUseAcc) - } - - /** Run with validation dataset and extended parameters */ - def runWithValidation( - input: RDD[LabeledPoint], - validationInput: RDD[LabeledPoint], - boostingStrategy: OldBoostingStrategy, - seed: Long, - featureSubsetStrategy: String, - doUseAcc: Boolean): (Array[DecisionTreeRegressionModel], Array[Double]) = { - val algo = boostingStrategy.treeStrategy.algo - algo match { - case OldAlgo.Regression => - if (doUseAcc) { - GradientBoostedTrees.boostX(input, validationInput, boostingStrategy, - validate = true, seed, featureSubsetStrategy) - } else { - GradientBoostedTrees.boost(input, validationInput, boostingStrategy, - validate = true, seed, featureSubsetStrategy) - } - case OldAlgo.Classification => - // Map labels to -1, +1 so binary classification can be treated as regression. - val remappedInput = input.map( - x => new LabeledPoint((x.label * 2) - 1, x.features)) - val remappedValidationInput = validationInput.map( - x => new LabeledPoint((x.label * 2) - 1, x.features)) - if (doUseAcc) { - GradientBoostedTrees.boostX(remappedInput, remappedValidationInput, boostingStrategy, - validate = true, seed, featureSubsetStrategy) - } else { - GradientBoostedTrees.boost(remappedInput, remappedValidationInput, boostingStrategy, - validate = true, seed, featureSubsetStrategy) - } - - // algo is an enumerated value, so this case may be unreachable - case _ => - throw new IllegalArgumentException(s"$algo is not supported by gradient boosting.") - } - } - - private val extraParamKey = "spark.boostkit.ml.gbdt.doUseAcc" - private val doUseAccDefault = true - - private def getDoUseAccFromSparkConf(sc: SparkContext): Boolean = { - val doUseAccStr = sc.conf.getOption(extraParamKey) - if (doUseAccStr.nonEmpty) { - try { - doUseAccStr.get.toBoolean - } catch { - case ex: Exception => - throw new IllegalArgumentException(s"Failed to parse boostkit parameter" + - s" ($extraParamKey). Error reason: ${ex.getMessage}") - } - } else { - doUseAccDefault - } - } - - /** - * Compute the initial predictions and errors for a dataset for the first - * iteration of gradient boosting. - * @param data: training data. - * @param initTreeWeight: learning rate assigned to the first tree. - * @param initTree: first DecisionTreeModel. - * @param loss: evaluation metric. - * @return an RDD with each element being a zip of the prediction and error - * corresponding to every sample.
- */ - def computeInitialPredictionAndError( - data: RDD[LabeledPoint], - initTreeWeight: Double, - initTree: DecisionTreeRegressionModel, - loss: OldLoss): RDD[(Double, Double)] = { - data.map { lp => - val pred = updatePrediction(lp.features, 0.0, initTree, initTreeWeight) - val error = loss.computeError(pred, lp.label) - (pred, error) - } - } - - def computeInitialPredictionAndErrorX( - data: RDD[TreePoint], - initTreeWeight: Double, - initTree: DecisionTreeRegressionModel, - loss: OldLoss, - splits: Array[Array[Split]]): RDD[(Double, Double)] = { - data.map { lp => - val pred = updatePredictionX(lp.binnedFeatures, 0.0, initTree, initTreeWeight, splits) - val error = loss.computeError(pred, lp.label) - (pred, error) - } - } - - /** - * Update a zipped predictionError RDD - * (as obtained with computeInitialPredictionAndError) - * @param data: training data. - * @param predictionAndError: predictionError RDD - * @param treeWeight: Learning rate. - * @param tree: Tree using which the prediction and error should be updated. - * @param loss: evaluation metric. - * @return an RDD with each element being a zip of the prediction and error - * corresponding to each sample. - */ - def updatePredictionError( - data: RDD[LabeledPoint], - predictionAndError: RDD[(Double, Double)], - treeWeight: Double, - tree: DecisionTreeRegressionModel, - loss: OldLoss): RDD[(Double, Double)] = { - - val newPredError = data.zip(predictionAndError).mapPartitions { iter => - iter.map { case (lp, (pred, error)) => - val newPred = updatePrediction(lp.features, pred, tree, treeWeight) - val newError = loss.computeError(newPred, lp.label) - (newPred, newError) - } - } - newPredError - } - - def updatePredictionErrorX( - data: RDD[TreePoint], - predictionAndError: RDD[(Double, Double)], - treeWeight: Double, - tree: DecisionTreeRegressionModel, - loss: OldLoss, - splits: Array[Array[Split]]): RDD[(Double, Double)] = { - - val newPredError = data.zip(predictionAndError).mapPartitions { iter => - iter.map { case (lp, (pred, error)) => - val newPred = updatePredictionX(lp.binnedFeatures, pred, tree, treeWeight, splits) - val newError = loss.computeError(newPred, lp.label) - (newPred, newError) - } - } - newPredError - } - - /** - * Add prediction from a new boosting iteration to an existing prediction. - * - * @param features Vector of features representing a single data point. - * @param prediction The existing prediction. - * @param tree New Decision Tree model. - * @param weight Tree weight. - * @return Updated prediction. - */ - def updatePrediction( - features: Vector, - prediction: Double, - tree: DecisionTreeRegressionModel, - weight: Double): Double = { - prediction + tree.rootNode.predictImpl(features).prediction * weight - } - - def updatePredictionX( - features: Array[Int], - prediction: Double, - tree: DecisionTreeRegressionModel, - weight: Double, - splits: Array[Array[Split]]): Double = { - prediction + tree.rootNode.predictImplX(features, splits).prediction * weight - } - - /** - * Method to calculate error of the base learner for the gradient boosting calculation. - * Note: This method is not used by the gradient boosting algorithm but is useful for debugging - * purposes. - * @param data Training dataset: RDD of `LabeledPoint`. - * @param trees Boosted Decision Tree models - * @param treeWeights Learning rates at each boosting iteration. - * @param loss evaluation metric. 
- * @return Measure of model error on data - */ - def computeError( - data: RDD[LabeledPoint], - trees: Array[DecisionTreeRegressionModel], - treeWeights: Array[Double], - loss: OldLoss): Double = { - data.map { lp => - val predicted = trees.zip(treeWeights).foldLeft(0.0) { case (acc, (model, weight)) => - updatePrediction(lp.features, acc, model, weight) - } - loss.computeError(predicted, lp.label) - }.mean() - } - - /** - * Method to compute error or loss for every iteration of gradient boosting. - * - * @param data RDD of `LabeledPoint` - * @param trees Boosted Decision Tree models - * @param treeWeights Learning rates at each boosting iteration. - * @param loss evaluation metric. - * @param algo algorithm for the ensemble, either Classification or Regression - * @return an array with index i having the losses or errors for the ensemble - * containing the first i+1 trees - */ - def evaluateEachIteration( - data: RDD[LabeledPoint], - trees: Array[DecisionTreeRegressionModel], - treeWeights: Array[Double], - loss: OldLoss, - algo: OldAlgo.Value): Array[Double] = { - - val sc = data.sparkContext - val remappedData = algo match { - case OldAlgo.Classification => data.map(x => new LabeledPoint((x.label * 2) - 1, x.features)) - case _ => data - } - - val broadcastTrees = sc.broadcast(trees) - val localTreeWeights = treeWeights - val treesIndices = trees.indices - - val dataCount = remappedData.count() - val evaluation = remappedData.map { point => - treesIndices.map { idx => - val prediction = broadcastTrees.value(idx) - .rootNode - .predictImpl(point.features) - .prediction - prediction * localTreeWeights(idx) - } - .scanLeft(0.0)(_ + _).drop(1) - .map(prediction => loss.computeError(prediction, point.label)) - } - .aggregate(treesIndices.map(_ => 0.0))( - (aggregated, row) => treesIndices.map(idx => aggregated(idx) + row(idx)), - (a, b) => treesIndices.map(idx => a(idx) + b(idx))) - .map(_ / dataCount) - - broadcastTrees.destroy(blocking = false) - evaluation.toArray - } - - /** - * Internal method for performing regression using trees as base learners. - * @param input training dataset - * @param validationInput validation dataset, ignored if validate is set to false. - * @param boostingStrategy boosting parameters - * @param validate whether or not to use the validation dataset. - * @param seed Random seed. - * @return tuple of ensemble models and weights: - * (array of decision tree models, array of model weights) - */ - def boost( - input: RDD[LabeledPoint], - validationInput: RDD[LabeledPoint], - boostingStrategy: OldBoostingStrategy, - validate: Boolean, - seed: Long, - featureSubsetStrategy: String): (Array[DecisionTreeRegressionModel], Array[Double]) = { - val timer = new TimeTracker() - timer.start("total") - timer.start("init") - - boostingStrategy.assertValid() - - // Initialize gradient boosting parameters - val numIterations = boostingStrategy.numIterations - val baseLearners = new Array[DecisionTreeRegressionModel](numIterations) - val baseLearnerWeights = new Array[Double](numIterations) - val loss = boostingStrategy.loss - val learningRate = boostingStrategy.learningRate - - // Prepare strategy for individual trees, which use regression with variance impurity. 
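A quick aside on evaluateEachIteration above, before the boosting state is set up below: the scanLeft over per-tree contributions yields one cumulative prediction per prefix of the ensemble, so a single pass over the data prices every iteration at once. A standalone sketch of that trick:

    def cumulativeErrors(
        perTreeContributions: Seq[Double],
        label: Double,
        error: (Double, Double) => Double): Seq[Double] =
      perTreeContributions.scanLeft(0.0)(_ + _).drop(1).map(pred => error(pred, label))

    // cumulativeErrors(Seq(0.5, 0.3), 1.0, (p, y) => (p - y) * (p - y))
    //   is approximately Seq(0.25, 0.04): after 1 tree (pred 0.5) and 2 trees (pred 0.8).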
- val treeStrategy = boostingStrategy.treeStrategy.copy - val validationTol = boostingStrategy.validationTol - treeStrategy.algo = OldAlgo.Regression - treeStrategy.impurity = OldVariance - treeStrategy.assertValid() - - // Cache input - val persistedInput = if (input.getStorageLevel == StorageLevel.NONE) { - input.persist(StorageLevel.MEMORY_AND_DISK) - true - } else { - false - } - - // Prepare periodic checkpointers - val predErrorCheckpointer = new PeriodicRDDCheckpointer[(Double, Double)]( - treeStrategy.getCheckpointInterval, input.sparkContext) - val validatePredErrorCheckpointer = new PeriodicRDDCheckpointer[(Double, Double)]( - treeStrategy.getCheckpointInterval, input.sparkContext) - - timer.stop("init") - - logDebug("##########") - logDebug("Building tree 0") - logDebug("##########") - - // Initialize tree - timer.start("building tree 0") - val firstTree = new DecisionTreeRegressor().setSeed(seed) - val firstTreeModel = firstTree.train(input, treeStrategy, featureSubsetStrategy) - val firstTreeWeight = 1.0 - baseLearners(0) = firstTreeModel - baseLearnerWeights(0) = firstTreeWeight - - var predError: RDD[(Double, Double)] = - computeInitialPredictionAndError(input, firstTreeWeight, firstTreeModel, loss) - predErrorCheckpointer.update(predError) - logDebug(s"error of gbt = ${predError.values.mean()}") - - // Note: A model of type regression is used since we require raw prediction - timer.stop("building tree 0") - - var validatePredError: RDD[(Double, Double)] = - computeInitialPredictionAndError(validationInput, firstTreeWeight, firstTreeModel, loss) - if (validate) validatePredErrorCheckpointer.update(validatePredError) - var bestValidateError = if (validate) validatePredError.values.mean() else 0.0 - var bestM = 1 - - var m = 1 - var doneLearning = false - while (m < numIterations && !doneLearning) { - // Update data with pseudo-residuals - val data = predError.zip(input).map { case ((pred, _), point) => - LabeledPoint(-loss.gradient(pred, point.label), point.features) - } - - timer.start(s"building tree $m") - logDebug("###################################################") - logDebug(s"Gradient boosting tree iteration ${m}") - logDebug("###################################################") - - val dt = new DecisionTreeRegressor().setSeed(seed + m) - val model = dt.train(data, treeStrategy, featureSubsetStrategy) - timer.stop(s"building tree $m") - // Update partial model - baseLearners(m) = model - // Note: The setting of baseLearnerWeights is incorrect for losses other than SquaredError. - // Technically, the weight should be optimized for the particular loss. - // However, the behavior should be reasonable, though not optimal. - baseLearnerWeights(m) = learningRate - - predError = updatePredictionError( - input, predError, baseLearnerWeights(m), baseLearners(m), loss) - predErrorCheckpointer.update(predError) - logDebug(s"error of gbt = ${predError.values.mean()}") - - if (validate) { - // Stop training early if - // 1. Reduction in error is less than the validationTol or - // 2. If the error increases, that is if the model is overfit. - // We want the model returned corresponding to the best validation error. 
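The early-stopping rule implemented a few lines below has a compact statement; a sketch, following this file's convention of flooring the relative scale at 0.01:

    // Stop when the improvement over the best error so far drops below
    // validationTol, measured relative to max(currentError, 0.01).
    def shouldStop(bestError: Double, currentError: Double, validationTol: Double): Boolean =
      bestError - currentError < validationTol * math.max(currentError, 0.01)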
- - validatePredError = updatePredictionError( - validationInput, validatePredError, baseLearnerWeights(m), baseLearners(m), loss) - validatePredErrorCheckpointer.update(validatePredError) - val currentValidateError = validatePredError.values.mean() - if (bestValidateError - currentValidateError < validationTol * Math.max( - currentValidateError, 0.01)) { - doneLearning = true - } else if (currentValidateError < bestValidateError) { - bestValidateError = currentValidateError - bestM = m + 1 - } - } - m += 1 - } - - timer.stop("total") - - logInfo("Internal timing for DecisionTree:") - logInfo(s"$timer") - - predErrorCheckpointer.unpersistDataSet() - predErrorCheckpointer.deleteAllCheckpoints() - validatePredErrorCheckpointer.unpersistDataSet() - validatePredErrorCheckpointer.deleteAllCheckpoints() - if (persistedInput) input.unpersist() - - if (validate) { - (baseLearners.slice(0, bestM), baseLearnerWeights.slice(0, bestM)) - } else { - (baseLearners, baseLearnerWeights) - } - } - - /** - * Internal method for performing regression using trees as base learners. - * @param input training dataset - * @param validationInput validation dataset, ignored if validate is set to false. - * @param boostingStrategy boosting parameters - * @param validate whether or not to use the validation dataset. - * @param seed Random seed. - * @return tuple of ensemble models and weights: - * (array of decision tree models, array of model weights) - */ - def boostX( - input: RDD[LabeledPoint], - validationInput: RDD[LabeledPoint], - boostingStrategy: OldBoostingStrategy, - validate: Boolean, - seed: Long, - featureSubsetStrategy: String): (Array[DecisionTreeRegressionModel], Array[Double]) = { - val timer = new TimeTracker() - timer.start("total") - timer.start("init") - - boostingStrategy.assertValid() - - // Initialize gradient boosting parameters - val numIterations = boostingStrategy.numIterations - val baseLearners = new Array[DecisionTreeRegressionModel](numIterations) - val baseLearnerWeights = new Array[Double](numIterations) - val loss = boostingStrategy.loss - val learningRate = boostingStrategy.learningRate - - // Prepare strategy for individual trees, which use regression with variance impurity. - val treeStrategy = boostingStrategy.treeStrategy.copy - val validationTol = boostingStrategy.validationTol - treeStrategy.algo = OldAlgo.Regression - treeStrategy.impurity = OldVariance - treeStrategy.assertValid() - - // Prepare periodic checkpointers - val predErrorCheckpointer = new PeriodicRDDCheckpointer[(Double, Double)]( - treeStrategy.getCheckpointInterval, input.sparkContext) - val validatePredErrorCheckpointer = new PeriodicRDDCheckpointer[(Double, Double)]( - treeStrategy.getCheckpointInterval, input.sparkContext) - - // X - val retaggedInput = input.retag(classOf[LabeledPoint]) - val metadata = - DecisionTreeMetadata.buildMetadata(retaggedInput, treeStrategy, 1, featureSubsetStrategy) - - // Find the splits and the corresponding bins (interval between the splits) using a sample - // of the input data. 
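For the split-finding step invoked next, the sampling rule used by findSplits (shown earlier in this patch) draws only as many rows as approximate quantile computation needs, capped at the full dataset. Restated as a tiny helper:

    // Fraction of rows to sample for quantile-based split finding.
    def sampleFraction(maxBins: Int, numExamples: Long): Double = {
      val requiredSamples = math.max(maxBins * maxBins, 10000)
      if (requiredSamples < numExamples) requiredSamples.toDouble / numExamples else 1.0
    }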
- timer.start("findSplits") - val splits = findSplits(retaggedInput, metadata, seed) - timer.stop("findSplits") - logDebug("numBins: feature: number of bins") - logDebug(Range(0, metadata.numFeatures).map { featureIndex => - s"\t$featureIndex\t${metadata.numBins(featureIndex)}" - }.mkString("\n")) - - val (treeInput, processedInput, labelArrayBcTmp, rawPartInfoBcTmp) = - GradientBoostedTreesUtil.dataProcessX(retaggedInput, splits, treeStrategy, metadata, timer, - seed) - var rawPartInfoBc = rawPartInfoBcTmp - var labelArrayBc = labelArrayBcTmp - - // X - timer.stop("init") - - logDebug("##########") - logDebug("Building tree 0") - logDebug("##########") - - // Initialize tree - timer.start("building tree 0") - val firstTree = new DecisionTreeRegressor().setSeed(seed) - val firstTreeModel = firstTree.train4GBDTX(labelArrayBc, processedInput, metadata, splits, - treeStrategy, featureSubsetStrategy, treeInput, rawPartInfoBc) - val firstTreeWeight = 1.0 - baseLearners(0) = firstTreeModel - baseLearnerWeights(0) = firstTreeWeight - - var predError: RDD[(Double, Double)] = - computeInitialPredictionAndErrorX(treeInput, firstTreeWeight, firstTreeModel, loss, splits) - predErrorCheckpointer.update(predError) - logDebug(s"error of gbt = ${predError.values.mean()}") - - // Note: A model of type regression is used since we require raw prediction - timer.stop("building tree 0") - - var validatePredError: RDD[(Double, Double)] = - computeInitialPredictionAndError(validationInput, firstTreeWeight, firstTreeModel, loss) - if (validate) validatePredErrorCheckpointer.update(validatePredError) - var bestValidateError = if (validate) validatePredError.values.mean() else 0.0 - var bestM = 1 - - var m = 1 - var doneLearning = false - while (m < numIterations && !doneLearning) { - labelArrayBc = treeInput.sparkContext.broadcast( - DoubleArrayList.wrap( - predError.zip(treeInput).map { case ((pred, _), point) => - -loss.gradient(pred, point.label)}.collect() - ) - ) - - timer.start(s"building tree $m") - logDebug("###################################################") - logDebug(s"Gradient boosting tree iteration ${m}") - logDebug("###################################################") - - val dt = new DecisionTreeRegressor().setSeed(seed + m) - val model = dt.train4GBDTX(labelArrayBc, processedInput, metadata, splits, treeStrategy, - featureSubsetStrategy, treeInput, rawPartInfoBc) - timer.stop(s"building tree $m") - // Update partial model - baseLearners(m) = model - // Note: The setting of baseLearnerWeights is incorrect for losses other than SquaredError. - // Technically, the weight should be optimized for the particular loss. - // However, the behavior should be reasonable, though not optimal. - baseLearnerWeights(m) = learningRate - - predError = updatePredictionErrorX( - treeInput, predError, baseLearnerWeights(m), baseLearners(m), loss, splits) - predErrorCheckpointer.update(predError) - logDebug(s"error of gbt = ${predError.values.mean()}") - - if (validate) { - // Stop training early if - // 1. Reduction in error is less than the validationTol or - // 2. If the error increases, that is if the model is overfit. - // We want the model returned corresponding to the best validation error. 
- - validatePredError = updatePredictionError( - validationInput, validatePredError, baseLearnerWeights(m), baseLearners(m), loss) - validatePredErrorCheckpointer.update(validatePredError) - val currentValidateError = validatePredError.values.mean() - if (bestValidateError - currentValidateError < validationTol * Math.max( - currentValidateError, 0.01)) { - doneLearning = true - } else if (currentValidateError < bestValidateError) { - bestValidateError = currentValidateError - bestM = m + 1 - } - } - m += 1 - } - - timer.stop("total") - - logInfo("Internal timing for DecisionTree:") - logInfo(s"$timer") - - predErrorCheckpointer.unpersistDataSet() - predErrorCheckpointer.deleteAllCheckpoints() - validatePredErrorCheckpointer.unpersistDataSet() - validatePredErrorCheckpointer.deleteAllCheckpoints() - treeInput.unpersist() - processedInput.unpersist() - - if (validate) { - (baseLearners.slice(0, bestM), baseLearnerWeights.slice(0, bestM)) - } else { - (baseLearners, baseLearnerWeights) - } - } -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/NodeIdCache.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/NodeIdCache.scala deleted file mode 100644 index 7fb606d..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/NodeIdCache.scala +++ /dev/null @@ -1,360 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.ml.tree.impl - -import java.io.IOException - -import scala.collection.mutable - -import org.apache.hadoop.fs.Path - -import org.apache.spark.internal.Logging -import org.apache.spark.ml.tree.{LearningNode, Split, SplitBase} -import org.apache.spark.rdd.RDD -import org.apache.spark.storage.StorageLevel - -/** - * This is used by the node id cache to find the child id that a data point would belong to. - * @param split Split information. - * @param nodeIndex The current node index of a data point that this will update. - */ -private[tree] case class NodeIndexUpdaterRaw(split: Split, nodeIndex: Int) { - - /** - * Determine a child node index based on the feature value and the split. - * @param binnedFeature Binned feature value. - * @param splits Split information to convert the bin indices to approximate feature values. - * @return Child node index to update to. 
- */ - def updateNodeIndex(binnedFeature: Int, splits: Array[Split]): Int = { - if (split.shouldGoLeft(binnedFeature, splits)) { - LearningNode.leftChildIndex(nodeIndex) - } else { - LearningNode.rightChildIndex(nodeIndex) - } - } -} - -/** - * This is used by the node id cache to find the child id that a data point would belong to. - * @param split Split information. - * @param nodeIndex The current node index of a data point that this will update. - */ -private[tree] case class NodeIndexUpdater(split: SplitBase, nodeIndex: Int) { - - /** - * Determine a child node index based on the feature value and the split. - * @param binnedFeature Binned feature value. - * @param splits Split information to convert the bin indices to approximate feature values. - * @return Child node index to update to. - */ - def updateNodeIndex(binnedFeature: Char, splits: Array[SplitBase]): Int = { - if (split.shouldGoLeft(binnedFeature, splits)) { - LearningNode.leftChildIndex(nodeIndex) - } else { - LearningNode.rightChildIndex(nodeIndex) - } - } -} - -/** - * Each TreePoint belongs to a particular node per tree. - * Each row in the nodeIdsForInstances RDD is an array over trees of the node index - * in each tree. Initially, values should all be 1 for root node. - * The nodeIdsForInstances RDD needs to be updated at each iteration. - * @param nodeIdsForInstances The initial values in the cache - * (should be an Array of all 1's (meaning the root nodes)). - * @param checkpointInterval The checkpointing interval - * (how often should the cache be checkpointed.). - */ -private[spark] class NodeIdCache( - var nodeIdsForInstances: RDD[Array[Int]], - val checkpointInterval: Int) extends Logging { - - // Keep a reference to a previous node Ids for instances. - // Because we will keep on re-persisting updated node Ids, - // we want to unpersist the previous RDD. - private var prevNodeIdsForInstances: RDD[Array[Int]] = null - - // To keep track of the past checkpointed RDDs. - private val checkpointQueue = mutable.Queue[RDD[Array[Int]]]() - private var rddUpdateCount = 0 - - // Indicates whether we can checkpoint - private val canCheckpoint = nodeIdsForInstances.sparkContext.getCheckpointDir.nonEmpty - - // Hadoop Configuration for deleting checkpoints as needed - private val hadoopConf = nodeIdsForInstances.sparkContext.hadoopConfiguration - - /** - * Update the node index values in the cache. - * This updates the RDD and its lineage. - * TODO: Passing bin information to executors seems unnecessary and costly. - * @param data The RDD of training rows. - * @param nodeIdUpdaters A map of node index updaters. - * The key is the indices of nodes that we want to update. - * @param splits Split information needed to find child node indices. - */ - def updateNodeIndices( - data: RDD[BaggedPoint[TreePointX]], - nodeIdUpdaters: Array[mutable.Map[Int, NodeIndexUpdater]], - splits: Array[Array[SplitBase]]): Unit = { - if (prevNodeIdsForInstances != null) { - // Unpersist the previous one if one exists. 
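As background for the updaters above and the update loop that resumes below: LearningNode uses 1-based binary-heap indexing, which is what makes the cached per-instance node ids cheap to maintain. A sketch of the arithmetic (illustrative; the real helpers live on LearningNode):

    object NodeIndexing {
      def leftChildIndex(i: Int): Int = i << 1          // 2 * i
      def rightChildIndex(i: Int): Int = (i << 1) + 1   // 2 * i + 1
      def parentIndex(i: Int): Int = i >> 1
      def indexToLevel(i: Int): Int =
        31 - Integer.numberOfLeadingZeros(i)            // floor(log2(i)); root (i = 1) is level 0
    }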
- prevNodeIdsForInstances.unpersist() - } - - prevNodeIdsForInstances = nodeIdsForInstances - nodeIdsForInstances = data.zip(nodeIdsForInstances).map { case (point, ids) => - var treeId = 0 - while (treeId < nodeIdUpdaters.length) { - val nodeIdUpdater = nodeIdUpdaters(treeId).getOrElse(ids(treeId), null) - if (nodeIdUpdater != null) { - val featureIndex = nodeIdUpdater.split.featureIndex - val newNodeIndex = nodeIdUpdater.updateNodeIndex( - binnedFeature = point.datum.binnedFeatures.get(featureIndex), - splits = splits(featureIndex)) - ids(treeId) = newNodeIndex - } - treeId += 1 - } - ids - } - - // Keep on persisting new ones. - nodeIdsForInstances.persist(StorageLevel.MEMORY_AND_DISK) - rddUpdateCount += 1 - - // Handle checkpointing if the directory is not None. - if (canCheckpoint && checkpointInterval != -1 && (rddUpdateCount % checkpointInterval) == 0) { - // Let's see if we can delete previous checkpoints. - var canDelete = true - while (checkpointQueue.size > 1 && canDelete) { - // We can delete the oldest checkpoint iff - // the next checkpoint actually exists in the file system. - if (checkpointQueue(1).getCheckpointFile.isDefined) { - val old = checkpointQueue.dequeue() - // Since the old checkpoint is not deleted by Spark, we'll manually delete it here. - try { - val path = new Path(old.getCheckpointFile.get) - val fs = path.getFileSystem(hadoopConf) - fs.delete(path, true) - } catch { - case e: IOException => - logError("Decision Tree learning using cacheNodeIds failed to remove checkpoint" + - s" file: ${old.getCheckpointFile.get}") - } - } else { - canDelete = false - } - } - - nodeIdsForInstances.checkpoint() - checkpointQueue.enqueue(nodeIdsForInstances) - } - } - - /** - * Update the node index values in the cache. - * This updates the RDD and its lineage. - * TODO: Passing bin information to executors seems unnecessary and costly. - * @param data The RDD of training rows. - * @param nodeIdUpdaters A map of node index updaters. - * The key is the indices of nodes that we want to update. - * @param splits Split information needed to find child node indices. - */ - def updateNodeIndicesRaw( - data: RDD[BaggedPoint[TreePoint]], - nodeIdUpdaters: Array[mutable.Map[Int, NodeIndexUpdaterRaw]], - splits: Array[Array[Split]]): Unit = { - if (prevNodeIdsForInstances != null) { - // Unpersist the previous one if one exists. - prevNodeIdsForInstances.unpersist() - } - - prevNodeIdsForInstances = nodeIdsForInstances - nodeIdsForInstances = data.zip(nodeIdsForInstances).map { case (point, ids) => - var treeId = 0 - while (treeId < nodeIdUpdaters.length) { - val nodeIdUpdater = nodeIdUpdaters(treeId).getOrElse(ids(treeId), null) - if (nodeIdUpdater != null) { - val featureIndex = nodeIdUpdater.split.featureIndex - val newNodeIndex = nodeIdUpdater.updateNodeIndex( - binnedFeature = point.datum.binnedFeatures(featureIndex), - splits = splits(featureIndex)) - ids(treeId) = newNodeIndex - } - treeId += 1 - } - ids - } - - // Keep on persisting new ones. - nodeIdsForInstances.persist(StorageLevel.MEMORY_AND_DISK) - rddUpdateCount += 1 - - // Handle checkpointing if the directory is not None. - if (canCheckpoint && checkpointInterval != -1 && (rddUpdateCount % checkpointInterval) == 0) { - // Let's see if we can delete previous checkpoints. - var canDelete = true - while (checkpointQueue.size > 1 && canDelete) { - // We can delete the oldest checkpoint iff - // the next checkpoint actually exists in the file system. 
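That invariant (always keep at least one materialized checkpoint recoverable) can be stated compactly. A sketch with a hypothetical deleteFile callback in place of the Hadoop FileSystem calls that follow:

    import scala.collection.mutable

    // Drop old checkpoints only once a newer one is known to exist on disk.
    def pruneCheckpoints(
        checkpointFiles: mutable.Queue[Option[String]],
        deleteFile: String => Unit): Unit = {
      while (checkpointFiles.size > 1 && checkpointFiles(1).isDefined) {
        checkpointFiles.dequeue().foreach(deleteFile)
      }
    }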
- if (checkpointQueue(1).getCheckpointFile.isDefined) { - val old = checkpointQueue.dequeue() - // Since the old checkpoint is not deleted by Spark, we'll manually delete it here. - try { - val path = new Path(old.getCheckpointFile.get) - val fs = path.getFileSystem(hadoopConf) - fs.delete(path, true) - } catch { - case e: IOException => - logError("Decision Tree learning using cacheNodeIds failed to remove checkpoint" + - s" file: ${old.getCheckpointFile.get}") - } - } else { - canDelete = false - } - } - - nodeIdsForInstances.checkpoint() - checkpointQueue.enqueue(nodeIdsForInstances) - } - } - - /** - * Update the node index values in the cache. - * This updates the RDD and its lineage. - * TODO: Passing bin information to executors seems unnecessary and costly. - * @param data The RDD of training rows. - * @param nodeIdUpdaters A map of node index updaters. - * The key is the indices of nodes that we want to update. - * @param splits Split information needed to find child node indices. - */ - def updateNodeIndicesY( - data: RDD[BaggedPoint[TreePointY]], - nodeIdUpdaters: Array[mutable.Map[Int, NodeIndexUpdaterRaw]], - splits: Array[Array[Split]]): Unit = { - if (prevNodeIdsForInstances != null) { - // Unpersist the previous one if one exists. - prevNodeIdsForInstances.unpersist() - } - - prevNodeIdsForInstances = nodeIdsForInstances - nodeIdsForInstances = data.zip(nodeIdsForInstances).map { case (point, ids) => - var treeId = 0 - while (treeId < nodeIdUpdaters.length) { - val nodeIdUpdater = nodeIdUpdaters(treeId).getOrElse(ids(treeId), null) - if (nodeIdUpdater != null) { - val featureIndex = nodeIdUpdater.split.featureIndex - val newNodeIndex = nodeIdUpdater.updateNodeIndex( - binnedFeature = point.datum.binnedFeatures.get(featureIndex), - splits = splits(featureIndex)) - ids(treeId) = newNodeIndex - } - treeId += 1 - } - ids - } - - // Keep on persisting new ones. - nodeIdsForInstances.persist(StorageLevel.MEMORY_AND_DISK) - rddUpdateCount += 1 - - // Handle checkpointing if the directory is not None. - if (canCheckpoint && checkpointInterval != -1 && (rddUpdateCount % checkpointInterval) == 0) { - // Let's see if we can delete previous checkpoints. - var canDelete = true - while (checkpointQueue.size > 1 && canDelete) { - // We can delete the oldest checkpoint iff - // the next checkpoint actually exists in the file system. - if (checkpointQueue(1).getCheckpointFile.isDefined) { - val old = checkpointQueue.dequeue() - // Since the old checkpoint is not deleted by Spark, we'll manually delete it here. - try { - val path = new Path(old.getCheckpointFile.get) - val fs = path.getFileSystem(hadoopConf) - fs.delete(path, true) - } catch { - case e: IOException => - logError("Decision Tree learning using cacheNodeIds failed to remove checkpoint" + - s" file: ${old.getCheckpointFile.get}") - } - } else { - canDelete = false - } - } - - nodeIdsForInstances.checkpoint() - checkpointQueue.enqueue(nodeIdsForInstances) - } - } - - - /** - * Call this after training is finished to delete any remaining checkpoints. 
- */ - def deleteAllCheckpoints(): Unit = { - while (checkpointQueue.nonEmpty) { - val old = checkpointQueue.dequeue() - if (old.getCheckpointFile.isDefined) { - try { - val path = new Path(old.getCheckpointFile.get) - val fs = path.getFileSystem(hadoopConf) - fs.delete(path, true) - } catch { - case e: IOException => - logError("Decision Tree learning using cacheNodeIds failed to remove checkpoint" + - s" file: ${old.getCheckpointFile.get}") - } - } - } - if (prevNodeIdsForInstances != null) { - // Unpersist the previous one if one exists. - prevNodeIdsForInstances.unpersist() - } - } -} - -private[spark] object NodeIdCache { - /** - * Initialize the node Id cache with initial node Id values. - * @param data The RDD of training rows. - * @param numTrees The number of trees that we want to create cache for. - * @param checkpointInterval The checkpointing interval - * (how often should the cache be checkpointed.). - * @param initVal The initial values in the cache. - * @return A node Id cache containing an RDD of initial root node Indices. - */ - def init[T]( - data: RDD[T], - numTrees: Int, - checkpointInterval: Int, - initVal: Int = 1): NodeIdCache = { - new NodeIdCache( - data.map(_ => Array.fill[Int](numTrees)(initVal)), - checkpointInterval) - } -} - diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/RandomForest.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/RandomForest.scala deleted file mode 100644 index 17c8775..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/RandomForest.scala +++ /dev/null @@ -1,1231 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.ml.tree.impl - -import java.io.IOException - -import scala.collection.mutable -import scala.util.Random - -import org.apache.spark.internal.Logging -import org.apache.spark.ml.classification.DecisionTreeClassificationModel -import org.apache.spark.ml.feature.LabeledPoint -import org.apache.spark.ml.regression.DecisionTreeRegressionModel -import org.apache.spark.ml.tree._ -import org.apache.spark.ml.util.Instrumentation -import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo, Strategy => OldStrategy} -import org.apache.spark.mllib.tree.impurity.ImpurityCalculator -import org.apache.spark.mllib.tree.model.ImpurityStats -import org.apache.spark.rdd.RDD -import org.apache.spark.util.random.{SamplingUtils, XORShiftRandom} - - -/** - * ALGORITHM - * - * This is a sketch of the algorithm to help new developers. 
- * - * The algorithm partitions data by instances (rows). - * On each iteration, the algorithm splits a set of nodes. In order to choose the best split - * for a given node, sufficient statistics are collected from the distributed data. - * For each node, the statistics are collected to some worker node, and that worker selects - * the best split. - * - * This setup requires discretization of continuous features. This binning is done in the - * findSplits() method during initialization, after which each continuous feature becomes - * an ordered discretized feature with at most maxBins possible values. - * - * The main loop in the algorithm operates on a queue of nodes (nodeStack). These nodes - * lie at the periphery of the tree being trained. If multiple trees are being trained at once, - * then this queue contains nodes from all of them. Each iteration works roughly as follows: - * On the master node: - * - Some number of nodes are pulled off of the queue (based on the amount of memory - * required for their sufficient statistics). - * - For random forests, if featureSubsetStrategy is not "all," then a subset of candidate - * features are chosen for each node. See method selectNodesToSplit(). - * On worker nodes, via method findBestSplits(): - * - The worker makes one pass over its subset of instances. - * - For each (tree, node, feature, split) tuple, the worker collects statistics about - * splitting. Note that the set of (tree, node) pairs is limited to the nodes selected - * from the queue for this iteration. The set of features considered can also be limited - * based on featureSubsetStrategy. - * - For each node, the statistics for that node are aggregated to a particular worker - * via reduceByKey(). The designated worker chooses the best (feature, split) pair, - * or chooses to stop splitting if the stopping criteria are met. - * On the master node: - * - The master collects all decisions about splitting nodes and updates the model. - * - The updated model is passed to the workers on the next iteration. - * This process continues until the node queue is empty. - * - * Most of the methods in this implementation support the statistics aggregation, which is - * the heaviest part of the computation. In general, this implementation is bound by either - * the cost of statistics computation on workers or by communicating the sufficient statistics. - */ -private[spark] object RandomForest extends Logging { - - /** - * Train a random forest. - * - * @param input Training data: RDD of `LabeledPoint` - * @return an unweighted set of trees - */ - def run( - input: RDD[LabeledPoint], - strategy: OldStrategy, - numTrees: Int, - featureSubsetStrategy: String, - seed: Long, - instr: Option[Instrumentation[_]], - parentUID: Option[String] = None): Array[DecisionTreeModel] = { - val exParams = RFUtils.parseExtraParams(input, strategy) - runX(input, strategy, numTrees, featureSubsetStrategy, seed, instr, exParams, parentUID) - } - - /** - * Train a random forest. 
- * - * @param input Training data: RDD of `LabeledPoint` - * @return an unweighted set of trees - */ - def runX( - input: RDD[LabeledPoint], - strategy: OldStrategy, - numTrees: Int, - featureSubsetStrategy: String, - seed: Long, - instr: Option[Instrumentation[_]], - extraParams: RFExtraParams, - parentUID: Option[String] = None): Array[DecisionTreeModel] = { - - RandomForestInfo.timerResult = "" - val timer = new TimeTracker() - - timer.start("total") - - timer.start("init") - - val binnedFeaturesType = BinnedFeaturesDataType.withName(extraParams.featuresDataType) - val retaggedInput = input.retag(classOf[LabeledPoint]) - // featureSubsetStrategy: The number of features to consider for splits at each tree node. - // featureSubsetStrategy: default value is "auto" for random forest. - // impurity: default value is "gini" for random forest. - val metadata = - DecisionTreeMetadata.buildMetadata(retaggedInput, strategy, numTrees, featureSubsetStrategy) - logWarning(s"decisionTreeMetadata details: ${metadata.numFeatures}," + - s" ${metadata.numExamples}, ${metadata.numClasses: Int}, ${metadata.maxBins: Int}," + - s" ${metadata.featureArity}, ${metadata.unorderedFeatures.mkString("[", ";", "]")}," + - s" ${metadata.impurity}, ${metadata.quantileStrategy}, ${metadata.maxDepth: Int}," + - s" ${metadata.minInstancesPerNode: Int}, ${metadata.minInfoGain: Double}," + - s" ${metadata.numTrees: Int}, ${metadata.numFeaturesPerNode: Int}, ${binnedFeaturesType}") - instr match { - case Some(instrumentation) => - instrumentation.logNumFeatures(metadata.numFeatures) - instrumentation.logNumClasses(metadata.numClasses) - case None => - logInfo(s"numFeatures: ${metadata.numFeatures}") - logInfo(s"numClasses: ${metadata.numClasses}") - } - - // Find the splits and the corresponding bins (interval between the splits) using a sample - // of the input data. - timer.start("findSplits") - val splits = findSplits(retaggedInput, metadata, seed) - val baseSplits = - splits.map(v => v.zipWithIndex.map{case (split, binIdx) => Split.toBase(split, binIdx)}) - timer.stop("findSplits") - logDebug("numBins: feature: number of bins") - logDebug(Range(0, metadata.numFeatures).map { featureIndex => - s"\t$featureIndex\t${metadata.numBins(featureIndex)}" - }.mkString("\n")) - - // Bin feature values (TreePoint representation). - // Cache input RDD for speedup during multiple passes. - val treeInput = TreePointX.convertToTreeRDD(retaggedInput, splits, metadata, binnedFeaturesType) - - val withReplacement = numTrees > 1 - - // Default value of subsamplingRate is 1 for random forest. - val baggedInputOri = BaggedPoint.convertToBaggedRDD(treeInput, strategy.subsamplingRate, - numTrees, withReplacement, seed) - - val baggedInput = RFUtils.transformBaggedRDD(baggedInputOri, extraParams) - - // depth of the decision tree - val maxDepth = strategy.maxDepth - require(maxDepth <= 30, - s"DecisionTree currently only supports maxDepth <= 30, but was given maxDepth = $maxDepth.") - - // Max memory usage for aggregates - // TODO: Calculate memory usage more precisely. - val maxMemoryUsage: Long = strategy.maxMemoryInMB * 1024L * 1024L - logDebug(s"max memory usage for aggregates = ${maxMemoryUsage} bytes.") - - /* - * The main idea here is to perform group-wise training of the decision tree nodes thus - * reducing the passes over the data from (# nodes) to (# nodes / maxNumberOfNodesPerGroup). - * Each data sample is handled by a particular node (or it reaches a leaf and is not used - * in lower levels). 
- */ - - // Create an RDD of node Id cache. - // At first, all the rows belong to the root nodes (node Id == 1). - // Default value of useNodeIdCache is false for random forest. - val nodeIdCache = if (strategy.useNodeIdCache) { - Some(NodeIdCache.init( - data = baggedInput, - numTrees = numTrees, - checkpointInterval = strategy.checkpointInterval, - initVal = 1)) - } else { - None - } - - /* - Stack of nodes to train: (treeIndex, node) - The reason this is a stack is that we train many trees at once, but we want to focus on - completing trees, rather than training all simultaneously. If we are splitting nodes from - 1 tree, then the new nodes to split will be put at the top of this stack, so we will continue - training the same tree in the next iteration. This focus allows us to send fewer trees to - workers on each iteration; see topNodesForGroup below. - */ - val nodeStack = new mutable.ArrayStack[(Int, LearningNodeX)] - - val rng = new Random() - rng.setSeed(seed) - - // Allocate and queue root nodes. - val topNodes = Array.fill[LearningNodeX](numTrees)(LearningNodeX.emptyNode(nodeIndex = 1)) - Range(0, numTrees).foreach(treeIndex => nodeStack.push((treeIndex, topNodes(treeIndex)))) - - timer.stop("init") - - while (nodeStack.nonEmpty) { - // Collect some nodes to split, and choose features for each node (if subsampling). - // Each group of nodes may come from one or multiple trees, and at multiple levels. - // nodesForGroup: treeIndex --> learningNodes in tree - // treeToNodeToIndexInfo: treeIndex --> (global) learningNodes index in tree - // --> (node index in group, feature indices). - val (nodesForGroup, treeToNodeToIndexInfo) = - RandomForest.selectNodesToSplit(nodeStack, maxMemoryUsage, metadata, rng) - // Sanity check (should never occur): - assert(nodesForGroup.nonEmpty, - s"RandomForest selected empty nodesForGroup. Error for unknown reason.") - - // Only send trees to worker if they contain nodes being split this iteration. - // topNodesForGroup: treeIndex --> top node in tree - val topNodesForGroup: Map[Int, LearningNodeX] = - nodesForGroup.keys.map(treeIdx => treeIdx -> topNodes(treeIdx)).toMap - - // Choose node splits, and enqueue new nodes as needed. - timer.start("findBestSplits") - RandomForest.findBestSplits(baggedInput, metadata, topNodesForGroup, nodesForGroup, - treeToNodeToIndexInfo, baseSplits, nodeStack, timer, nodeIdCache, Some(extraParams)) - timer.stop("findBestSplits") - } - - baggedInput.unpersist() - - timer.stop("total") - - logInfo("Internal timing for DecisionTree:") - logInfo(s"$timer") - RandomForestInfo.timerResult = timer.toString() - - // Delete any remaining checkpoints used for node Id cache. - if (nodeIdCache.nonEmpty) { - try { - nodeIdCache.get.deleteAllCheckpoints() - } catch { - case e: IOException => - logWarning(s"delete all checkpoints failed. 
Error reason: ${e.getMessage}") - } - } - - val numFeatures = metadata.numFeatures - - parentUID match { - case Some(uid) => - if (strategy.algo == OldAlgo.Classification) { - topNodes.map { rootNode => - new DecisionTreeClassificationModel(uid, rootNode.toNode(splits), numFeatures, - strategy.getNumClasses) - } - } else { - topNodes.map { rootNode => - new DecisionTreeRegressionModel(uid, rootNode.toNode(splits), numFeatures) - } - } - case None => - if (strategy.algo == OldAlgo.Classification) { - topNodes.map { rootNode => - new DecisionTreeClassificationModel(rootNode.toNode(splits), numFeatures, - strategy.getNumClasses) - } - } else { - topNodes.map(rootNode => - new DecisionTreeRegressionModel(rootNode.toNode(splits), numFeatures)) - } - } - } - - /** - * Helper for binSeqOp, for data which can contain a mix of ordered and unordered features. - * - * For ordered features, a single bin is updated. - * For unordered features, bins correspond to subsets of categories; either the left or right bin - * for each subset is updated. - * - * @param agg Array storing aggregate calculation, with a set of sufficient statistics for - * each (feature, bin). - * @param treePoint Data point being aggregated. - * @param splits possible splits indexed (numFeatures)(numSplits) - * @param unorderedFeatures Set of indices of unordered features. - * @param instanceWeight Weight (importance) of instance in dataset. - */ - private def mixedBinSeqOp( - agg: DTStatsAggregator, - treePoint: TreePointX, - splits: Array[Array[SplitBase]], - unorderedFeatures: Set[Int], - instanceWeight: Int, - featuresForNode: Option[Array[Int]]): Unit = { - val numFeaturesPerNode = if (featuresForNode.nonEmpty) { - // Use subsampled features - featuresForNode.get.length - } else { - // Use all features - agg.metadata.numFeatures - } - // Iterate over features. - var featureIndexIdx = 0 - while (featureIndexIdx < numFeaturesPerNode) { - val featureIndex = if (featuresForNode.nonEmpty) { - featuresForNode.get.apply(featureIndexIdx) - } else { - featureIndexIdx - } - if (unorderedFeatures.contains(featureIndex)) { - // Unordered feature - val featureValue = treePoint.binnedFeatures.get(featureIndex) - val leftNodeFeatureOffset = agg.getFeatureOffset(featureIndexIdx) - // Update the left or right bin for each split. - val numSplits = agg.metadata.numSplits(featureIndex) - val featureSplits = splits(featureIndex) - var splitIndex = 0 - while (splitIndex < numSplits) { - if (featureSplits(splitIndex).shouldGoLeft(featureValue, featureSplits)) { - agg.featureUpdate(leftNodeFeatureOffset, splitIndex, treePoint.label, instanceWeight) - } - splitIndex += 1 - } - } else { - // Ordered feature - val binIndex = treePoint.binnedFeatures.get(featureIndex) - agg.update(featureIndexIdx, binIndex, treePoint.label, instanceWeight) - } - featureIndexIdx += 1 - } - } - - /** - * Helper for binSeqOp, for regression and for classification with only ordered features. - * - * For each feature, the sufficient statistics of one bin are updated. - * - * @param agg Array storing aggregate calculation, with a set of sufficient statistics for - * each (feature, bin). - * @param treePoint Data point being aggregated. - * @param instanceWeight Weight (importance) of instance in dataset. - */ - private def orderedBinSeqOp( - agg: DTStatsAggregator, - treePoint: TreePointX, - instanceWeight: Int, - featuresForNode: Option[Array[Int]]): Unit = { - val label = treePoint.label - - // Iterate over features. 
-    if (featuresForNode.nonEmpty) {
-      // Use subsampled features
-      var featureIndexIdx = 0
-      while (featureIndexIdx < featuresForNode.get.length) {
-        val binIndex = treePoint.binnedFeatures.get(featuresForNode.get.apply(featureIndexIdx))
-        agg.update(featureIndexIdx, binIndex, label, instanceWeight)
-        featureIndexIdx += 1
-      }
-    } else {
-      // Use all features
-      val numFeatures = agg.metadata.numFeatures
-      var featureIndex = 0
-      while (featureIndex < numFeatures) {
-        val binIndex = treePoint.binnedFeatures.get(featureIndex)
-        agg.update(featureIndex, binIndex, label, instanceWeight)
-        featureIndex += 1
-      }
-    }
-  }
-
-  /**
-   * Given a group of nodes, this finds the best split for each node.
-   *
-   * @param input Training data: RDD of [[TreePointX]]
-   * @param metadata Learning and dataset metadata
-   * @param topNodesForGroup For each tree in group, tree index -> root node.
-   *                         Used for matching instances with nodes.
-   * @param nodesForGroup Mapping: treeIndex --> nodes to be split in tree
-   * @param treeToNodeToIndexInfo Mapping: treeIndex --> (global) node index in tree
-   *                              --> nodeIndexInfo, where nodeIndexInfo stores the node's
-   *                              index within the group and its feature subset (a subset
-   *                              of the full feature set when feature subsampling is used).
-   * @param splits possible splits for all features, indexed (numFeatures)(numSplits)
-   * @param nodeStack Queue of nodes to split, with values (treeIndex, node).
-   *                  Updated with new non-leaf nodes which are created.
-   * @param nodeIdCache Node Id cache containing an RDD of Array[Int] where
-   *                    each value in the array is the data point's node Id
-   *                    for a corresponding tree. This is used to prevent the need
-   *                    to pass the entire tree to the executors during
-   *                    the node stat aggregation phase.
-   */
-  private[tree] def findBestSplits(
-      input: RDD[BaggedPoint[TreePointX]],
-      metadata: DecisionTreeMetadata,
-      topNodesForGroup: Map[Int, LearningNodeX],
-      nodesForGroup: Map[Int, Array[LearningNodeX]],
-      treeToNodeToIndexInfo: Map[Int, Map[Int, NodeIndexInfo]],
-      splits: Array[Array[SplitBase]],
-      nodeStack: mutable.ArrayStack[(Int, LearningNodeX)],
-      timer: TimeTracker = new TimeTracker,
-      nodeIdCache: Option[NodeIdCache] = None,
-      extraParams: Option[RFExtraParams] = None): Unit = {
-
-    /*
-     * The high-level descriptions of the best split optimizations are noted here.
-     *
-     * *Group-wise training*
-     * We perform bin calculations for groups of nodes to reduce the number of
-     * passes over the data. Each iteration requires more computation and storage,
-     * but saves several iterations over the data.
-     *
-     * *Bin-wise computation*
-     * We use a bin-wise best split computation strategy instead of a straightforward best
-     * split computation strategy. Instead of analyzing each sample for its contribution to
-     * the left/right child node impurity of every split, we first categorize each feature of
-     * a sample into a bin. We exploit this structure to calculate aggregates for bins and
-     * then use these aggregates to calculate information gain for each split.
-     *
-     * *Aggregation over partitions*
-     * Instead of performing a flatMap/reduceByKey operation, we exploit the fact that we know
-     * the number of splits in advance. Thus, we store the aggregates (at the appropriate
-     * indices) in a single array for all bins and rely upon the RDD aggregate method to
-     * drastically reduce the communication overhead.
- */ - - val bcVariables = if (extraParams.isEmpty) false else extraParams.get.bcVariables - /** numNodes: Number of nodes in this group */ - val numNodes = nodesForGroup.values.map(_.length).sum - logDebug(s"numNodes = ${numNodes}") - logDebug(s"numFeatures = ${metadata.numFeatures}") - logDebug(s"numClasses = ${metadata.numClasses}") - logDebug(s"isMulticlass = ${metadata.isMulticlass}") - logDebug(s"isMulticlassWithCategoricalFeatures = " + - s"${metadata.isMulticlassWithCategoricalFeatures}") - logDebug(s"using nodeIdCache = ${ nodeIdCache.nonEmpty.toString}") - - val groupInfo = RFUtils.getGroupInfo(numNodes, treeToNodeToIndexInfo, extraParams) - - val splitsBc = if (bcVariables) Some(input.sparkContext.broadcast(splits)) else Option.empty - val splitsOption = if (bcVariables) Option.empty else Some(splits) - - - /** - * Performs a sequential aggregation over a partition for a particular tree and node. - * - * For each feature, the aggregate sufficient statistics are updated for the relevant - * bins. - * - * @param treeIndex Index of the tree that we want to perform aggregation for. - * @param nodeInfo The node info for the tree node. - * @param agg Array storing aggregate calculation, with a set of sufficient statistics - * for each (node, feature, bin). - * @param baggedPoint Data point being aggregated. - */ - def nodeBinSeqOp( - treeIndex: Int, - nodeInfo: NodeIndexInfo, - agg: Array[DTStatsAggregator], - splitsBcv: Array[Array[SplitBase]], - baggedPoint: BaggedPoint[TreePointX]): Unit = { - if (RFUtils.isValidNodeInfo(nodeInfo, agg)) { - val aggNodeIndex = nodeInfo.nodeIndexInGroup - val featuresForNode = nodeInfo.featureSubset - val instanceWeight = baggedPoint.subsampleWeights(treeIndex) - if (metadata.unorderedFeatures.isEmpty) { - orderedBinSeqOp(agg(aggNodeIndex), baggedPoint.datum, instanceWeight, featuresForNode) - } else { - mixedBinSeqOp(agg(aggNodeIndex), baggedPoint.datum, splitsBcv, - metadata.unorderedFeatures, instanceWeight, featuresForNode) - } - agg(aggNodeIndex).updateParent(baggedPoint.datum.label, instanceWeight) - } - } - - /** - * Performs a sequential aggregation over a partition. - * - * Each data point contributes to one node. For each feature, - * the aggregate sufficient statistics are updated for the relevant bins. - * - * @param agg Array storing aggregate calculation, with a set of sufficient statistics for - * each (node, feature, bin). - * @param baggedPoint Data point being aggregated. - * @return agg - */ - def binSeqOp( - agg: Array[DTStatsAggregator], - baggedPoint: BaggedPoint[TreePointX], - splitsBcv: Array[Array[SplitBase]], - sampleId: Short): Array[DTStatsAggregator] = { - // TODO: treeToNodeToIndexInfo and topNodesForGroup(include sub-nodes) weren't broadcast. - treeToNodeToIndexInfo.foreach { case (treeIndex, nodeIndexToInfo) => - if (RFUtils.isSubSampled(baggedPoint, groupInfo, treeIndex, sampleId)) { - val nodeIndex = - topNodesForGroup(treeIndex).predictImpl(baggedPoint.datum.binnedFeatures, splitsBcv) - nodeBinSeqOp(treeIndex, nodeIndexToInfo.getOrElse(nodeIndex, null), - agg, splitsBcv, baggedPoint) - } - } - agg - } - - /** - * Do the same thing as binSeqOp, but with nodeIdCache. 
-     */
-    def binSeqOpWithNodeIdCache(
-        agg: Array[DTStatsAggregator],
-        splitsBcv: Array[Array[SplitBase]],
-        dataPoint: (BaggedPoint[TreePointX], Array[Int])): Array[DTStatsAggregator] = {
-      treeToNodeToIndexInfo.foreach { case (treeIndex, nodeIndexToInfo) =>
-        val baggedPoint = dataPoint._1
-        val nodeIdCache = dataPoint._2
-        val nodeIndex = nodeIdCache(treeIndex)
-        nodeBinSeqOp(treeIndex, nodeIndexToInfo.getOrElse(nodeIndex, null),
-          agg, splitsBcv, baggedPoint)
-      }
-
-      agg
-    }
-
-    /**
-     * Get the (node index in group) --> (feature indices) map, a shortcut for looking up
-     * the feature indices of a node given its index in the group.
-     */
-    def getNodeToFeatures(
-        treeToNodeToIndexInfo: Map[Int, Map[Int, NodeIndexInfo]]): Option[Map[Int, Array[Int]]] = {
-      if (!metadata.subsamplingFeatures) {
-        None
-      } else {
-        val mutableNodeToFeatures = new mutable.HashMap[Int, Array[Int]]()
-        treeToNodeToIndexInfo.values.foreach { nodeIdToNodeInfo =>
-          nodeIdToNodeInfo.values.foreach { nodeIndexInfo =>
-            assert(nodeIndexInfo.featureSubset.isDefined)
-            mutableNodeToFeatures(nodeIndexInfo.nodeIndexInGroup) = nodeIndexInfo.featureSubset.get
-          }
-        }
-        Some(mutableNodeToFeatures.toMap)
-      }
-    }
-
-    // array of nodes to train indexed by node index in group
-    val nodes = new Array[LearningNodeX](numNodes)
-    nodesForGroup.foreach { case (treeIndex, nodesForTree) =>
-      nodesForTree.foreach { node =>
-        nodes(treeToNodeToIndexInfo(treeIndex)(node.id).nodeIndexInGroup) = node
-      }
-    }
-
-    // Calculate best splits for all nodes in the group
-    timer.start("chooseSplits")
-
-    // In each partition, iterate over all instances and compute aggregate stats for each
-    // node, yielding a (nodeIndex, nodeAggregateStats) pair for each node.
-    // After a `reduceByKey` operation, the stats of a node are shuffled to a particular
-    // partition and combined together; the best splits for the nodes are found there.
-    // Finally, only the best splits for the nodes are collected to the driver to construct
-    // the decision tree.
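The aggregation comment above is dense; the following is a rough, self-contained sketch of the same per-partition aggregate / reduceByKey flow, with a plain Double array standing in for DTStatsAggregator. The SparkSession setup, the toy data, and the numNodes value are illustrative assumptions, not part of this patch:

    import org.apache.spark.sql.SparkSession

    object AggregationPatternSketch {
      def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder().master("local[2]").appName("sketch").getOrCreate()
        val numNodes = 4
        // Hypothetical stand-in for BaggedPoint[TreePointX]: (nodeIndexInGroup, label).
        val data = spark.sparkContext.parallelize(
          Seq((0, 1.0), (1, 0.0), (0, 1.0), (3, 1.0), (1, 1.0)))
        // One pass per partition: accumulate per-node stats into a local array ...
        val perPartition = data.mapPartitions { points =>
          val agg = Array.fill(numNodes)(0.0) // stand-in for DTStatsAggregator
          points.foreach { case (node, label) => agg(node) += label }
          agg.zipWithIndex.map(_.swap).iterator // (nodeIndex, stats) pairs
        }
        // ... then merge each node's stats across partitions; the real code picks the
        // best split from the merged aggregates instead of just printing them.
        println(perPartition.reduceByKey(_ + _).collectAsMap())
        spark.stop()
      }
    }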
-    // nodeToFeatures: node index in group -> selected feature indexes
-    val nodeToFeatures = getNodeToFeatures(treeToNodeToIndexInfo)
-    val nodeToFeaturesBc = input.sparkContext.broadcast(nodeToFeatures)
-
-    /** partitionAggregates RDD: node index in group --> nodeStats */
-    val partitionAggregates: RDD[(Int, DTStatsAggregator)] = if (nodeIdCache.nonEmpty) {
-      input.zip(nodeIdCache.get.nodeIdsForInstances).mapPartitions { points =>
-        // Construct a nodeStatsAggregators array to hold node aggregate stats;
-        // each node will have a nodeStatsAggregator.
-        val nodeStatsAggregators = Array.tabulate(numNodes) { nodeIndex =>
-          val featuresForNode = nodeToFeaturesBc.value.map { nodeToFeatures =>
-            nodeToFeatures(nodeIndex)
-          }
-          new DTStatsAggregator(metadata, featuresForNode)
-        }
-
-        val splitsBcv = if (bcVariables) splitsBc.get.value else splitsOption.get
-        // iterate over all instances in the current partition and update aggregate stats
-        points.foreach(binSeqOpWithNodeIdCache(nodeStatsAggregators, splitsBcv, _))
-
-        // transform the nodeStatsAggregators array to (nodeIndex, nodeAggregateStats) pairs,
-        // which can be combined with other partitions using `reduceByKey`
-        nodeStatsAggregators.view.zipWithIndex.map(_.swap).iterator
-      }
-    } else {
-      input.mapPartitions { points =>
-        val (firstPointOption, nodeStatsAggregators) =
-          RFUtils.initNodeStatsAgg(numNodes, nodeToFeaturesBc, metadata, points, groupInfo)
-        if (firstPointOption.isEmpty) {
-          Iterator.empty
-        } else {
-          val firstPoint = firstPointOption.get
-          val sampleId = firstPoint.sampleId
-
-          val splitsBcv = if (bcVariables) splitsBc.get.value else splitsOption.get
-          binSeqOp(nodeStatsAggregators, firstPoint, splitsBcv, sampleId)
-
-          // iterate over all instances in the current partition and update aggregate stats
-          points.foreach(binSeqOp(nodeStatsAggregators, _, splitsBcv, sampleId))
-
-          // transform the nodeStatsAggregators array to (nodeIndex, nodeAggregateStats) pairs,
-          // which can be combined with other partitions using `reduceByKey`
-          nodeStatsAggregators.view.zipWithIndex
-            .filter(v => RFUtils.isValidAgg(v._1)).map(_.swap).iterator
-        }
-      }
-    }
-
-    val nodeToBestSplits = partitionAggregates.reduceByKey((a, b) => a.merge(b)).map {
-      case (nodeIndex, aggStats) =>
-        val featuresForNode = nodeToFeaturesBc.value.flatMap { nodeToFeatures =>
-          Some(nodeToFeatures(nodeIndex))
-        }
-
-        val splitsBcv = if (bcVariables) splitsBc.get.value else splitsOption.get
-        // find best split for each node
-        val (split: SplitBase, stats: ImpurityStats) =
-          binsToBestSplit(aggStats, splitsBcv, featuresForNode, nodes(nodeIndex))
-        (nodeIndex, (split, stats))
-    }.collectAsMap()
-
-    timer.stop("chooseSplits")
-
-    val nodeIdUpdaters = if (nodeIdCache.nonEmpty) {
-      Array.fill[mutable.Map[Int, NodeIndexUpdater]](
-        metadata.numTrees)(mutable.Map[Int, NodeIndexUpdater]())
-    } else {
-      null
-    }
-    // Iterate over all nodes in this group.
-    nodesForGroup.foreach { case (treeIndex, nodesForTree) =>
-      nodesForTree.foreach { node =>
-        val nodeIndex = node.id
-        val nodeInfo = treeToNodeToIndexInfo(treeIndex)(nodeIndex)
-        val aggNodeIndex = nodeInfo.nodeIndexInGroup
-        val (split: SplitBase, stats: ImpurityStats) =
-          nodeToBestSplits(aggNodeIndex)
-        logDebug(s"best split = ${split}")
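The child-creation code below leans on the implicit binary-heap numbering behind LearningNodeX.indexToLevel, leftChildIndex and rightChildIndex. A stand-alone restatement of that arithmetic (the object and method bodies here are illustrative; only the arithmetic itself is taken from the surrounding code):

    object NodeIndexArithmetic {
      // Root is index 1; the children of node i are 2*i and 2*i + 1.
      def leftChildIndex(i: Int): Int = i << 1
      def rightChildIndex(i: Int): Int = (i << 1) + 1
      // The level is the position of the highest set bit, so the root is level 0.
      def indexToLevel(i: Int): Int = 31 - Integer.numberOfLeadingZeros(i)

      def main(args: Array[String]): Unit = {
        assert(leftChildIndex(1) == 2 && rightChildIndex(1) == 3)
        assert(indexToLevel(1) == 0 && indexToLevel(2) == 1 && indexToLevel(7) == 2)
      }
    }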
-        // Extract info for this node. Create children if not leaf.
-        val isLeaf =
-          (stats.gain <= 0) || (LearningNodeX.indexToLevel(nodeIndex) == metadata.maxDepth)
-        node.isLeaf = isLeaf
-        node.stats = stats
-        logDebug(s"Node = ${node}")
-
-        if (!isLeaf) {
-          node.split = Some(split)
-          val childIsLeaf = (LearningNodeX.indexToLevel(nodeIndex) + 1) == metadata.maxDepth
-          val leftChildIsLeaf = childIsLeaf || (stats.leftImpurity == 0.0)
-          val rightChildIsLeaf = childIsLeaf || (stats.rightImpurity == 0.0)
-          node.leftChild = Some(LearningNodeX(LearningNodeX.leftChildIndex(nodeIndex),
-            leftChildIsLeaf, ImpurityStats.getEmptyImpurityStats(stats.leftImpurityCalculator)))
-          node.rightChild = Some(LearningNodeX(LearningNodeX.rightChildIndex(nodeIndex),
-            rightChildIsLeaf, ImpurityStats.getEmptyImpurityStats(stats.rightImpurityCalculator)))
-
-          if (nodeIdCache.nonEmpty) {
-            val nodeIndexUpdater = NodeIndexUpdater(
-              split = split,
-              nodeIndex = nodeIndex)
-            nodeIdUpdaters(treeIndex).put(nodeIndex, nodeIndexUpdater)
-          }
-
-          // enqueue left child and right child if they are not leaves
-          if (!leftChildIsLeaf) {
-            nodeStack.push((treeIndex, node.leftChild.get))
-          }
-          if (!rightChildIsLeaf) {
-            nodeStack.push((treeIndex, node.rightChild.get))
-          }
-
-          logDebug(s"leftChildIndex = ${node.leftChild.get.id}" +
-            s", impurity = ${stats.leftImpurity}")
-          logDebug(s"rightChildIndex = ${node.rightChild.get.id}" +
-            s", impurity = ${stats.rightImpurity}")
-        }
-      }
-    }
-
-    if (nodeIdCache.nonEmpty) {
-      // Update the cache if needed.
-      nodeIdCache.get.updateNodeIndices(input, nodeIdUpdaters, splits)
-    }
-  }
-
-  /**
-   * Calculate the impurity statistics for a given (feature, split) based upon left/right
-   * aggregates.
-   *
-   * @param stats the reusable impurity statistics for all splits of this feature;
-   *              only 'impurity' and 'impurityCalculator' remain valid between iterations
-   * @param leftImpurityCalculator left node aggregates for this (feature, split)
-   * @param rightImpurityCalculator right node aggregates for this (feature, split)
-   * @param metadata learning and dataset metadata for DecisionTree
-   * @return Impurity statistics for this (feature, split)
-   */
-  private def calculateImpurityStats(
-      stats: ImpurityStats,
-      leftImpurityCalculator: ImpurityCalculator,
-      rightImpurityCalculator: ImpurityCalculator,
-      metadata: DecisionTreeMetadata): ImpurityStats = {
-
-    val parentImpurityCalculator: ImpurityCalculator = if (stats == null) {
-      leftImpurityCalculator.copy.add(rightImpurityCalculator)
-    } else {
-      stats.impurityCalculator
-    }
-
-    val impurity: Double = if (stats == null) {
-      parentImpurityCalculator.calculate()
-    } else {
-      stats.impurity
-    }
-
-    val leftCount = leftImpurityCalculator.count
-    val rightCount = rightImpurityCalculator.count
-
-    val totalCount = leftCount + rightCount
-
-    // If the left or right child doesn't satisfy the minimum instances per node,
-    // then this split is invalid; return invalid information gain stats.
- if ((leftCount < metadata.minInstancesPerNode) || - (rightCount < metadata.minInstancesPerNode)) { - return ImpurityStats.getInvalidImpurityStats(parentImpurityCalculator) - } - - val leftImpurity = leftImpurityCalculator.calculate() // Note: This equals 0 if count = 0 - val rightImpurity = rightImpurityCalculator.calculate() - - val leftWeight = leftCount / totalCount.toDouble - val rightWeight = rightCount / totalCount.toDouble - - val gain = impurity - leftWeight * leftImpurity - rightWeight * rightImpurity - - // if information gain doesn't satisfy minimum information gain, - // then this split is invalid, return invalid information gain stats. - if (gain < metadata.minInfoGain) { - return ImpurityStats.getInvalidImpurityStats(parentImpurityCalculator) - } - - new ImpurityStats(gain, impurity, parentImpurityCalculator, - leftImpurityCalculator, rightImpurityCalculator) - } - - /** - * Find the best split for a node. - * - * @param binAggregates Bin statistics. - * @return tuple for best split: (Split, information gain, prediction at node) - */ - private[tree] def binsToBestSplit( - binAggregates: DTStatsAggregator, - splits: Array[Array[SplitBase]], - featuresForNode: Option[Array[Int]], - node: LearningNodeX): (SplitBase, ImpurityStats) = { - - // Calculate InformationGain and ImpurityStats if current node is top node - val level = LearningNodeX.indexToLevel(node.id) - var gainAndImpurityStats: ImpurityStats = if (level == 0) { - null - } else { - node.stats - } - - val validFeatureSplits = - Range(0, binAggregates.metadata.numFeaturesPerNode).view.map { featureIndexIdx => - featuresForNode.map(features => (featureIndexIdx, features(featureIndexIdx))) - .getOrElse((featureIndexIdx, featureIndexIdx)) - }.withFilter { case (_, featureIndex) => - binAggregates.metadata.numSplits(featureIndex) != 0 - } - - // For each (feature, split), calculate the gain, and select the best (feature, split). - val splitsAndImpurityInfo = - validFeatureSplits.map { case (featureIndexIdx, featureIndex) => - val numSplits = binAggregates.metadata.numSplits(featureIndex) - if (binAggregates.metadata.isContinuous(featureIndex)) { - // Cumulative sum (scanLeft) of bin statistics. - // Afterwards, binAggregates for a bin is the sum of aggregates for - // that bin + all preceding bins. - val nodeFeatureOffset = binAggregates.getFeatureOffset(featureIndexIdx) - var splitIndex = 0 - while (splitIndex < numSplits) { - binAggregates.mergeForFeature(nodeFeatureOffset, splitIndex + 1, splitIndex) - splitIndex += 1 - } - // Find best split. 
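A worked instance of the gain formula from calculateImpurityStats above may help before reading the split search that follows; all numbers are invented for illustration:

    object GainExample {
      def main(args: Array[String]): Unit = {
        // Hypothetical node: parent impurity 0.5, 100 instances split 60/40 by a candidate.
        val (impurity, leftCount, rightCount) = (0.5, 60.0, 40.0)
        val (leftImpurity, rightImpurity) = (0.3, 0.45)
        val totalCount = leftCount + rightCount
        val gain = impurity -
          (leftCount / totalCount) * leftImpurity -
          (rightCount / totalCount) * rightImpurity
        println(f"gain = $gain%.2f") // 0.5 - 0.6 * 0.3 - 0.4 * 0.45 = 0.14
      }
    }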
- val (bestFeatureSplitIndex, bestFeatureGainStats) = - Range(0, numSplits).map { case splitIdx => - val leftChildStats = binAggregates.getImpurityCalculator(nodeFeatureOffset, splitIdx) - val rightChildStats = - binAggregates.getImpurityCalculator(nodeFeatureOffset, numSplits) - rightChildStats.subtract(leftChildStats) - gainAndImpurityStats = calculateImpurityStats(gainAndImpurityStats, - leftChildStats, rightChildStats, binAggregates.metadata) - (splitIdx, gainAndImpurityStats) - }.maxBy(_._2.gain) - (splits(featureIndex)(bestFeatureSplitIndex), bestFeatureGainStats) - } else if (binAggregates.metadata.isUnordered(featureIndex)) { - // Unordered categorical feature - val leftChildOffset = binAggregates.getFeatureOffset(featureIndexIdx) - val (bestFeatureSplitIndex, bestFeatureGainStats) = - Range(0, numSplits).map { splitIndex => - val leftChildStats = binAggregates.getImpurityCalculator(leftChildOffset, splitIndex) - val rightChildStats = binAggregates.getParentImpurityCalculator() - .subtract(leftChildStats) - gainAndImpurityStats = calculateImpurityStats(gainAndImpurityStats, - leftChildStats, rightChildStats, binAggregates.metadata) - (splitIndex, gainAndImpurityStats) - }.maxBy(_._2.gain) - (splits(featureIndex)(bestFeatureSplitIndex), bestFeatureGainStats) - } else { - // Ordered categorical feature - val nodeFeatureOffset = binAggregates.getFeatureOffset(featureIndexIdx) - val numCategories = binAggregates.metadata.numBins(featureIndex) - - /* Each bin is one category (feature value). - * The bins are ordered based on centroidForCategories, and this ordering determines which - * splits are considered. (With K categories, we consider K - 1 possible splits.) - * - * centroidForCategories is a list: (category, centroid) - */ - val centroidForCategories = Range(0, numCategories).map { case featureValue => - val categoryStats = - binAggregates.getImpurityCalculator(nodeFeatureOffset, featureValue) - val centroid = if (categoryStats.count != 0) { - if (binAggregates.metadata.isMulticlass) { - // multiclass classification - // For categorical variables in multiclass classification, - // the bins are ordered by the impurity of their corresponding labels. - categoryStats.calculate() - } else if (binAggregates.metadata.isClassification) { - // binary classification - // For categorical variables in binary classification, - // the bins are ordered by the count of class 1. - categoryStats.stats(1) - } else { - // regression - // For categorical variables in regression and binary classification, - // the bins are ordered by the prediction. - categoryStats.predict - } - } else { - Double.MaxValue - } - (featureValue, centroid) - } - - logDebug(s"Centroids for categorical variable: ${centroidForCategories.mkString(",")}") - - // bins sorted by centroids - val categoriesSortedByCentroid = centroidForCategories.toList.sortBy(_._2) - - logDebug("Sorted centroids for categorical variable = " + - categoriesSortedByCentroid.mkString(",")) - - // Cumulative sum (scanLeft) of bin statistics. - // Afterwards, binAggregates for a bin is the sum of aggregates for - // that bin + all preceding bins. 
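The centroid sort just performed is what reduces the categorical split search to a linear scan: with K categories, only the K - 1 prefixes of the sorted order are candidates. A small illustration with invented per-category centroids (regression-style mean labels), before the merge loop below applies the same ordering to the bin statistics:

    object CentroidOrderingExample {
      def main(args: Array[String]): Unit = {
        // (category, centroid); the centroids are made-up mean labels per category.
        val centroidForCategories = Seq((0, 5.0), (1, 1.0), (2, 3.0))
        val sortedCategories = centroidForCategories.sortBy(_._2).map(_._1) // List(1, 2, 0)
        // Only the contiguous prefixes of this order are candidate left-child sets.
        val candidateSplits = (1 until sortedCategories.length)
          .map(k => sortedCategories.take(k).toSet)
        println(candidateSplits) // Vector(Set(1), Set(1, 2))
      }
    }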
- var splitIndex = 0 - while (splitIndex < numSplits) { - val currentCategory = categoriesSortedByCentroid(splitIndex)._1 - val nextCategory = categoriesSortedByCentroid(splitIndex + 1)._1 - binAggregates.mergeForFeature(nodeFeatureOffset, nextCategory, currentCategory) - splitIndex += 1 - } - // lastCategory = index of bin with total aggregates for this (node, feature) - val lastCategory = categoriesSortedByCentroid.last._1 - // Find best split. - val (bestFeatureSplitIndex, bestFeatureGainStats) = - Range(0, numSplits).map { splitIndex => - val featureValue = categoriesSortedByCentroid(splitIndex)._1 - val leftChildStats = - binAggregates.getImpurityCalculator(nodeFeatureOffset, featureValue) - val rightChildStats = - binAggregates.getImpurityCalculator(nodeFeatureOffset, lastCategory) - rightChildStats.subtract(leftChildStats) - gainAndImpurityStats = calculateImpurityStats(gainAndImpurityStats, - leftChildStats, rightChildStats, binAggregates.metadata) - (splitIndex, gainAndImpurityStats) - }.maxBy(_._2.gain) - val categoriesForSplit = - categoriesSortedByCentroid.map(_._1.toDouble).slice(0, bestFeatureSplitIndex + 1) - val bestFeatureSplit = - new CategoricalSplit(featureIndex, categoriesForSplit.toArray, numCategories) - (bestFeatureSplit, bestFeatureGainStats) - } - } - - val (bestSplit, bestSplitStats) = - if (splitsAndImpurityInfo.isEmpty) { - // If no valid splits for features, then this split is invalid, - // return invalid information gain stats. Take any split and continue. - // Splits is empty, so arbitrarily choose to split on any threshold - val dummyFeatureIndex = featuresForNode.map(_.head).getOrElse(0) - val parentImpurityCalculator = binAggregates.getParentImpurityCalculator() - if (binAggregates.metadata.isContinuous(dummyFeatureIndex)) { - (new ContinuousSplit(dummyFeatureIndex, 0), - ImpurityStats.getInvalidImpurityStats(parentImpurityCalculator)) - } else { - val numCategories = binAggregates.metadata.featureArity(dummyFeatureIndex) - (new CategoricalSplit(dummyFeatureIndex, Array(), numCategories), - ImpurityStats.getInvalidImpurityStats(parentImpurityCalculator)) - } - } else { - splitsAndImpurityInfo.maxBy(_._2.gain) - } - (bestSplit, bestSplitStats) - } - - /** - * Returns splits for decision tree calculation. - * Continuous and categorical features are handled differently. - * - * Continuous features: - * For each feature, there are numBins - 1 possible splits representing the possible binary - * decisions at each node in the tree. - * This finds locations (feature values) for splits using a subsample of the data. - * - * Categorical features: - * For each feature, there is 1 bin per split. - * Splits and bins are handled in 2 ways: - * (a) "unordered features" - * For multiclass classification with a low-arity feature - * (i.e., if isMulticlass && isSpaceSufficientForAllCategoricalSplits), - * the feature is split based on subsets of categories. - * (b) "ordered features" - * For regression and binary classification, - * and for multiclass classification with a high-arity feature, - * there is one bin per category. 
-   *
-   * @param input Training data: RDD of [[LabeledPoint]]
-   * @param metadata Learning and dataset metadata
-   * @param seed random seed
-   * @return Splits, an Array of [[Split]]
-   *         of size (numFeatures, numSplits)
-   */
-  protected[tree] def findSplits(
-      input: RDD[LabeledPoint],
-      metadata: DecisionTreeMetadata,
-      seed: Long): Array[Array[Split]] = {
-
-    logDebug(s"isMulticlass = ${metadata.isMulticlass}")
-
-    val numFeatures = metadata.numFeatures
-
-    // Sample the input only if there are continuous features.
-    val continuousFeatures = Range(0, numFeatures).filter(metadata.isContinuous)
-    val sampledInput = if (continuousFeatures.nonEmpty) {
-      // Calculate the number of samples for approximate quantile calculation.
-      val requiredSamples = math.max(metadata.maxBins * metadata.maxBins, 10000)
-      val fraction = if (requiredSamples < metadata.numExamples) {
-        requiredSamples.toDouble / metadata.numExamples
-      } else {
-        1.0
-      }
-      logDebug(s"fraction of data used for calculating quantiles = ${fraction}")
-      input.sample(withReplacement = false, fraction, new XORShiftRandom(seed).nextInt())
-    } else {
-      input.sparkContext.emptyRDD[LabeledPoint]
-    }
-
-    findSplitsBySorting(sampledInput, metadata, continuousFeatures)
-  }
-
-  private def findSplitsBySorting(
-      input: RDD[LabeledPoint],
-      metadata: DecisionTreeMetadata,
-      continuousFeatures: IndexedSeq[Int]): Array[Array[Split]] = {
-
-    val continuousSplits: scala.collection.Map[Int, Array[Split]] = {
-      // Reduce the parallelism for split computations when there are fewer
-      // continuous features than input partitions. This prevents tasks from
-      // being spun up that will definitely do no work.
-      val numPartitions = math.min(continuousFeatures.length, input.partitions.length)
-
-      input
-        .flatMap(point => continuousFeatures.map(idx => (idx, point.features(idx))))
-        .groupByKey(numPartitions)
-        .map { case (idx, samples) =>
-          val thresholds = findSplitsForContinuousFeature(samples, metadata, idx)
-          val splits: Array[Split] = thresholds.map(thresh => new ContinuousSplit(idx, thresh))
-          logDebug(s"featureIndex = $idx, numSplits = ${splits.length}")
-          (idx, splits)
-        }.collectAsMap()
-    }
-
-    val numFeatures = metadata.numFeatures
-    val splits: Array[Array[Split]] = Array.tabulate(numFeatures) {
-      case i if metadata.isContinuous(i) =>
-        val split = continuousSplits(i)
-        metadata.setNumSplits(i, split.length)
-        split
-
-      case i if metadata.isCategorical(i) && metadata.isUnordered(i) =>
-        // Unordered features
-        // 2^(maxFeatureValue - 1) - 1 combinations
-        val featureArity = metadata.featureArity(i)
-        Array.tabulate[Split](metadata.numSplits(i)) { splitIndex =>
-          val categories = extractMultiClassCategories(splitIndex + 1, featureArity)
-          new CategoricalSplit(i, categories.toArray, featureArity)
-        }
-
-      case i if metadata.isCategorical(i) =>
-        // Ordered features
-        // Splits are constructed as needed during training.
-        Array.empty[Split]
-    }
-    splits
-  }
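The helper that follows decodes an unordered-split index into its category list by reading off set bits. A stand-alone re-implementation, for illustration only, that can be checked against the documented example:

    object ExtractCategoriesDemo {
      // Same technique as extractMultiClassCategories: collect the positions of set bits,
      // most significant first.
      def setBitPositions(input: Int, width: Int): List[Double] =
        (0 until width).collect { case j if ((input >> j) & 1) == 1 => j.toDouble }
          .reverse.toList

      def main(args: Array[String]): Unit = {
        // 13 = 01101 in binary, so bits 0, 2 and 3 are set.
        assert(setBitPositions(13, 5) == List(3.0, 2.0, 0.0))
      }
    }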
-
-  /**
-   * Helper method to extract the list of eligible categories given an index. It extracts
-   * the positions of the ones in the binary representation of the input. If the binary
-   * representation of a number is 01101 (13), the output list should be (3.0, 2.0, 0.0).
-   * The maxFeatureValue indicates the number of rightmost bits that will be tested for ones.
-   */
-  private[tree] def extractMultiClassCategories(
-      input: Int,
-      maxFeatureValue: Int): List[Double] = {
-    var categories = List[Double]()
-    var j = 0
-    var bitShiftedInput = input
-    while (j < maxFeatureValue) {
-      if (bitShiftedInput % 2 != 0) {
-        // updating the list of categories.
-        categories = j.toDouble :: categories
-      }
-      // Right shift by one
-      bitShiftedInput = bitShiftedInput >> 1
-      j += 1
-    }
-    categories
-  }
-
-  /**
-   * Find splits for a continuous feature.
-   * NOTE: The returned number of splits is set based on `featureSamples` and
-   *       could be different from the specified `numSplits`.
-   *       The `numSplits` attribute in the `DecisionTreeMetadata` class will be set
-   *       accordingly.
-   *
-   * @param featureSamples feature values of each sample
-   * @param metadata decision tree metadata
-   *                 NOTE: `metadata.numBins` will be changed accordingly
-   *                 if there are not enough splits to be found
-   * @param featureIndex feature index to find splits
-   * @return array of split thresholds
-   */
-  private[tree] def findSplitsForContinuousFeature(
-      featureSamples: Iterable[Double],
-      metadata: DecisionTreeMetadata,
-      featureIndex: Int): Array[Double] = {
-    require(metadata.isContinuous(featureIndex),
-      "findSplitsForContinuousFeature can only be used to find splits for a continuous feature.")
-
-    val splits: Array[Double] = if (featureSamples.isEmpty) {
-      Array.empty[Double]
-    } else {
-      val numSplits = metadata.numSplits(featureIndex)
-
-      // get the count for each distinct value
-      val (valueCountMap, numSamples) = featureSamples.foldLeft((Map.empty[Double, Int], 0)) {
-        case ((m, cnt), x) =>
-          (m + ((x, m.getOrElse(x, 0) + 1)), cnt + 1)
-      }
-      // sort distinct values
-      val valueCounts = valueCountMap.toSeq.sortBy(_._1).toArray
-
-      val possibleSplits = valueCounts.length - 1
-      if (possibleSplits == 0) {
-        // constant feature
-        Array.empty[Double]
-      } else if (possibleSplits <= numSplits) {
-        // if there are not enough (or exactly enough) possible splits, return all of them
-        (1 to possibleSplits)
-          .map(index => (valueCounts(index - 1)._1 + valueCounts(index)._1) / 2.0)
-          .toArray
-      } else {
-        // stride between splits
-        val stride: Double = numSamples.toDouble / (numSplits + 1)
-        logDebug(s"stride = ${stride}")
-
-        // iterate over `valueCounts` to find splits
-        val splitsBuilder = mutable.ArrayBuilder.make[Double]
-        var index = 1
-        // currentCount: sum of counts of values that have been visited
-        var currentCount = valueCounts(0)._2
-        // targetCount: target value for `currentCount`. If `currentCount` is the closest
-        // value to `targetCount`, then the current value is a split threshold.
-        // After finding a split threshold, `targetCount` is increased by stride.
-        var targetCount = stride
-        while (index < valueCounts.length) {
-          val previousCount = currentCount
-          currentCount += valueCounts(index)._2
-          val previousGap = math.abs(previousCount - targetCount)
-          val currentGap = math.abs(currentCount - targetCount)
-          // If adding the count of the current value to currentCount makes the gap
-          // between currentCount and targetCount larger, the previous value is a
-          // split threshold.
-          if (previousGap < currentGap) {
-            splitsBuilder += (valueCounts(index - 1)._1 + valueCounts(index)._1) / 2.0
-            targetCount += stride
-          }
-          index += 1
-        }
-
-        splitsBuilder.result()
-      }
-    }
-    splits
-  }
-
-  private[tree] class NodeIndexInfo(
-      val nodeIndexInGroup: Int,
-      val featureSubset: Option[Array[Int]]) extends Serializable
-
-  /**
-   * Pull nodes off of the queue, and collect a group of nodes to be split on this iteration.
- * This tracks the memory usage for aggregates and stops adding nodes when too much memory - * will be needed; this allows an adaptive number of nodes since different nodes may require - * different amounts of memory (if featureSubsetStrategy is not "all"). - * - * @param nodeStack Queue of nodes to split. - * @param maxMemoryUsage Bound on size of aggregate statistics. - * @return (nodesForGroup, treeToNodeToIndexInfo). - * nodesForGroup holds the nodes to split: treeIndex --> nodes in tree. - * - * treeToNodeToIndexInfo holds indices selected features for each node: - * treeIndex --> (global) node index --> (node index in group, feature indices). - * The (global) node index is the index in the tree; the node index in group is the - * index in [0, numNodesInGroup) of the node in this group. - * The feature indices are None if not subsampling features. - */ - private[tree] def selectNodesToSplit( - nodeStack: mutable.ArrayStack[(Int, LearningNodeX)], - maxMemoryUsage: Long, - metadata: DecisionTreeMetadata, - rng: Random): (Map[Int, Array[LearningNodeX]], Map[Int, Map[Int, NodeIndexInfo]]) = { - // Collect some nodes to split: - // nodesForGroup(treeIndex) = nodes to split - val mutableNodesForGroup = new mutable.HashMap[Int, mutable.ArrayBuffer[LearningNodeX]]() - val mutableTreeToNodeToIndexInfo = - new mutable.HashMap[Int, mutable.HashMap[Int, NodeIndexInfo]]() - var memUsage: Long = 0L - var numNodesInGroup = 0 - // If maxMemoryInMB is set very small, we want to still try to split 1 node, - // so we allow one iteration if memUsage == 0. - var groupDone = false - while (nodeStack.nonEmpty && !groupDone) { - val (treeIndex, node) = nodeStack.top - // Choose subset of features for node (if subsampling). - val featureSubset: Option[Array[Int]] = if (metadata.subsamplingFeatures) { - Some(SamplingUtils.reservoirSampleAndCount(Range(0, - metadata.numFeatures).iterator, metadata.numFeaturesPerNode, rng.nextLong())._1) - } else { - None - } - // Check if enough memory remains to add this node to the group. - val nodeMemUsage = RandomForest.aggregateSizeForNode(metadata, featureSubset) * 8L - if (memUsage + nodeMemUsage <= maxMemoryUsage || memUsage == 0) { - nodeStack.pop() - mutableNodesForGroup.getOrElseUpdate(treeIndex, new mutable.ArrayBuffer[LearningNodeX]()) += - node - mutableTreeToNodeToIndexInfo - .getOrElseUpdate(treeIndex, new mutable.HashMap[Int, NodeIndexInfo]())(node.id) - = new NodeIndexInfo(numNodesInGroup, featureSubset) - numNodesInGroup += 1 - memUsage += nodeMemUsage - } else { - groupDone = true - } - } - if (memUsage > maxMemoryUsage) { - // If maxMemoryUsage is 0, we should still allow splitting 1 node. - logWarning(s"Tree learning is using approximately $memUsage bytes per iteration, which" + - s" exceeds requested limit maxMemoryUsage=$maxMemoryUsage. This allows splitting" + - s" $numNodesInGroup nodes in this iteration.") - } - logWarning(f"[this group] actualMemUsage: ${memUsage/(1024d*1024d)}%.2f MB," + - f" maxMemoryUsage: ${maxMemoryUsage/(1024d*1024d)}%.2f MB.") - // Convert mutable maps to immutable ones. - val nodesForGroup: Map[Int, Array[LearningNodeX]] = - mutableNodesForGroup.mapValues(_.toArray).toMap - val treeToNodeToIndexInfo = mutableTreeToNodeToIndexInfo.mapValues(_.toMap).toMap - (nodesForGroup, treeToNodeToIndexInfo) - } - - /** - * Get the number of values to be stored for this node in the bin aggregates. - * - * @param featureSubset Indices of features which may be split at this node. - * If None, then use all features. 
- */ - private def aggregateSizeForNode( - metadata: DecisionTreeMetadata, - featureSubset: Option[Array[Int]]): Long = { - val totalBins = if (featureSubset.nonEmpty) { - featureSubset.get.map(featureIndex => metadata.numBins(featureIndex).toLong).sum - } else { - metadata.numBins.map(_.toLong).sum - } - if (metadata.isClassification) { - metadata.numClasses * totalBins - } else { - 3 * totalBins - } - } -} - -object RandomForestInfo { - var timerResult: String = "" -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/RandomForest4GBDTX.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/RandomForest4GBDTX.scala deleted file mode 100644 index 101adf1..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/RandomForest4GBDTX.scala +++ /dev/null @@ -1,621 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.ml.tree.impl - -import it.unimi.dsi.fastutil.doubles.DoubleArrayList -import it.unimi.dsi.fastutil.ints.{Int2ObjectOpenHashMap, IntArrayList} -import it.unimi.dsi.fastutil.objects.ObjectArrayList -import scala.collection.mutable -import scala.util.Random - -import org.apache.spark.broadcast.Broadcast -import org.apache.spark.internal.Logging -import org.apache.spark.ml.classification.DecisionTreeClassificationModel -import org.apache.spark.ml.feature.LabeledPoint -import org.apache.spark.ml.regression.DecisionTreeRegressionModel -import org.apache.spark.ml.tree._ -import org.apache.spark.ml.tree.impl.GradientBoostedTreesCore.NodeIndexInfo -import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo, Strategy => OldStrategy} -import org.apache.spark.mllib.tree.model.ImpurityStats -import org.apache.spark.rdd.RDD -import org.apache.spark.util.random.{SamplingUtils, XORShiftRandom} - - -/** - * ALGORITHM - * - * This is a sketch of the algorithm to help new developers. - * - * The algorithm partitions data by instances (rows). - * On each iteration, the algorithm splits a set of nodes. In order to choose the best split - * for a given node, sufficient statistics are collected from the distributed data. - * For each node, the statistics are collected to some worker node, and that worker selects - * the best split. - * - * This setup requires discretization of continuous features. This binning is done in the - * findSplits() method during initialization, after which each continuous feature becomes - * an ordered discretized feature with at most maxBins possible values. 
- * - * The main loop in the algorithm operates on a queue of nodes (nodeStack). These nodes - * lie at the periphery of the tree being trained. If multiple trees are being trained at once, - * then this queue contains nodes from all of them. Each iteration works roughly as follows: - * On the master node: - * - Some number of nodes are pulled off of the queue (based on the amount of memory - * required for their sufficient statistics). - * - For random forests, if featureSubsetStrategy is not "all," then a subset of candidate - * features are chosen for each node. See method selectNodesToSplit(). - * On worker nodes, via method findBestSplits(): - * - The worker makes one pass over its subset of instances. - * - For each (tree, node, feature, split) tuple, the worker collects statistics about - * splitting. Note that the set of (tree, node) pairs is limited to the nodes selected - * from the queue for this iteration. The set of features considered can also be limited - * based on featureSubsetStrategy. - * - For each node, the statistics for that node are aggregated to a particular worker - * via reduceByKey(). The designated worker chooses the best (feature, split) pair, - * or chooses to stop splitting if the stopping criteria are met. - * On the master node: - * - The master collects all decisions about splitting nodes and updates the model. - * - The updated model is passed to the workers on the next iteration. - * This process continues until the node queue is empty. - * - * Most of the methods in this implementation support the statistics aggregation, which is - * the heaviest part of the computation. In general, this implementation is bound by either - * the cost of statistics computation on workers or by communicating the sufficient statistics. - */ -private[spark] object RandomForest4GBDTX extends Logging { - - /** - * Train a random forest. - * - * @param input Training data: RDD of `LabeledPoint` - * @return an unweighted set of trees - */ - def runX( - labelArrayBc: Broadcast[DoubleArrayList], - processedInput: RDD[(Int, (IntArrayList, ObjectArrayList[Split]))], - metadata: DecisionTreeMetadata, - splits: Array[Array[Split]], - strategy: OldStrategy, - numTrees: Int, - seed: Long, - input: RDD[TreePoint], - rawPartInfoBc: Broadcast[Int2ObjectOpenHashMap[IntArrayList]], - parentUID: Option[String] = None): Array[DecisionTreeModel] = { - - val timer = new TimeTracker() - - timer.start("total") - - timer.start("init") - - // depth of the decision tree - val maxDepth = strategy.maxDepth - require(maxDepth <= 30, - s"DecisionTree currently only supports maxDepth <= 30, but was given maxDepth = $maxDepth.") - - // Max memory usage for aggregates - // TODO: Calculate memory usage more precisely. - val maxMemoryUsage: Long = strategy.maxMemoryInMB * 1024L * 1024L - logDebug(s"max memory usage for aggregates = ${maxMemoryUsage} bytes.") - - /* - Stack of nodes to train: (treeIndex, node) - The reason this is a stack is that we train many trees at once, but we want to focus on - completing trees, rather than training all simultaneously. If we are splitting nodes from - 1 tree, then the new nodes to split will be put at the top of this stack, so we will continue - training the same tree in the next iteration. This focus allows us to send fewer trees to - workers on each iteration; see topNodesForGroup below. - */ - val nodeStack = new mutable.ArrayStack[(Int, LearningNode)] - - val rng = new Random() - rng.setSeed(seed) - - // Allocate and queue root nodes. 
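Before the root nodes are allocated below, a tiny demonstration of why a LIFO stack, rather than a FIFO queue, keeps the trainer focused on finishing one tree at a time; the node labels are invented:

    import scala.collection.mutable

    object StackFocusDemo {
      def main(args: Array[String]): Unit = {
        // Children pushed after a split are popped first, so the next iteration
        // keeps working on the tree that was just split.
        val nodeStack = new mutable.ArrayStack[(Int, String)]
        nodeStack.push((0, "root0"))
        nodeStack.push((1, "root1"))
        val (treeIndex, _) = nodeStack.pop() // tree 1 is split first
        nodeStack.push((treeIndex, "leftChild"))
        nodeStack.push((treeIndex, "rightChild"))
        println(nodeStack.pop()) // (1, "rightChild"): still tree 1
      }
    }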
- val topNodes = Array.fill[LearningNode](numTrees)(LearningNode.emptyNode(nodeIndex = 1)) - Range(0, numTrees).foreach(treeIndex => nodeStack.push((treeIndex, topNodes(treeIndex)))) - - val nodeIdCacheX = GradientBoostedTreesUtil.nodeIdCacheXConstruction(topNodes, rawPartInfoBc) - timer.stop("init") - - while (nodeStack.nonEmpty) { - // Collect some nodes to split, and choose features for each node (if subsampling). - // Each group of nodes may come from one or multiple trees, and at multiple levels. - val (nodesForGroup, treeToNodeToIndexInfo) = - RandomForest4GBDTX.selectNodesToSplitX(nodeStack, maxMemoryUsage, metadata, rng) - // Sanity check (should never occur): - assert(nodesForGroup.nonEmpty, - s"RandomForest selected empty nodesForGroup. Error for unknown reason.") - - // Only send trees to worker if they contain nodes being split this iteration. - val topNodesForGroup: Map[Int, LearningNode] = - nodesForGroup.keys.map(treeIdx => treeIdx -> topNodes(treeIdx)).toMap - - // Choose node splits, and enqueue new nodes as needed. - timer.start("findBestSplits") - RandomForest4GBDTX.findBestSplitsX(labelArrayBc, processedInput, metadata, - (nodesForGroup, treeToNodeToIndexInfo), splits, nodeStack, nodeIdCacheX, input, - rawPartInfoBc, timer) - timer.stop("findBestSplits") - } - - timer.stop("total") - - logInfo("Internal timing for DecisionTree:") - logInfo(s"$timer") - - val numFeatures = metadata.numFeatures - - parentUID match { - case Some(uid) => - if (strategy.algo == OldAlgo.Classification) { - // unreachable for GBDT - topNodes.map { rootNode => - new DecisionTreeClassificationModel(uid, rootNode.toNode, numFeatures, - strategy.getNumClasses) - } - } else { - topNodes.map { rootNode => - new DecisionTreeRegressionModel(uid, rootNode.toNode, numFeatures) - } - } - // unreachable for GBDT - case None => - if (strategy.algo == OldAlgo.Classification) { - topNodes.map { rootNode => - new DecisionTreeClassificationModel(rootNode.toNode, numFeatures, - strategy.getNumClasses) - } - } else { - topNodes.map(rootNode => new DecisionTreeRegressionModel(rootNode.toNode, numFeatures)) - } - } - } - - /** - * Given a group of nodes, this finds the best split for each node. - * - * @param input Training data: RDD of [[TreePoint]] - * @param metadata Learning and dataset metadata - * @param splits possible splits for all features, indexed (numFeatures)(numSplits) - * @param nodeStack Queue of nodes to split, with values (treeIndex, node). - * Updated with new non-leaf nodes which are created. - * @param nodeIdCache Node Id cache containing an RDD of Array[Int] where - * each value in the array is the data point's node Id - * for a corresponding tree. This is used to prevent the need - * to pass the entire tree to the executors during - * the node stat aggregation phase. - */ - private[tree] def findBestSplitsX( - labelArrayBc: Broadcast[DoubleArrayList], - processedInput: RDD[(Int, (IntArrayList, ObjectArrayList[Split]))], - metadata: DecisionTreeMetadata, - packagedNodeInfo: (Map[Int, Array[LearningNode]], Map[Int, Map[Int, NodeIndexInfo]]), - splits: Array[Array[Split]], - nodeStack: mutable.ArrayStack[(Int, LearningNode)], - nodeIdCache: Int2ObjectOpenHashMap[Int2ObjectOpenHashMap[IntArrayList]], - input: RDD[TreePoint], - rawPartInfoBc: Broadcast[Int2ObjectOpenHashMap[IntArrayList]], - timer: TimeTracker = new TimeTracker) : Unit = { - - /* - * The high-level descriptions of the best split optimizations are noted here. 
- * - * *Group-wise training* - * We perform bin calculations for groups of nodes to reduce the number of - * passes over the data. Each iteration requires more computation and storage, - * but saves several iterations over the data. - * - * *Bin-wise computation* - * We use a bin-wise best split computation strategy instead of a straightforward best split - * computation strategy. Instead of analyzing each sample for contribution to the left/right - * child node impurity of every split, we first categorize each feature of a sample into a - * bin. We exploit this structure to calculate aggregates for bins and then use these aggregates - * to calculate information gain for each split. - * - * *Aggregation over partitions* - * Instead of performing a flatMap/reduceByKey operation, we exploit the fact that we know - * the number of splits in advance. Thus, we store the aggregates (at the appropriate - * indices) in a single array for all bins and rely upon the RDD aggregate method to - * drastically reduce the communication overhead. - */ - - // Un-package node info - val (nodesForGroup, treeToNodeToIndexInfo) = packagedNodeInfo - // numNodes: Number of nodes in this group - val numNodes = nodesForGroup.values.map(_.length).sum - logDebug(s"numNodes = ${numNodes}") - logDebug(s"numFeatures = ${metadata.numFeatures}") - logDebug(s"numClasses = ${metadata.numClasses}") - logDebug(s"isMulticlass = ${metadata.isMulticlass}") - logDebug(s"isMulticlassWithCategoricalFeatures =" + - s"${metadata.isMulticlassWithCategoricalFeatures}") - - // array of nodes to train indexed by node index in group - val nodes = new Array[LearningNode](numNodes) - nodesForGroup.foreach { case (treeIndex, nodesForTree) => - nodesForTree.foreach { node => - nodes(treeToNodeToIndexInfo(treeIndex)(node.id).nodeIndexInGroup) = node - } - } - - timer.start("broadcast") - val nodeIdCacheBc = processedInput.sparkContext.broadcast(nodeIdCache) - timer.stop("broadcast") - - // Calculate best splits for all nodes in the group - timer.start("chooseSplits") - - val nodeToBestSplits = GradientBoostedTreesUtil.chooseBestSplits(processedInput, - treeToNodeToIndexInfo, metadata, nodeIdCacheBc, labelArrayBc, nodes) - - timer.stop("chooseSplits") - - // Iterate over all nodes in this group. - nodesForGroup.foreach { case (treeIndex, nodesForTree) => - nodesForTree.foreach { node => - val nodeIndex = node.id - val nodeInfo = treeToNodeToIndexInfo(treeIndex)(nodeIndex) - val aggNodeIndex = nodeInfo.nodeIndexInGroup - val (split: Split, stats: ImpurityStats) = - nodeToBestSplits(nodeIndex) - logDebug(s"best split = ${split}") - - // Extract info for this node. Create children if not leaf. 
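
The id arithmetic used just below (indexToLevel, leftChildIndex, rightChildIndex) follows the 1-based binary-heap convention visible throughout this file: the root is node 1 and the children of node i are 2i and 2i + 1. A compilable restatement of that convention (hypothetical object name, not the Spark API):

object HeapNodeIds {
  def leftChildIndex(i: Int): Int = i << 1        // 2 * i
  def rightChildIndex(i: Int): Int = (i << 1) + 1 // 2 * i + 1

  // Level = position of the highest set bit: 1 -> 0, 2..3 -> 1, 4..7 -> 2, ...
  def indexToLevel(i: Int): Int = 31 - Integer.numberOfLeadingZeros(i)

  def main(args: Array[String]): Unit = {
    assert(leftChildIndex(1) == 2 && rightChildIndex(1) == 3)
    assert(indexToLevel(1) == 0 && indexToLevel(7) == 2 && indexToLevel(8) == 3)
  }
}
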
- val isLeaf = - (stats.gain <= 0) || (LearningNode.indexToLevel(nodeIndex) == metadata.maxDepth) - node.isLeaf = isLeaf - node.stats = stats - logDebug(s"Node = ${node}") - - if (!isLeaf) { - node.split = Some(split) - val childIsLeaf = (LearningNode.indexToLevel(nodeIndex) + 1) == metadata.maxDepth - val leftChildIsLeaf = childIsLeaf || (stats.leftImpurity == 0.0) - val rightChildIsLeaf = childIsLeaf || (stats.rightImpurity == 0.0) - node.leftChild = Some(LearningNode(LearningNode.leftChildIndex(nodeIndex), - leftChildIsLeaf, ImpurityStats.getEmptyImpurityStats(stats.leftImpurityCalculator))) - node.rightChild = Some(LearningNode(LearningNode.rightChildIndex(nodeIndex), - rightChildIsLeaf, ImpurityStats.getEmptyImpurityStats(stats.rightImpurityCalculator))) - - // enqueue left child and right child if they are not leaves - if (!leftChildIsLeaf) { - nodeStack.push((treeIndex, node.leftChild.get)) - } - if (!rightChildIsLeaf) { - nodeStack.push((treeIndex, node.rightChild.get)) - } - - logDebug(s"leftChildIndex = ${node.leftChild.get.id}" + - s", impurity = ${stats.leftImpurity}") - logDebug(s"rightChildIndex = ${node.rightChild.get.id}" + - s", impurity = ${stats.rightImpurity}") - } - } - } - - GradientBoostedTreesUtil.updateNodeIdCache(nodeIdCache, nodeIdCacheBc, input, nodesForGroup, - treeToNodeToIndexInfo, splits, rawPartInfoBc, metadata, timer) - } - - /** - * Returns splits for decision tree calculation. - * Continuous and categorical features are handled differently. - * - * Continuous features: - * For each feature, there are numBins - 1 possible splits representing the possible binary - * decisions at each node in the tree. - * This finds locations (feature values) for splits using a subsample of the data. - * - * Categorical features: - * For each feature, there is 1 bin per split. - * Splits and bins are handled in 2 ways: - * (a) "unordered features" - * For multiclass classification with a low-arity feature - * (i.e., if isMulticlass && isSpaceSufficientForAllCategoricalSplits), - * the feature is split based on subsets of categories. - * (b) "ordered features" - * For regression and binary classification, - * and for multiclass classification with a high-arity feature, - * there is one bin per category. - * - * @param input Training data: RDD of [[LabeledPoint]] - * @param metadata Learning and dataset metadata - * @param seed random seed - * @return Splits, an Array of [[Split]] - * of size (numFeatures, numSplits) - */ - protected[tree] def findSplits( - input: RDD[LabeledPoint], - metadata: DecisionTreeMetadata, - seed: Long): Array[Array[Split]] = { - - logDebug(s"isMulticlass = ${metadata.isMulticlass}") - - val numFeatures = metadata.numFeatures - - // Sample the input only if there are continuous features. - val continuousFeatures = Range(0, numFeatures).filter(metadata.isContinuous) - val sampledInput = if (continuousFeatures.nonEmpty) { - // Calculate the number of samples for approximate quantile calculation. 
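
The sampling arithmetic that follows is simple enough to check by hand. A script-style sketch (for a Scala REPL) with hypothetical inputs, chosen only to make the numbers concrete:

// Hypothetical inputs: maxBins = 32, one million training rows.
val maxBins = 32
val numExamples = 1000000L
val requiredSamples = math.max(maxBins * maxBins, 10000)   // max(1024, 10000) = 10000
val fraction =
  if (requiredSamples < numExamples) requiredSamples.toDouble / numExamples else 1.0
// fraction = 0.01: roughly 10,000 sampled rows feed the quantile estimate,
// however large the full dataset is.
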
-      val requiredSamples = math.max(metadata.maxBins * metadata.maxBins, 10000)
-      val fraction = if (requiredSamples < metadata.numExamples) {
-        requiredSamples.toDouble / metadata.numExamples
-      } else {
-        1.0
-      }
-      logDebug(s"fraction of data used for calculating quantiles = ${fraction}")
-      input.sample(withReplacement = false, fraction, new XORShiftRandom(seed).nextInt())
-    } else {
-      input.sparkContext.emptyRDD[LabeledPoint]
-    }
-
-    findSplitsBySorting(sampledInput, metadata, continuousFeatures)
-  }
-
-  private def findSplitsBySorting(
-      input: RDD[LabeledPoint],
-      metadata: DecisionTreeMetadata,
-      continuousFeatures: IndexedSeq[Int]): Array[Array[Split]] = {
-
-    val continuousSplits: scala.collection.Map[Int, Array[Split]] = {
-      // reduce the parallelism for split computations when there are fewer
-      // continuous features than input partitions. This prevents tasks from
-      // being spun up that will definitely do no work.
-      val numPartitions = math.min(continuousFeatures.length, input.partitions.length)
-
-      input
-        .flatMap(point => continuousFeatures.map(idx => (idx, point.features(idx))))
-        .groupByKey(numPartitions)
-        .map { case (idx, samples) =>
-          val thresholds = findSplitsForContinuousFeature(samples, metadata, idx)
-          val splits: Array[Split] = thresholds.map(thresh => new ContinuousSplit(idx, thresh))
-          logDebug(s"featureIndex = $idx, numSplits = ${splits.length}")
-          (idx, splits)
-        }.collectAsMap()
-    }
-
-    val numFeatures = metadata.numFeatures
-    val splits: Array[Array[Split]] = Array.tabulate(numFeatures) {
-      case i if metadata.isContinuous(i) =>
-        val split = continuousSplits(i)
-        metadata.setNumSplits(i, split.length)
-        split
-
-      // unreachable for GBDT
-      case i if metadata.isCategorical(i) && metadata.isUnordered(i) =>
-        // Unordered features
-        // 2^(maxFeatureValue - 1) - 1 combinations
-        val featureArity = metadata.featureArity(i)
-        Array.tabulate[Split](metadata.numSplits(i)) { splitIndex =>
-          val categories = extractMultiClassCategories(splitIndex + 1, featureArity)
-          new CategoricalSplit(i, categories.toArray, featureArity)
-        }
-
-      case i if metadata.isCategorical(i) =>
-        // Ordered features
-        // Splits are constructed as needed during training.
-        Array.empty[Split]
-    }
-    splits
-  }
-
-  /**
-   * Extracts the list of eligible categories encoded by an index. It reads off the
-   * positions of the ones in the binary representation of the input. For example, if the
-   * binary representation of a number is 01101 (13), the output list should be (3.0, 2.0, 0.0).
-   * The maxFeatureValue argument gives the number of rightmost digits that are tested for ones.
-   */
-  private[tree] def extractMultiClassCategories(
-      input: Int,
-      maxFeatureValue: Int): List[Double] = {
-    var categories = List[Double]()
-    var j = 0
-    var bitShiftedInput = input
-    while (j < maxFeatureValue) {
-      if (bitShiftedInput % 2 != 0) {
-        // updating the list of categories.
-        categories = j.toDouble :: categories
-      }
-      // Right shift by one
-      bitShiftedInput = bitShiftedInput >> 1
-      j += 1
-    }
-    categories
-  }
-
-  /**
-   * Find splits for a continuous feature
-   * NOTE: Returned number of splits is set based on `featureSamples` and
-   * could be different from the specified `numSplits`.
-   * The `numSplits` attribute in the `DecisionTreeMetadata` class will be set accordingly.
-   *
-   * @param featureSamples feature values of each sample
-   * @param metadata decision tree metadata
-   *                 NOTE: `metadata.numBins` will be changed accordingly
-   *                       if there are not enough splits to be found
-   * @param featureIndex feature index to find splits
-   * @return array of split thresholds
-   */
-  private[tree] def findSplitsForContinuousFeature(
-      featureSamples: Iterable[Double],
-      metadata: DecisionTreeMetadata,
-      featureIndex: Int): Array[Double] = {
-    require(metadata.isContinuous(featureIndex),
-      "findSplitsForContinuousFeature can only be used to find splits for a continuous feature.")
-
-    val splits: Array[Double] = if (featureSamples.isEmpty) {
-      Array.empty[Double]
-    } else {
-      val numSplits = metadata.numSplits(featureIndex)
-
-      // get count for each distinct value
-      val (valueCountMap, numSamples) = featureSamples.foldLeft((Map.empty[Double, Int], 0)) {
-        case ((m, cnt), x) =>
-          (m + ((x, m.getOrElse(x, 0) + 1)), cnt + 1)
-      }
-      // sort distinct values
-      val valueCounts = valueCountMap.toSeq.sortBy(_._1).toArray
-
-      val possibleSplits = valueCounts.length - 1
-      if (possibleSplits == 0) {
-        // constant feature
-        Array.empty[Double]
-      } else if (possibleSplits <= numSplits) {
-        // if there are not enough or just enough possible splits, return them all
-        (1 to possibleSplits)
-          .map(index => (valueCounts(index - 1)._1 + valueCounts(index)._1) / 2.0)
-          .toArray
-      } else {
-        // stride between splits
-        val stride: Double = numSamples.toDouble / (numSplits + 1)
-        logDebug(s"stride = ${stride}")
-
-        // iterate over `valueCounts` to find splits
-        val splitsBuilder = mutable.ArrayBuilder.make[Double]
-        var index = 1
-        // currentCount: sum of counts of values that have been visited
-        var currentCount = valueCounts(0)._2
-        // targetCount: target value for `currentCount`.
-        // If `currentCount` is the closest value to `targetCount`,
-        // then the current value is a split threshold.
-        // After finding a split threshold, `targetCount` is increased by `stride`.
-        var targetCount = stride
-        while (index < valueCounts.length) {
-          val previousCount = currentCount
-          currentCount += valueCounts(index)._2
-          val previousGap = math.abs(previousCount - targetCount)
-          val currentGap = math.abs(currentCount - targetCount)
-          // If adding the count of the current value to currentCount
-          // makes the gap between currentCount and targetCount smaller,
-          // the previous value is a split threshold.
-          if (previousGap < currentGap) {
-            splitsBuilder += (valueCounts(index - 1)._1 + valueCounts(index)._1) / 2.0
-            targetCount += stride
-          }
-          index += 1
-        }
-
-        splitsBuilder.result()
-      }
-    }
-    splits
-  }
-
-  /**
-   * Pull nodes off of the queue, and collect a group of nodes to be split on this iteration.
-   * This tracks the memory usage for aggregates and stops adding nodes when too much memory
-   * will be needed; this allows an adaptive number of nodes since different nodes may require
-   * different amounts of memory (if featureSubsetStrategy is not "all").
-   *
-   * @param nodeStack Queue of nodes to split.
-   * @param maxMemoryUsage Bound on size of aggregate statistics.
-   * @return (nodesForGroup, treeToNodeToIndexInfo).
-   *         nodesForGroup holds the nodes to split: treeIndex --> nodes in tree.
-   *
-   *         treeToNodeToIndexInfo holds the indices of selected features for each node:
-   *           treeIndex --> (global) node index --> (node index in group, feature indices).
-   *         The (global) node index is the index in the tree; the node index in group is the
-   *         index in [0, numNodesInGroup) of the node in this group.
- * The feature indices are None if not subsampling features. - */ - private[tree] def selectNodesToSplitX( - nodeStack: mutable.ArrayStack[(Int, LearningNode)], - maxMemoryUsage: Long, - metadata: DecisionTreeMetadata, - rng: Random): (Map[Int, Array[LearningNode]], Map[Int, Map[Int, NodeIndexInfo]]) = { - // Collect some nodes to split: - // nodesForGroup(treeIndex) = nodes to split - val mutableNodesForGroup = new mutable.HashMap[Int, mutable.ArrayBuffer[LearningNode]]() - val mutableTreeToNodeToIndexInfo = - new mutable.HashMap[Int, mutable.HashMap[Int, NodeIndexInfo]]() - var memUsage: Long = 0L - var numNodesInGroup = 0 - // If maxMemoryInMB is set very small, we want to still try to split 1 node, - // so we allow one iteration if memUsage == 0. - var groupDone = false - while (nodeStack.nonEmpty && !groupDone) { - val (treeIndex, node) = nodeStack.top - // Choose subset of features for node (if subsampling). - val featureSubset: Option[Array[Int]] = if (metadata.subsamplingFeatures) { - Some(SamplingUtils.reservoirSampleAndCount(Range(0, - metadata.numFeatures).iterator, metadata.numFeaturesPerNode, rng.nextLong())._1) - } else { - None - } - val featureSubsetHashSetX: Option[mutable.HashSet[Int]] = if (metadata.subsamplingFeatures) { - Some(scala.collection.mutable.HashSet(featureSubset.get: _*)) - } else { - None - } - // Check if enough memory remains to add this node to the group. - val nodeMemUsage = RandomForest4GBDTX.aggregateSizeForNode(metadata, featureSubset) * 8L - if (memUsage + nodeMemUsage <= maxMemoryUsage || memUsage == 0) { - nodeStack.pop() - mutableNodesForGroup.getOrElseUpdate(treeIndex, new mutable.ArrayBuffer[LearningNode]()) += - node - mutableTreeToNodeToIndexInfo - .getOrElseUpdate(treeIndex, new mutable.HashMap[Int, NodeIndexInfo]())(node.id) - = new NodeIndexInfo(numNodesInGroup, featureSubset, featureSubsetHashSetX) - numNodesInGroup += 1 - memUsage += nodeMemUsage - } else { - groupDone = true - } - } - if (memUsage > maxMemoryUsage) { - // If maxMemoryUsage is 0, we should still allow splitting 1 node. - logWarning(s"Tree learning is using approximately $memUsage bytes per iteration, which" + - s" exceeds requested limit maxMemoryUsage=$maxMemoryUsage. This allows splitting" + - s" $numNodesInGroup nodes in this iteration.") - } - // Convert mutable maps to immutable ones. - val nodesForGroup: Map[Int, Array[LearningNode]] = - mutableNodesForGroup.mapValues(_.toArray).toMap - val treeToNodeToIndexInfo = mutableTreeToNodeToIndexInfo.mapValues(_.toMap).toMap - (nodesForGroup, treeToNodeToIndexInfo) - } - - /** - * Get the number of values to be stored for this node in the bin aggregates. - * - * @param featureSubset Indices of features which may be split at this node. - * If None, then use all features. 
- */ - private def aggregateSizeForNode( - metadata: DecisionTreeMetadata, - featureSubset: Option[Array[Int]]): Long = { - val totalBins = if (featureSubset.nonEmpty) { - featureSubset.get.map(featureIndex => metadata.numBins(featureIndex).toLong).sum - } else { - metadata.numBins.map(_.toLong).sum - } - if (metadata.isClassification) { - // unreachable for GBDT - metadata.numClasses * totalBins - } else { - 3 * totalBins - } - } -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/RandomForestRaw.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/RandomForestRaw.scala deleted file mode 100644 index 25dfd36..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/tree/impl/RandomForestRaw.scala +++ /dev/null @@ -1,1157 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.ml.tree.impl - -import java.io.IOException - -import scala.collection.mutable -import scala.util.Random - -import org.apache.spark.internal.Logging -import org.apache.spark.ml.classification.DecisionTreeClassificationModel -import org.apache.spark.ml.feature.LabeledPoint -import org.apache.spark.ml.regression.DecisionTreeRegressionModel -import org.apache.spark.ml.tree._ -import org.apache.spark.ml.util.Instrumentation -import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo, Strategy => OldStrategy} -import org.apache.spark.mllib.tree.impurity.ImpurityCalculator -import org.apache.spark.mllib.tree.model.ImpurityStats -import org.apache.spark.rdd.RDD -import org.apache.spark.storage.StorageLevel -import org.apache.spark.util.random.{SamplingUtils, XORShiftRandom} - - -/** - * ALGORITHM - * - * This is a sketch of the algorithm to help new developers. - * - * The algorithm partitions data by instances (rows). - * On each iteration, the algorithm splits a set of nodes. In order to choose the best split - * for a given node, sufficient statistics are collected from the distributed data. - * For each node, the statistics are collected to some worker node, and that worker selects - * the best split. - * - * This setup requires discretization of continuous features. This binning is done in the - * findSplits() method during initialization, after which each continuous feature becomes - * an ordered discretized feature with at most maxBins possible values. - * - * The main loop in the algorithm operates on a queue of nodes (nodeStack). These nodes - * lie at the periphery of the tree being trained. 
If multiple trees are being trained at once, - * then this queue contains nodes from all of them. Each iteration works roughly as follows: - * On the master node: - * - Some number of nodes are pulled off of the queue (based on the amount of memory - * required for their sufficient statistics). - * - For random forests, if featureSubsetStrategy is not "all," then a subset of candidate - * features are chosen for each node. See method selectNodesToSplit(). - * On worker nodes, via method findBestSplits(): - * - The worker makes one pass over its subset of instances. - * - For each (tree, node, feature, split) tuple, the worker collects statistics about - * splitting. Note that the set of (tree, node) pairs is limited to the nodes selected - * from the queue for this iteration. The set of features considered can also be limited - * based on featureSubsetStrategy. - * - For each node, the statistics for that node are aggregated to a particular worker - * via reduceByKey(). The designated worker chooses the best (feature, split) pair, - * or chooses to stop splitting if the stopping criteria are met. - * On the master node: - * - The master collects all decisions about splitting nodes and updates the model. - * - The updated model is passed to the workers on the next iteration. - * This process continues until the node queue is empty. - * - * Most of the methods in this implementation support the statistics aggregation, which is - * the heaviest part of the computation. In general, this implementation is bound by either - * the cost of statistics computation on workers or by communicating the sufficient statistics. - */ -private[spark] object RandomForestRaw extends Logging { - - /** - * Train a random forest. - * - * @param input Training data: RDD of `LabeledPoint` - * @return an unweighted set of trees - */ - def run( - input: RDD[LabeledPoint], - strategy: OldStrategy, - numTrees: Int, - featureSubsetStrategy: String, - seed: Long, - instr: Option[Instrumentation[_]], - parentUID: Option[String] = None): Array[DecisionTreeModel] = { - - val timer = new TimeTracker() - - timer.start("total") - - timer.start("init") - - val retaggedInput = input.retag(classOf[LabeledPoint]) - val metadata = - DecisionTreeMetadata.buildMetadata(retaggedInput, strategy, numTrees, featureSubsetStrategy) - instr match { - case Some(instrumentation) => - instrumentation.logNumFeatures(metadata.numFeatures) - instrumentation.logNumClasses(metadata.numClasses) - case None => - logInfo(s"numFeatures: ${metadata.numFeatures}") - logInfo(s"numClasses: ${metadata.numClasses}") - } - - // Find the splits and the corresponding bins (interval between the splits) using a sample - // of the input data. - timer.start("findSplits") - val splits = findSplits(retaggedInput, metadata, seed) - timer.stop("findSplits") - logDebug("numBins: feature: number of bins") - logDebug(Range(0, metadata.numFeatures).map { featureIndex => - s"\t$featureIndex\t${metadata.numBins(featureIndex)}" - }.mkString("\n")) - - // Bin feature values (TreePoint representation). - // Cache input RDD for speedup during multiple passes. 
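
The binning step mentioned just below maps every raw feature value to a small integer before training. For a continuous feature that is a lookup against the learned thresholds; a simplified standalone sketch (TreePoint itself also handles categorical features and uses a binary search rather than this linear scan):

// numBins - 1 sorted thresholds for one continuous feature.
def binIndex(value: Double, thresholds: Array[Double]): Int = {
  var bin = 0
  // Advance past every threshold the value exceeds; the surviving index is the bin.
  while (bin < thresholds.length && value > thresholds(bin)) bin += 1
  bin
}

// With thresholds Array(1.0, 5.0): 0.3 -> bin 0, 2.0 -> bin 1, 9.9 -> bin 2.
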
- val treeInput = TreePoint.convertToTreeRDD(retaggedInput, splits, metadata) - - val withReplacement = numTrees > 1 - - val baggedInput = BaggedPoint - .convertToBaggedRDD(treeInput, strategy.subsamplingRate, numTrees, withReplacement, seed) - .persist(StorageLevel.MEMORY_AND_DISK) - - // depth of the decision tree - val maxDepth = strategy.maxDepth - require(maxDepth <= 30, - s"DecisionTree currently only supports maxDepth <= 30, but was given maxDepth = $maxDepth.") - - // Max memory usage for aggregates - // TODO: Calculate memory usage more precisely. - val maxMemoryUsage: Long = strategy.maxMemoryInMB * 1024L * 1024L - logDebug(s"max memory usage for aggregates = ${maxMemoryUsage} bytes.") - - /* - * The main idea here is to perform group-wise training of the decision tree nodes thus - * reducing the passes over the data from (# nodes) to (# nodes / maxNumberOfNodesPerGroup). - * Each data sample is handled by a particular node (or it reaches a leaf and is not used - * in lower levels). - */ - - // Create an RDD of node Id cache. - // At first, all the rows belong to the root nodes (node Id == 1). - val nodeIdCache = if (strategy.useNodeIdCache) { - Some(NodeIdCache.init( - data = baggedInput, - numTrees = numTrees, - checkpointInterval = strategy.checkpointInterval, - initVal = 1)) - } else { - None - } - - /* - Stack of nodes to train: (treeIndex, node) - The reason this is a stack is that we train many trees at once, but we want to focus on - completing trees, rather than training all simultaneously. If we are splitting nodes from - 1 tree, then the new nodes to split will be put at the top of this stack, so we will continue - training the same tree in the next iteration. This focus allows us to send fewer trees to - workers on each iteration; see topNodesForGroup below. - */ - val nodeStack = new mutable.ArrayStack[(Int, LearningNode)] - - val rng = new Random() - rng.setSeed(seed) - - // Allocate and queue root nodes. - val topNodes = Array.fill[LearningNode](numTrees)(LearningNode.emptyNode(nodeIndex = 1)) - Range(0, numTrees).foreach(treeIndex => nodeStack.push((treeIndex, topNodes(treeIndex)))) - - timer.stop("init") - - while (nodeStack.nonEmpty) { - // Collect some nodes to split, and choose features for each node (if subsampling). - // Each group of nodes may come from one or multiple trees, and at multiple levels. - val (nodesForGroup, treeToNodeToIndexInfo) = - RandomForestRaw.selectNodesToSplit(nodeStack, maxMemoryUsage, metadata, rng) - // Sanity check (should never occur): - assert(nodesForGroup.nonEmpty, - s"RandomForest selected empty nodesForGroup. Error for unknown reason.") - - // Only send trees to worker if they contain nodes being split this iteration. - val topNodesForGroup: Map[Int, LearningNode] = - nodesForGroup.keys.map(treeIdx => treeIdx -> topNodes(treeIdx)).toMap - - // Choose node splits, and enqueue new nodes as needed. - timer.start("findBestSplits") - RandomForestRaw.findBestSplits(baggedInput, metadata, topNodesForGroup, nodesForGroup, - treeToNodeToIndexInfo, splits, nodeStack, timer, nodeIdCache) - timer.stop("findBestSplits") - } - - baggedInput.unpersist() - - timer.stop("total") - - logInfo("Internal timing for DecisionTree:") - logInfo(s"$timer") - - // Delete any remaining checkpoints used for node Id cache. - if (nodeIdCache.nonEmpty) { - try { - nodeIdCache.get.deleteAllCheckpoints() - } catch { - case e: IOException => - logWarning(s"delete all checkpoints failed. 
Error reason: ${e.getMessage}") - } - } - - val numFeatures = metadata.numFeatures - - parentUID match { - case Some(uid) => - if (strategy.algo == OldAlgo.Classification) { - topNodes.map { rootNode => - new DecisionTreeClassificationModel(uid, rootNode.toNode, numFeatures, - strategy.getNumClasses) - } - } else { - topNodes.map { rootNode => - new DecisionTreeRegressionModel(uid, rootNode.toNode, numFeatures) - } - } - case None => - if (strategy.algo == OldAlgo.Classification) { - topNodes.map { rootNode => - new DecisionTreeClassificationModel(rootNode.toNode, numFeatures, - strategy.getNumClasses) - } - } else { - topNodes.map(rootNode => new DecisionTreeRegressionModel(rootNode.toNode, numFeatures)) - } - } - } - - /** - * Helper for binSeqOp, for data which can contain a mix of ordered and unordered features. - * - * For ordered features, a single bin is updated. - * For unordered features, bins correspond to subsets of categories; either the left or right bin - * for each subset is updated. - * - * @param agg Array storing aggregate calculation, with a set of sufficient statistics for - * each (feature, bin). - * @param treePoint Data point being aggregated. - * @param splits possible splits indexed (numFeatures)(numSplits) - * @param unorderedFeatures Set of indices of unordered features. - * @param instanceWeight Weight (importance) of instance in dataset. - */ - private def mixedBinSeqOp( - agg: DTStatsAggregator, - treePoint: TreePoint, - splits: Array[Array[Split]], - unorderedFeatures: Set[Int], - instanceWeight: Double, - featuresForNode: Option[Array[Int]]): Unit = { - val numFeaturesPerNode = if (featuresForNode.nonEmpty) { - // Use subsampled features - featuresForNode.get.length - } else { - // Use all features - agg.metadata.numFeatures - } - // Iterate over features. - var featureIndexIdx = 0 - while (featureIndexIdx < numFeaturesPerNode) { - val featureIndex = if (featuresForNode.nonEmpty) { - featuresForNode.get.apply(featureIndexIdx) - } else { - featureIndexIdx - } - if (unorderedFeatures.contains(featureIndex)) { - // Unordered feature - val featureValue = treePoint.binnedFeatures(featureIndex) - val leftNodeFeatureOffset = agg.getFeatureOffset(featureIndexIdx) - // Update the left or right bin for each split. - val numSplits = agg.metadata.numSplits(featureIndex) - val featureSplits = splits(featureIndex) - var splitIndex = 0 - while (splitIndex < numSplits) { - if (featureSplits(splitIndex).shouldGoLeft(featureValue, featureSplits)) { - agg.featureUpdate(leftNodeFeatureOffset, splitIndex, - treePoint.label, instanceWeight.toInt) - } - splitIndex += 1 - } - } else { - // Ordered feature - val binIndex = treePoint.binnedFeatures(featureIndex) - agg.update(featureIndexIdx, binIndex, treePoint.label, instanceWeight.toInt) - } - featureIndexIdx += 1 - } - } - - /** - * Helper for binSeqOp, for regression and for classification with only ordered features. - * - * For each feature, the sufficient statistics of one bin are updated. - * - * @param agg Array storing aggregate calculation, with a set of sufficient statistics for - * each (feature, bin). - * @param treePoint Data point being aggregated. - * @param instanceWeight Weight (importance) of instance in dataset. - */ - private def orderedBinSeqOp( - agg: DTStatsAggregator, - treePoint: TreePoint, - instanceWeight: Double, - featuresForNode: Option[Array[Int]]): Unit = { - val label = treePoint.label - - // Iterate over features. 
- if (featuresForNode.nonEmpty) { - // Use subsampled features - var featureIndexIdx = 0 - while (featureIndexIdx < featuresForNode.get.length) { - val binIndex = treePoint.binnedFeatures(featuresForNode.get.apply(featureIndexIdx)) - agg.update(featureIndexIdx, binIndex, label, instanceWeight.toInt) - featureIndexIdx += 1 - } - } else { - // Use all features - val numFeatures = agg.metadata.numFeatures - var featureIndex = 0 - while (featureIndex < numFeatures) { - val binIndex = treePoint.binnedFeatures(featureIndex) - agg.update(featureIndex, binIndex, label, instanceWeight.toInt) - featureIndex += 1 - } - } - } - - /** - * Given a group of nodes, this finds the best split for each node. - * - * @param input Training data: RDD of [[TreePoint]] - * @param metadata Learning and dataset metadata - * @param topNodesForGroup For each tree in group, tree index -> root node. - * Used for matching instances with nodes. - * @param nodesForGroup Mapping: treeIndex --> nodes to be split in tree - * @param treeToNodeToIndexInfo Mapping: treeIndex --> nodeIndex --> nodeIndexInfo, - * where nodeIndexInfo stores the index in the group and the - * feature subsets (if using feature subsets). - * @param splits possible splits for all features, indexed (numFeatures)(numSplits) - * @param nodeStack Queue of nodes to split, with values (treeIndex, node). - * Updated with new non-leaf nodes which are created. - * @param nodeIdCache Node Id cache containing an RDD of Array[Int] where - * each value in the array is the data point's node Id - * for a corresponding tree. This is used to prevent the need - * to pass the entire tree to the executors during - * the node stat aggregation phase. - */ - private[tree] def findBestSplits( - input: RDD[BaggedPoint[TreePoint]], - metadata: DecisionTreeMetadata, - topNodesForGroup: Map[Int, LearningNode], - nodesForGroup: Map[Int, Array[LearningNode]], - treeToNodeToIndexInfo: Map[Int, Map[Int, NodeIndexInfo]], - splits: Array[Array[Split]], - nodeStack: mutable.ArrayStack[(Int, LearningNode)], - timer: TimeTracker = new TimeTracker, - nodeIdCache: Option[NodeIdCache] = None): Unit = { - - /* - * The high-level descriptions of the best split optimizations are noted here. - * - * *Group-wise training* - * We perform bin calculations for groups of nodes to reduce the number of - * passes over the data. Each iteration requires more computation and storage, - * but saves several iterations over the data. - * - * *Bin-wise computation* - * We use a bin-wise best split computation strategy instead of a straightforward best split - * computation strategy. Instead of analyzing each sample for contribution to the left/right - * child node impurity of every split, we first categorize each feature of a sample into a - * bin. We exploit this structure to calculate aggregates for bins and then use these aggregates - * to calculate information gain for each split. - * - * *Aggregation over partitions* - * Instead of performing a flatMap/reduceByKey operation, we exploit the fact that we know - * the number of splits in advance. Thus, we store the aggregates (at the appropriate - * indices) in a single array for all bins and rely upon the RDD aggregate method to - * drastically reduce the communication overhead. 
-   */
-
-    // numNodes: Number of nodes in this group
-    val numNodes = nodesForGroup.values.map(_.length).sum
-    logDebug(s"numNodes = ${numNodes}")
-    logDebug(s"numFeatures = ${metadata.numFeatures}")
-    logDebug(s"numClasses = ${metadata.numClasses}")
-    logDebug(s"isMulticlass = ${metadata.isMulticlass}")
-    logDebug("isMulticlassWithCategoricalFeatures = " +
-      s"${metadata.isMulticlassWithCategoricalFeatures}")
-    logDebug(s"using nodeIdCache = ${nodeIdCache.nonEmpty.toString}")
-
-    /**
-     * Performs a sequential aggregation over a partition for a particular tree and node.
-     *
-     * For each feature, the aggregate sufficient statistics are updated for the relevant
-     * bins.
-     *
-     * @param treeIndex Index of the tree that we want to perform aggregation for.
-     * @param nodeInfo The node info for the tree node.
-     * @param agg Array storing aggregate calculation, with a set of sufficient statistics
-     *            for each (node, feature, bin).
-     * @param baggedPoint Data point being aggregated.
-     */
-    def nodeBinSeqOp(
-        treeIndex: Int,
-        nodeInfo: NodeIndexInfo,
-        agg: Array[DTStatsAggregator],
-        baggedPoint: BaggedPoint[TreePoint]): Unit = {
-      if (nodeInfo != null) {
-        val aggNodeIndex = nodeInfo.nodeIndexInGroup
-        val featuresForNode = nodeInfo.featureSubset
-        val instanceWeight = baggedPoint.subsampleWeights(treeIndex)
-        if (metadata.unorderedFeatures.isEmpty) {
-          orderedBinSeqOp(agg(aggNodeIndex), baggedPoint.datum, instanceWeight, featuresForNode)
-        } else {
-          mixedBinSeqOp(agg(aggNodeIndex), baggedPoint.datum, splits,
-            metadata.unorderedFeatures, instanceWeight, featuresForNode)
-        }
-        agg(aggNodeIndex).updateParent(baggedPoint.datum.label, instanceWeight)
-      }
-    }
-
-    /**
-     * Performs a sequential aggregation over a partition.
-     *
-     * Each data point contributes to one node. For each feature,
-     * the aggregate sufficient statistics are updated for the relevant bins.
-     *
-     * @param agg Array storing aggregate calculation, with a set of sufficient statistics for
-     *            each (node, feature, bin).
-     * @param baggedPoint Data point being aggregated.
-     * @return agg
-     */
-    def binSeqOp(
-        agg: Array[DTStatsAggregator],
-        baggedPoint: BaggedPoint[TreePoint]): Array[DTStatsAggregator] = {
-      treeToNodeToIndexInfo.foreach { case (treeIndex, nodeIndexToInfo) =>
-        val nodeIndex =
-          topNodesForGroup(treeIndex).predictImpl(baggedPoint.datum.binnedFeatures, splits)
-        nodeBinSeqOp(treeIndex, nodeIndexToInfo.getOrElse(nodeIndex, null), agg, baggedPoint)
-      }
-      agg
-    }
-
-    /**
-     * Does the same thing as binSeqOp, but with the node id cache.
-     */
-    def binSeqOpWithNodeIdCache(
-        agg: Array[DTStatsAggregator],
-        dataPoint: (BaggedPoint[TreePoint], Array[Int])): Array[DTStatsAggregator] = {
-      treeToNodeToIndexInfo.foreach { case (treeIndex, nodeIndexToInfo) =>
-        val baggedPoint = dataPoint._1
-        val nodeIdCache = dataPoint._2
-        val nodeIndex = nodeIdCache(treeIndex)
-        nodeBinSeqOp(treeIndex, nodeIndexToInfo.getOrElse(nodeIndex, null), agg, baggedPoint)
-      }
-
-      agg
-    }
-
-    /**
-     * Get the map from node index in group --> feature indices,
-     * a shortcut for finding the feature indices of a node given its index in the group.
-     */
-    def getNodeToFeatures(
-        treeToNodeToIndexInfo: Map[Int, Map[Int, NodeIndexInfo]]): Option[Map[Int, Array[Int]]] = {
-      if (!metadata.subsamplingFeatures) {
-        None
-      } else {
-        val mutableNodeToFeatures = new mutable.HashMap[Int, Array[Int]]()
-        treeToNodeToIndexInfo.values.foreach { nodeIdToNodeInfo =>
-          nodeIdToNodeInfo.values.foreach { nodeIndexInfo =>
-            assert(nodeIndexInfo.featureSubset.isDefined)
-            mutableNodeToFeatures(nodeIndexInfo.nodeIndexInGroup) = nodeIndexInfo.featureSubset.get
-          }
-        }
-        Some(mutableNodeToFeatures.toMap)
-      }
-    }
-
-    // array of nodes to train indexed by node index in group
-    val nodes = new Array[LearningNode](numNodes)
-    nodesForGroup.foreach { case (treeIndex, nodesForTree) =>
-      nodesForTree.foreach { node =>
-        nodes(treeToNodeToIndexInfo(treeIndex)(node.id).nodeIndexInGroup) = node
-      }
-    }
-
-    // Calculate best splits for all nodes in the group
-    timer.start("chooseSplits")
-
-    // In each partition, iterate over all instances and compute aggregate stats for each node,
-    // yielding a (nodeIndex, nodeAggregateStats) pair for each node.
-    // After a `reduceByKey` operation,
-    // the stats of a node are shuffled to a particular partition and combined together,
-    // then the best splits for the nodes are found there.
-    // Finally, only the best splits are collected to the driver to construct the decision trees.
-    val nodeToFeatures = getNodeToFeatures(treeToNodeToIndexInfo)
-    val nodeToFeaturesBc = input.sparkContext.broadcast(nodeToFeatures)
-
-    val partitionAggregates: RDD[(Int, DTStatsAggregator)] = if (nodeIdCache.nonEmpty) {
-      input.zip(nodeIdCache.get.nodeIdsForInstances).mapPartitions { points =>
-        // Construct a nodeStatsAggregators array to hold node aggregate stats;
-        // each node will have one nodeStatsAggregator.
-        val nodeStatsAggregators = Array.tabulate(numNodes) { nodeIndex =>
-          val featuresForNode = nodeToFeaturesBc.value.map { nodeToFeatures =>
-            nodeToFeatures(nodeIndex)
-          }
-          new DTStatsAggregator(metadata, featuresForNode)
-        }
-
-        // iterate over all instances in the current partition and update aggregate stats
-        points.foreach(binSeqOpWithNodeIdCache(nodeStatsAggregators, _))
-
-        // transform the nodeStatsAggregators array into (nodeIndex, nodeAggregateStats) pairs,
-        // which can be combined with other partitions using `reduceByKey`
-        nodeStatsAggregators.view.zipWithIndex.map(_.swap).iterator
-      }
-    } else {
-      input.mapPartitions { points =>
-        // Construct a nodeStatsAggregators array to hold node aggregate stats;
-        // each node will have one nodeStatsAggregator.
-        val nodeStatsAggregators = Array.tabulate(numNodes) { nodeIndex =>
-          val featuresForNode = nodeToFeaturesBc.value.flatMap { nodeToFeatures =>
-            Some(nodeToFeatures(nodeIndex))
-          }
-          new DTStatsAggregator(metadata, featuresForNode)
-        }
-
-        // iterate over all instances in the current partition and update aggregate stats
-        points.foreach(binSeqOp(nodeStatsAggregators, _))
-
-        // transform the nodeStatsAggregators array into (nodeIndex, nodeAggregateStats) pairs,
-        // which can be combined with other partitions using `reduceByKey`
-        nodeStatsAggregators.view.zipWithIndex.map(_.swap).iterator
-      }
-    }
-
-    val nodeToBestSplits = partitionAggregates.reduceByKey((a, b) => a.merge(b)).map {
-      case (nodeIndex, aggStats) =>
-        val featuresForNode = nodeToFeaturesBc.value.flatMap { nodeToFeatures =>
-          Some(nodeToFeatures(nodeIndex))
-        }
-
-        // find best split for each node
-        val (split: Split, stats: ImpurityStats) =
-          binsToBestSplit(aggStats, splits, featuresForNode, nodes(nodeIndex))
-        (nodeIndex, (split, stats))
-    }.collectAsMap()
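
The mapPartitions/reduceByKey shape above is the whole communication story: one aggregator per node inside each partition, merged node-by-node so that a single reducer ends up owning the complete statistics for each node. A toy reproduction of that pattern (plain Spark RDD API; a simple sum stands in for DTStatsAggregator, and the object name and local-mode setup are invented for the demo):

import org.apache.spark.sql.SparkSession

object AggPatternDemo {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[2]").appName("agg-pattern").getOrCreate()
    val sc = spark.sparkContext
    val numNodes = 3
    val data = sc.parallelize(Seq.tabulate(100)(i => (i % numNodes, i.toDouble)))

    // Per partition: one accumulator per node, updated sequentially ...
    val perPartition = data.mapPartitions { points =>
      val acc = Array.fill(numNodes)(0.0)
      points.foreach { case (node, x) => acc(node) += x }
      acc.zipWithIndex.map(_.swap).iterator // (nodeIndex, partial stat)
    }
    // ... then merged node-by-node, so one reducer owns each node's statistics.
    val perNode = perPartition.reduceByKey(_ + _).collectAsMap()
    println(perNode)
    spark.stop()
  }
}
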
- - timer.stop("chooseSplits") - - val nodeIdUpdaters = if (nodeIdCache.nonEmpty) { - Array.fill[mutable.Map[Int, NodeIndexUpdaterRaw]]( - metadata.numTrees)(mutable.Map[Int, NodeIndexUpdaterRaw]()) - } else { - null - } - // Iterate over all nodes in this group. - nodesForGroup.foreach { case (treeIndex, nodesForTree) => - nodesForTree.foreach { node => - val nodeIndex = node.id - val nodeInfo = treeToNodeToIndexInfo(treeIndex)(nodeIndex) - val aggNodeIndex = nodeInfo.nodeIndexInGroup - val (split: Split, stats: ImpurityStats) = - nodeToBestSplits(aggNodeIndex) - logDebug(s"best split = ${split}") - - // Extract info for this node. Create children if not leaf. - val isLeaf = - (stats.gain <= 0) || (LearningNode.indexToLevel(nodeIndex) == metadata.maxDepth) - node.isLeaf = isLeaf - node.stats = stats - logDebug(s"Node = ${node}") - - if (!isLeaf) { - node.split = Some(split) - val childIsLeaf = (LearningNode.indexToLevel(nodeIndex) + 1) == metadata.maxDepth - val leftChildIsLeaf = childIsLeaf || (stats.leftImpurity == 0.0) - val rightChildIsLeaf = childIsLeaf || (stats.rightImpurity == 0.0) - node.leftChild = Some(LearningNode(LearningNode.leftChildIndex(nodeIndex), - leftChildIsLeaf, ImpurityStats.getEmptyImpurityStats(stats.leftImpurityCalculator))) - node.rightChild = Some(LearningNode(LearningNode.rightChildIndex(nodeIndex), - rightChildIsLeaf, ImpurityStats.getEmptyImpurityStats(stats.rightImpurityCalculator))) - - if (nodeIdCache.nonEmpty) { - val nodeIndexUpdater = NodeIndexUpdaterRaw( - split = split, - nodeIndex = nodeIndex) - nodeIdUpdaters(treeIndex).put(nodeIndex, nodeIndexUpdater) - } - - // enqueue left child and right child if they are not leaves - if (!leftChildIsLeaf) { - nodeStack.push((treeIndex, node.leftChild.get)) - } - if (!rightChildIsLeaf) { - nodeStack.push((treeIndex, node.rightChild.get)) - } - - logDebug(s"leftChildIndex = ${node.leftChild.get.id}" + - s", impurity = ${stats.leftImpurity}") - logDebug(s"rightChildIndex = ${node.rightChild.get.id}" + - s", impurity = ${stats.rightImpurity}") - } - } - } - - if (nodeIdCache.nonEmpty) { - // Update the cache if needed. - nodeIdCache.get.updateNodeIndicesRaw(input, nodeIdUpdaters, splits) - } - } - - /** - * Calculate the impurity statistics for a given (feature, split) based upon left/right - * aggregates. 
-   *
-   * @param stats the reusable impurity statistics for all of this feature's splits;
-   *              only 'impurity' and 'impurityCalculator' are valid between iterations
-   * @param leftImpurityCalculator left node aggregates for this (feature, split)
-   * @param rightImpurityCalculator right node aggregates for this (feature, split)
-   * @param metadata learning and dataset metadata for DecisionTree
-   * @return Impurity statistics for this (feature, split)
-   */
-  private def calculateImpurityStats(
-      stats: ImpurityStats,
-      leftImpurityCalculator: ImpurityCalculator,
-      rightImpurityCalculator: ImpurityCalculator,
-      metadata: DecisionTreeMetadata): ImpurityStats = {
-
-    val parentImpurityCalculator: ImpurityCalculator = if (stats == null) {
-      leftImpurityCalculator.copy.add(rightImpurityCalculator)
-    } else {
-      stats.impurityCalculator
-    }
-
-    val impurity: Double = if (stats == null) {
-      parentImpurityCalculator.calculate()
-    } else {
-      stats.impurity
-    }
-
-    val leftCount = leftImpurityCalculator.count
-    val rightCount = rightImpurityCalculator.count
-
-    val totalCount = leftCount + rightCount
-
-    // If the left or right child doesn't satisfy the minimum instances per node,
-    // then this split is invalid; return invalid information gain stats.
-    if ((leftCount < metadata.minInstancesPerNode) ||
-      (rightCount < metadata.minInstancesPerNode)) {
-      return ImpurityStats.getInvalidImpurityStats(parentImpurityCalculator)
-    }
-
-    val leftImpurity = leftImpurityCalculator.calculate() // Note: This equals 0 if count = 0
-    val rightImpurity = rightImpurityCalculator.calculate()
-
-    val leftWeight = leftCount / totalCount.toDouble
-    val rightWeight = rightCount / totalCount.toDouble
-
-    val gain = impurity - leftWeight * leftImpurity - rightWeight * rightImpurity
-
-    // If the information gain doesn't satisfy the minimum information gain,
-    // then this split is invalid; return invalid information gain stats.
-    if (gain < metadata.minInfoGain) {
-      return ImpurityStats.getInvalidImpurityStats(parentImpurityCalculator)
-    }
-
-    new ImpurityStats(gain, impurity, parentImpurityCalculator,
-      leftImpurityCalculator, rightImpurityCalculator)
-  }
-
-  /**
-   * Find the best split for a node.
-   *
-   * @param binAggregates Bin statistics.
-   * @return tuple for best split: (Split, information gain, prediction at node)
-   */
-  private[tree] def binsToBestSplit(
-      binAggregates: DTStatsAggregator,
-      splits: Array[Array[Split]],
-      featuresForNode: Option[Array[Int]],
-      node: LearningNode): (Split, ImpurityStats) = {
-
-    // Calculate InformationGain and ImpurityStats if current node is top node
-    val level = LearningNode.indexToLevel(node.id)
-    var gainAndImpurityStats: ImpurityStats = if (level == 0) {
-      null
-    } else {
-      node.stats
-    }
-
-    val validFeatureSplits =
-      Range(0, binAggregates.metadata.numFeaturesPerNode).view.map { featureIndexIdx =>
-        featuresForNode.map(features => (featureIndexIdx, features(featureIndexIdx)))
-          .getOrElse((featureIndexIdx, featureIndexIdx))
-      }.withFilter { case (_, featureIndex) =>
-        binAggregates.metadata.numSplits(featureIndex) != 0
-      }
-
-    // For each (feature, split), calculate the gain, and select the best (feature, split).
-    val splitsAndImpurityInfo =
-      validFeatureSplits.map { case (featureIndexIdx, featureIndex) =>
-        val numSplits = binAggregates.metadata.numSplits(featureIndex)
-        if (binAggregates.metadata.isContinuous(featureIndex)) {
-          // Cumulative sum (scanLeft) of bin statistics.
-          // Afterwards, binAggregates for a bin is the sum of aggregates for
-          // that bin + all preceding bins.
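
Spelled out, the gain computed in calculateImpurityStats above is the classic weighted-impurity difference. A plain restatement as a standalone helper (not the Spark API; the function name is invented):

// gain = impurity(parent) - wL * impurity(left) - wR * impurity(right)
def infoGain(
    parentImpurity: Double,
    leftImpurity: Double, leftCount: Long,
    rightImpurity: Double, rightCount: Long): Double = {
  val total = (leftCount + rightCount).toDouble
  parentImpurity -
    (leftCount / total) * leftImpurity -
    (rightCount / total) * rightImpurity
}

// e.g. a Gini-0.5 parent split into (impurity 0.0, 50 rows) and (0.444, 100 rows):
// 0.5 - (1.0/3) * 0.0 - (2.0/3) * 0.444 = 0.204
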
-          val nodeFeatureOffset = binAggregates.getFeatureOffset(featureIndexIdx)
-          var splitIndex = 0
-          while (splitIndex < numSplits) {
-            binAggregates.mergeForFeature(nodeFeatureOffset, splitIndex + 1, splitIndex)
-            splitIndex += 1
-          }
-          // Find best split.
-          val (bestFeatureSplitIndex, bestFeatureGainStats) =
-            Range(0, numSplits).map { case splitIdx =>
-              val leftChildStats = binAggregates.getImpurityCalculator(nodeFeatureOffset, splitIdx)
-              val rightChildStats =
-                binAggregates.getImpurityCalculator(nodeFeatureOffset, numSplits)
-              rightChildStats.subtract(leftChildStats)
-              gainAndImpurityStats = calculateImpurityStats(gainAndImpurityStats,
-                leftChildStats, rightChildStats, binAggregates.metadata)
-              (splitIdx, gainAndImpurityStats)
-            }.maxBy(_._2.gain)
-          (splits(featureIndex)(bestFeatureSplitIndex), bestFeatureGainStats)
-        } else if (binAggregates.metadata.isUnordered(featureIndex)) {
-          // Unordered categorical feature
-          val leftChildOffset = binAggregates.getFeatureOffset(featureIndexIdx)
-          val (bestFeatureSplitIndex, bestFeatureGainStats) =
-            Range(0, numSplits).map { splitIndex =>
-              val leftChildStats = binAggregates.getImpurityCalculator(leftChildOffset, splitIndex)
-              val rightChildStats = binAggregates.getParentImpurityCalculator()
-                .subtract(leftChildStats)
-              gainAndImpurityStats = calculateImpurityStats(gainAndImpurityStats,
-                leftChildStats, rightChildStats, binAggregates.metadata)
-              (splitIndex, gainAndImpurityStats)
-            }.maxBy(_._2.gain)
-          (splits(featureIndex)(bestFeatureSplitIndex), bestFeatureGainStats)
-        } else {
-          // Ordered categorical feature
-          val nodeFeatureOffset = binAggregates.getFeatureOffset(featureIndexIdx)
-          val numCategories = binAggregates.metadata.numBins(featureIndex)
-
-          /* Each bin is one category (feature value).
-           * The bins are ordered based on centroidForCategories, and this ordering determines which
-           * splits are considered. (With K categories, we consider K - 1 possible splits.)
-           *
-           * centroidForCategories is a list: (category, centroid)
-           */
-          val centroidForCategories = Range(0, numCategories).map { case featureValue =>
-            val categoryStats =
-              binAggregates.getImpurityCalculator(nodeFeatureOffset, featureValue)
-            val centroid = if (categoryStats.count != 0) {
-              if (binAggregates.metadata.isMulticlass) {
-                // multiclass classification
-                // For categorical variables in multiclass classification,
-                // the bins are ordered by the impurity of their corresponding labels.
-                categoryStats.calculate()
-              } else if (binAggregates.metadata.isClassification) {
-                // binary classification
-                // For categorical variables in binary classification,
-                // the bins are ordered by the count of class 1.
-                categoryStats.stats(1)
-              } else {
-                // regression
-                // For categorical variables in regression,
-                // the bins are ordered by the prediction.
-                categoryStats.predict
-              }
-            } else {
-              Double.MaxValue
-            }
-            (featureValue, centroid)
-          }
-
-          logDebug(s"Centroids for categorical variable: ${centroidForCategories.mkString(",")}")
-
-          // bins sorted by centroids
-          val categoriesSortedByCentroid = centroidForCategories.toList.sortBy(_._2)
-
-          logDebug("Sorted centroids for categorical variable = " +
-            categoriesSortedByCentroid.mkString(","))
-
-          // Cumulative sum (scanLeft) of bin statistics.
-          // Afterwards, binAggregates for a bin is the sum of aggregates for
-          // that bin + all preceding bins.
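
The centroid ordering plus running merge described in the comment above is equivalent to sorting the categories and taking prefix sums over their statistics. A small script-style illustration for the regression case (invented numbers; the centroid is the mean label):

// (category, count, labelSum) statistics for one ordered categorical feature.
val stats = Seq((0, 10L, 5.0), (1, 10L, 25.0), (2, 10L, 12.0))

// Order categories by centroid (mean label), as described above.
val ordered = stats.sortBy { case (_, n, sum) => sum / n } // categories 0, 2, 1

// Prefix sums give the "left child" stats of each of the K - 1 candidate splits;
// subtracting from the grand total gives the right child, like mergeForFeature does.
val prefixes = ordered
  .scanLeft((0L, 0.0)) { case ((n, s), (_, cn, cs)) => (n + cn, s + cs) }
  .tail
prefixes.init.foreach { case (leftCount, leftSum) =>
  println(s"candidate left child: n=$leftCount, labelSum=$leftSum")
}
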
- var splitIndex = 0 - while (splitIndex < numSplits) { - val currentCategory = categoriesSortedByCentroid(splitIndex)._1 - val nextCategory = categoriesSortedByCentroid(splitIndex + 1)._1 - binAggregates.mergeForFeature(nodeFeatureOffset, nextCategory, currentCategory) - splitIndex += 1 - } - // lastCategory = index of bin with total aggregates for this (node, feature) - val lastCategory = categoriesSortedByCentroid.last._1 - // Find best split. - val (bestFeatureSplitIndex, bestFeatureGainStats) = - Range(0, numSplits).map { splitIndex => - val featureValue = categoriesSortedByCentroid(splitIndex)._1 - val leftChildStats = - binAggregates.getImpurityCalculator(nodeFeatureOffset, featureValue) - val rightChildStats = - binAggregates.getImpurityCalculator(nodeFeatureOffset, lastCategory) - rightChildStats.subtract(leftChildStats) - gainAndImpurityStats = calculateImpurityStats(gainAndImpurityStats, - leftChildStats, rightChildStats, binAggregates.metadata) - (splitIndex, gainAndImpurityStats) - }.maxBy(_._2.gain) - val categoriesForSplit = - categoriesSortedByCentroid.map(_._1.toDouble).slice(0, bestFeatureSplitIndex + 1) - val bestFeatureSplit = - new CategoricalSplit(featureIndex, categoriesForSplit.toArray, numCategories) - (bestFeatureSplit, bestFeatureGainStats) - } - } - - val (bestSplit, bestSplitStats) = - if (splitsAndImpurityInfo.isEmpty) { - // If no valid splits for features, then this split is invalid, - // return invalid information gain stats. Take any split and continue. - // Splits is empty, so arbitrarily choose to split on any threshold - val dummyFeatureIndex = featuresForNode.map(_.head).getOrElse(0) - val parentImpurityCalculator = binAggregates.getParentImpurityCalculator() - if (binAggregates.metadata.isContinuous(dummyFeatureIndex)) { - (new ContinuousSplit(dummyFeatureIndex, 0), - ImpurityStats.getInvalidImpurityStats(parentImpurityCalculator)) - } else { - val numCategories = binAggregates.metadata.featureArity(dummyFeatureIndex) - (new CategoricalSplit(dummyFeatureIndex, Array(), numCategories), - ImpurityStats.getInvalidImpurityStats(parentImpurityCalculator)) - } - } else { - splitsAndImpurityInfo.maxBy(_._2.gain) - } - (bestSplit, bestSplitStats) - } - - /** - * Returns splits for decision tree calculation. - * Continuous and categorical features are handled differently. - * - * Continuous features: - * For each feature, there are numBins - 1 possible splits representing the possible binary - * decisions at each node in the tree. - * This finds locations (feature values) for splits using a subsample of the data. - * - * Categorical features: - * For each feature, there is 1 bin per split. - * Splits and bins are handled in 2 ways: - * (a) "unordered features" - * For multiclass classification with a low-arity feature - * (i.e., if isMulticlass && isSpaceSufficientForAllCategoricalSplits), - * the feature is split based on subsets of categories. - * (b) "ordered features" - * For regression and binary classification, - * and for multiclass classification with a high-arity feature, - * there is one bin per category. 
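
Concretely, the split counts implied by cases (a) and (b) just described are easy to tabulate (a script-style check, assuming a feature of arity M; the helper names are invented):

// (a) unordered: one split per nonempty proper subset pair, 2^(M - 1) - 1 of them
def unorderedSplits(arity: Int): Int = (1 << (arity - 1)) - 1
// (b) ordered: categories sorted by centroid, one split between adjacent categories
def orderedSplits(arity: Int): Int = arity - 1

// arity 3: the three unordered subsets are {0}, {1} and {0, 1}
// (exactly what extractMultiClassCategories enumerates for inputs 1..3),
// versus only two ordered splits.
assert(unorderedSplits(3) == 3 && orderedSplits(3) == 2)
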
-   *
-   * @param input Training data: RDD of [[LabeledPoint]]
-   * @param metadata Learning and dataset metadata
-   * @param seed random seed
-   * @return Splits, an Array of [[Split]]
-   *          of size (numFeatures, numSplits)
-   */
-  protected[tree] def findSplits(
-      input: RDD[LabeledPoint],
-      metadata: DecisionTreeMetadata,
-      seed: Long): Array[Array[Split]] = {
-
-    logDebug(s"isMulticlass = ${metadata.isMulticlass}")
-
-    val numFeatures = metadata.numFeatures
-
-    // Sample the input only if there are continuous features.
-    val continuousFeatures = Range(0, numFeatures).filter(metadata.isContinuous)
-    val sampledInput = if (continuousFeatures.nonEmpty) {
-      // Calculate the number of samples for approximate quantile calculation.
-      val requiredSamples = math.max(metadata.maxBins * metadata.maxBins, 10000)
-      val fraction = if (requiredSamples < metadata.numExamples) {
-        requiredSamples.toDouble / metadata.numExamples
-      } else {
-        1.0
-      }
-      logDebug(s"fraction of data used for calculating quantiles = ${fraction}")
-      input.sample(withReplacement = false, fraction, new XORShiftRandom(seed).nextInt())
-    } else {
-      input.sparkContext.emptyRDD[LabeledPoint]
-    }
-
-    findSplitsBySorting(sampledInput, metadata, continuousFeatures)
-  }
-
-  private def findSplitsBySorting(
-      input: RDD[LabeledPoint],
-      metadata: DecisionTreeMetadata,
-      continuousFeatures: IndexedSeq[Int]): Array[Array[Split]] = {
-
-    val continuousSplits: scala.collection.Map[Int, Array[Split]] = {
-      // reduce the parallelism for split computations when there are fewer
-      // continuous features than input partitions. This prevents tasks from
-      // being spun up that will definitely do no work.
-      val numPartitions = math.min(continuousFeatures.length, input.partitions.length)
-
-      input
-        .flatMap(point => continuousFeatures.map(idx => (idx, point.features(idx))))
-        .groupByKey(numPartitions)
-        .map { case (idx, samples) =>
-          val thresholds = findSplitsForContinuousFeature(samples, metadata, idx)
-          val splits: Array[Split] = thresholds.map(thresh => new ContinuousSplit(idx, thresh))
-          logDebug(s"featureIndex = $idx, numSplits = ${splits.length}")
-          (idx, splits)
-        }.collectAsMap()
-    }
-
-    val numFeatures = metadata.numFeatures
-    val splits: Array[Array[Split]] = Array.tabulate(numFeatures) {
-      case i if metadata.isContinuous(i) =>
-        val split = continuousSplits(i)
-        metadata.setNumSplits(i, split.length)
-        split
-
-      case i if metadata.isCategorical(i) && metadata.isUnordered(i) =>
-        // Unordered features
-        // 2^(maxFeatureValue - 1) - 1 combinations
-        val featureArity = metadata.featureArity(i)
-        Array.tabulate[Split](metadata.numSplits(i)) { splitIndex =>
-          val categories = extractMultiClassCategories(splitIndex + 1, featureArity)
-          new CategoricalSplit(i, categories.toArray, featureArity)
-        }
-
-      case i if metadata.isCategorical(i) =>
-        // Ordered features
-        // Splits are constructed as needed during training.
-        Array.empty[Split]
-    }
-    splits
-  }
-
-  /**
-   * Extracts the list of eligible categories encoded by an index. It reads off the
-   * positions of the ones in the binary representation of the input. For example, if the
-   * binary representation of a number is 01101 (13), the output list should be (3.0, 2.0, 0.0).
-   * The maxFeatureValue argument gives the number of rightmost digits that are tested for ones.
-   */
-  private[tree] def extractMultiClassCategories(
-      input: Int,
-      maxFeatureValue: Int): List[Double] = {
-    var categories = List[Double]()
-    var j = 0
-    var bitShiftedInput = input
-    while (j < maxFeatureValue) {
-      if (bitShiftedInput % 2 != 0) {
-        // updating the list of categories.
-        categories = j.toDouble :: categories
-      }
-      // Right shift by one
-      bitShiftedInput = bitShiftedInput >> 1
-      j += 1
-    }
-    categories
-  }
-
-  /**
-   * Find splits for a continuous feature
-   * NOTE: Returned number of splits is set based on `featureSamples` and
-   * could be different from the specified `numSplits`.
-   * The `numSplits` attribute in the `DecisionTreeMetadata` class will be set accordingly.
-   *
-   * @param featureSamples feature values of each sample
-   * @param metadata decision tree metadata
-   *                 NOTE: `metadata.numBins` will be changed accordingly
-   *                       if there are not enough splits to be found
-   * @param featureIndex feature index to find splits
-   * @return array of split thresholds
-   */
-  private[tree] def findSplitsForContinuousFeature(
-      featureSamples: Iterable[Double],
-      metadata: DecisionTreeMetadata,
-      featureIndex: Int): Array[Double] = {
-    require(metadata.isContinuous(featureIndex),
-      "findSplitsForContinuousFeature can only be used to find splits for a continuous feature.")
-
-    val splits: Array[Double] = if (featureSamples.isEmpty) {
-      Array.empty[Double]
-    } else {
-      val numSplits = metadata.numSplits(featureIndex)
-
-      // get count for each distinct value
-      val (valueCountMap, numSamples) = featureSamples.foldLeft((Map.empty[Double, Int], 0)) {
-        case ((m, cnt), x) =>
-          (m + ((x, m.getOrElse(x, 0) + 1)), cnt + 1)
-      }
-      // sort distinct values
-      val valueCounts = valueCountMap.toSeq.sortBy(_._1).toArray
-
-      val possibleSplits = valueCounts.length - 1
-      if (possibleSplits == 0) {
-        // constant feature
-        Array.empty[Double]
-      } else if (possibleSplits <= numSplits) {
-        // if there are not enough or just enough possible splits, return them all
-        (1 to possibleSplits)
-          .map(index => (valueCounts(index - 1)._1 + valueCounts(index)._1) / 2.0)
-          .toArray
-      } else {
-        // stride between splits
-        val stride: Double = numSamples.toDouble / (numSplits + 1)
-        logDebug(s"stride = ${stride}")
-
-        // iterate over `valueCounts` to find splits
-        val splitsBuilder = mutable.ArrayBuilder.make[Double]
-        var index = 1
-        // currentCount: sum of counts of values that have been visited
-        var currentCount = valueCounts(0)._2
-        // targetCount: target value for `currentCount`.
-        // If `currentCount` is the closest value to `targetCount`,
-        // then the current value is a split threshold.
-        // After finding a split threshold, `targetCount` is increased by `stride`.
-        var targetCount = stride
-        while (index < valueCounts.length) {
-          val previousCount = currentCount
-          currentCount += valueCounts(index)._2
-          val previousGap = math.abs(previousCount - targetCount)
-          val currentGap = math.abs(currentCount - targetCount)
-          // If adding the count of the current value to currentCount
-          // makes the gap between currentCount and targetCount smaller,
-          // the previous value is a split threshold.
-          if (previousGap < currentGap) {
-            splitsBuilder += (valueCounts(index - 1)._1 + valueCounts(index)._1) / 2.0
-            targetCount += stride
-          }
-          index += 1
-        }
-
-        splitsBuilder.result()
-      }
-    }
-    splits
-  }
-
-  private[tree] class NodeIndexInfo(
-      val nodeIndexInGroup: Int,
-      val featureSubset: Option[Array[Int]]) extends Serializable
-
-  /**
-   * Pull nodes off of the queue, and collect a group of nodes to be split on this iteration.
- * This tracks the memory usage for aggregates and stops adding nodes when too much memory - * will be needed; this allows an adaptive number of nodes since different nodes may require - * different amounts of memory (if featureSubsetStrategy is not "all"). - * - * @param nodeStack Queue of nodes to split. - * @param maxMemoryUsage Bound on size of aggregate statistics. - * @return (nodesForGroup, treeToNodeToIndexInfo). - * nodesForGroup holds the nodes to split: treeIndex --> nodes in tree. - * - * treeToNodeToIndexInfo holds the indices of selected features for each node: - * treeIndex --> (global) node index --> (node index in group, feature indices). - * The (global) node index is the index in the tree; the node index in group is the - * index in [0, numNodesInGroup) of the node in this group. - * The feature indices are None if not subsampling features. - */ - private[tree] def selectNodesToSplit( - nodeStack: mutable.ArrayStack[(Int, LearningNode)], - maxMemoryUsage: Long, - metadata: DecisionTreeMetadata, - rng: Random): (Map[Int, Array[LearningNode]], Map[Int, Map[Int, NodeIndexInfo]]) = { - // Collect some nodes to split: - // nodesForGroup(treeIndex) = nodes to split - val mutableNodesForGroup = new mutable.HashMap[Int, mutable.ArrayBuffer[LearningNode]]() - val mutableTreeToNodeToIndexInfo = - new mutable.HashMap[Int, mutable.HashMap[Int, NodeIndexInfo]]() - var memUsage: Long = 0L - var numNodesInGroup = 0 - // If maxMemoryInMB is set very small, we want to still try to split 1 node, - // so we allow one iteration if memUsage == 0. - var groupDone = false - while (nodeStack.nonEmpty && !groupDone) { - val (treeIndex, node) = nodeStack.top - // Choose subset of features for node (if subsampling). - val featureSubset: Option[Array[Int]] = if (metadata.subsamplingFeatures) { - Some(SamplingUtils.reservoirSampleAndCount(Range(0, - metadata.numFeatures).iterator, metadata.numFeaturesPerNode, rng.nextLong())._1) - } else { - None - } - // Check if enough memory remains to add this node to the group. - val nodeMemUsage = RandomForestRaw.aggregateSizeForNode(metadata, featureSubset) * 8L - if (memUsage + nodeMemUsage <= maxMemoryUsage || memUsage == 0) { - nodeStack.pop() - mutableNodesForGroup.getOrElseUpdate(treeIndex, new mutable.ArrayBuffer[LearningNode]()) += - node - mutableTreeToNodeToIndexInfo - .getOrElseUpdate(treeIndex, new mutable.HashMap[Int, NodeIndexInfo]())(node.id) - = new NodeIndexInfo(numNodesInGroup, featureSubset) - numNodesInGroup += 1 - memUsage += nodeMemUsage - } else { - groupDone = true - } - } - if (memUsage > maxMemoryUsage) { - // If maxMemoryUsage is 0, we should still allow splitting 1 node. - logWarning(s"Tree learning is using approximately $memUsage bytes per iteration, which" + - s" exceeds requested limit maxMemoryUsage=$maxMemoryUsage. This allows splitting" + - s" $numNodesInGroup nodes in this iteration.") - } - // Convert mutable maps to immutable ones. - val nodesForGroup: Map[Int, Array[LearningNode]] = - mutableNodesForGroup.mapValues(_.toArray).toMap - val treeToNodeToIndexInfo = mutableTreeToNodeToIndexInfo.mapValues(_.toMap).toMap - (nodesForGroup, treeToNodeToIndexInfo) - } - - /** - * Get the number of values to be stored for this node in the bin aggregates. - * - * @param featureSubset Indices of features which may be split at this node. - * If None, then use all features.
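- * - * A hedged numeric sketch (editor's illustration, not from the original source): with a - * featureSubset of 10 features at 32 bins each, totalBins = 320, so a 4-class - * classification node needs 4 * 320 = 1280 aggregate values; selectNodesToSplit above - * then charges 1280 * 8 = 10240 bytes for that node against maxMemoryUsage.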
- */ - private def aggregateSizeForNode( - metadata: DecisionTreeMetadata, - featureSubset: Option[Array[Int]]): Long = { - val totalBins = if (featureSubset.nonEmpty) { - featureSubset.get.map(featureIndex => metadata.numBins(featureIndex).toLong).sum - } else { - metadata.numBins.map(_.toLong).sum - } - if (metadata.isClassification) { - metadata.numClasses * totalBins - } else { - 3 * totalBins - } - } -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/ml/tree/treeParams.scala b/ml-accelerator/src/main/scala/org/apache/spark/ml/tree/treeParams.scala deleted file mode 100644 index 38b79ec..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/ml/tree/treeParams.scala +++ /dev/null @@ -1,611 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.ml.tree - -import java.util.Locale - -import scala.util.Try - -import org.apache.spark.ml.PredictorParams -import org.apache.spark.ml.param._ -import org.apache.spark.ml.param.shared._ -import org.apache.spark.ml.util.SchemaUtils -import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo, BoostingStrategy => OldBoostingStrategy, Strategy => OldStrategy} -import org.apache.spark.mllib.tree.impurity.{Entropy => OldEntropy, Gini => OldGini, Impurity => OldImpurity, Variance => OldVariance} -import org.apache.spark.mllib.tree.loss.{AbsoluteError => OldAbsoluteError, ClassificationLoss => OldClassificationLoss, LogLoss => OldLogLoss, Loss => OldLoss, SquaredError => OldSquaredError} -import org.apache.spark.sql.types.{DataType, DoubleType, StructType} - -/** - * Parameters for Decision Tree-based algorithms. - * - * Note: Marked as private and DeveloperApi since this may be made public in the future. - */ -private[ml] trait DecisionTreeParams extends PredictorParams - with HasCheckpointInterval with HasSeed { - - /** - * Maximum depth of the tree (>= 0). - * E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes. - * (default = 5) - * @group param - */ - final val maxDepth: IntParam = - new IntParam(this, "maxDepth", "Maximum depth of the tree. (>= 0)" + - " E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.", - ParamValidators.gtEq(0)) - - /** - * Maximum number of bins used for discretizing continuous features and for choosing how to split - * on features at each node. More bins give higher granularity. - * Must be >= 2 and >= number of categories in any categorical feature. 
- * (default = 32) - * @group param - */ - final val maxBins: IntParam = new IntParam(this, "maxBins", "Max number of bins for" + - " discretizing continuous features. Must be >= 2 and >= number of categories for any" + - " categorical feature.", ParamValidators.gtEq(2)) - - /** - * Minimum number of instances each child must have after split. - * If a split causes the left or right child to have fewer than minInstancesPerNode, - * the split will be discarded as invalid. - * Should be >= 1. - * (default = 1) - * @group param - */ - final val minInstancesPerNode: IntParam = new IntParam(this, "minInstancesPerNode", "Minimum" + - " number of instances each child must have after split. If a split causes the left or right" + - " child to have fewer than minInstancesPerNode, the split will be discarded as invalid." + - " Should be >= 1.", ParamValidators.gtEq(1)) - - /** - * Minimum information gain for a split to be considered at a tree node. - * Should be >= 0.0. - * (default = 0.0) - * @group param - */ - final val minInfoGain: DoubleParam = new DoubleParam(this, "minInfoGain", - "Minimum information gain for a split to be considered at a tree node.", - ParamValidators.gtEq(0.0)) - - /** - * Maximum memory in MB allocated to histogram aggregation. If too small, then 1 node will be - * split per iteration, and its aggregates may exceed this size. - * (default = 256 MB) - * @group expertParam - */ - final val maxMemoryInMB: IntParam = new IntParam(this, "maxMemoryInMB", - "Maximum memory in MB allocated to histogram aggregation.", - ParamValidators.gtEq(0)) - - /** - * If false, the algorithm will pass trees to executors to match instances with nodes. - * If true, the algorithm will cache node IDs for each instance. - * Caching can speed up training of deeper trees. Users can set how often the - * cache should be checkpointed, or disable it by setting checkpointInterval. - * (default = false) - * @group expertParam - */ - final val cacheNodeIds: BooleanParam = new BooleanParam(this, "cacheNodeIds", "If false, the" + - " algorithm will pass trees to executors to match instances with nodes. If true, the" + - " algorithm will cache node IDs for each instance. Caching can speed up training of deeper" + - " trees.") - - setDefault(maxDepth -> 5, maxBins -> 32, minInstancesPerNode -> 1, minInfoGain -> 0.0, - maxMemoryInMB -> 256, cacheNodeIds -> false, checkpointInterval -> 10) - - /** - * @deprecated This method is deprecated and will be removed in 3.0.0. - * @group setParam - */ - @deprecated("This method is deprecated and will be removed in 3.0.0.", "2.1.0") - def setMaxDepth(value: Int): this.type = set(maxDepth, value) - - /** @group getParam */ - final def getMaxDepth: Int = $(maxDepth) - - /** - * @deprecated This method is deprecated and will be removed in 3.0.0. - * @group setParam - */ - @deprecated("This method is deprecated and will be removed in 3.0.0.", "2.1.0") - def setMaxBins(value: Int): this.type = set(maxBins, value) - - /** @group getParam */ - final def getMaxBins: Int = $(maxBins) - - /** - * @deprecated This method is deprecated and will be removed in 3.0.0. - * @group setParam - */ - @deprecated("This method is deprecated and will be removed in 3.0.0.", "2.1.0") - def setMinInstancesPerNode(value: Int): this.type = set(minInstancesPerNode, value) - - /** @group getParam */ - final def getMinInstancesPerNode: Int = $(minInstancesPerNode) - - /** - * @deprecated This method is deprecated and will be removed in 3.0.0.
- * @group setParam - */ - @deprecated("This method is deprecated and will be removed in 3.0.0.", "2.1.0") - def setMinInfoGain(value: Double): this.type = set(minInfoGain, value) - - /** @group getParam */ - final def getMinInfoGain: Double = $(minInfoGain) - - /** - * @deprecated This method is deprecated and will be removed in 3.0.0. - * @group setParam - */ - @deprecated("This method is deprecated and will be removed in 3.0.0.", "2.1.0") - def setSeed(value: Long): this.type = set(seed, value) - - /** - * @deprecated This method is deprecated and will be removed in 3.0.0. - * @group expertSetParam - */ - @deprecated("This method is deprecated and will be removed in 3.0.0.", "2.1.0") - def setMaxMemoryInMB(value: Int): this.type = set(maxMemoryInMB, value) - - /** @group expertGetParam */ - final def getMaxMemoryInMB: Int = $(maxMemoryInMB) - - /** - * @deprecated This method is deprecated and will be removed in 3.0.0. - * @group expertSetParam - */ - @deprecated("This method is deprecated and will be removed in 3.0.0.", "2.1.0") - def setCacheNodeIds(value: Boolean): this.type = set(cacheNodeIds, value) - - /** @group expertGetParam */ - final def getCacheNodeIds: Boolean = $(cacheNodeIds) - - /** - * @deprecated This method is deprecated and will be removed in 3.0.0. - * @group setParam - */ - @deprecated("This method is deprecated and will be removed in 3.0.0.", "2.1.0") - def setCheckpointInterval(value: Int): this.type = set(checkpointInterval, value) - - /** (private[ml]) Create a Strategy instance to use with the old API. */ - private[ml] def getOldStrategy( - categoricalFeatures: Map[Int, Int], - numClasses: Int, - oldAlgo: OldAlgo.Algo, - oldImpurity: OldImpurity, - subsamplingRate: Double): OldStrategy = { - val strategy = OldStrategy.defaultStrategy(oldAlgo) - strategy.impurity = oldImpurity - strategy.checkpointInterval = getCheckpointInterval - strategy.maxBins = getMaxBins - strategy.maxDepth = getMaxDepth - strategy.maxMemoryInMB = getMaxMemoryInMB - strategy.minInfoGain = getMinInfoGain - strategy.minInstancesPerNode = getMinInstancesPerNode - strategy.useNodeIdCache = getCacheNodeIds - strategy.numClasses = numClasses - strategy.categoricalFeaturesInfo = categoricalFeatures - strategy.subsamplingRate = subsamplingRate - strategy - } -} - -/** - * Parameters for Decision Tree-based classification algorithms. - */ -private[ml] trait TreeClassifierParams extends Params { - - /** - * Criterion used for information gain calculation (case-insensitive). - * Supported: "entropy" and "gini". - * (default = gini) - * @group param - */ - final val impurity: Param[String] = new Param[String](this, "impurity", "Criterion used for" + - " information gain calculation (case-insensitive). Supported options:" + - s" ${TreeClassifierParams.supportedImpurities.mkString(", ")}", - (value: String) => - TreeClassifierParams.supportedImpurities.contains(value.toLowerCase(Locale.ROOT))) - - setDefault(impurity -> "gini") - - /** - * @deprecated This method is deprecated and will be removed in 3.0.0. - * @group setParam - */ - @deprecated("This method is deprecated and will be removed in 3.0.0.", "2.1.0") - def setImpurity(value: String): this.type = set(impurity, value) - - /** @group getParam */ - final def getImpurity: String = $(impurity).toLowerCase(Locale.ROOT) - - /** Convert new impurity to old impurity. 
*/ - private[ml] def getOldImpurity: OldImpurity = { - getImpurity match { - case "entropy" => OldEntropy - case "gini" => OldGini - case _ => - // Should never happen because of check in setter method. - throw new RuntimeException( - s"TreeClassifierParams was given unrecognized impurity: $impurity.") - } - } -} - -private[ml] object TreeClassifierParams { - // These options should be lowercase. - final val supportedImpurities: Array[String] = - Array("entropy", "gini").map(_.toLowerCase(Locale.ROOT)) -} - -private[ml] trait DecisionTreeClassifierParams - extends DecisionTreeParams with TreeClassifierParams - -/** - * Parameters for Decision Tree-based regression algorithms. - */ -private[ml] trait TreeRegressorParams extends Params { - - /** - * Criterion used for information gain calculation (case-insensitive). - * Supported: "variance". - * (default = variance) - * @group param - */ - final val impurity: Param[String] = new Param[String](this, "impurity", "Criterion used for" + - " information gain calculation (case-insensitive). Supported options:" + - s" ${TreeRegressorParams.supportedImpurities.mkString(", ")}", - (value: String) => - TreeRegressorParams.supportedImpurities.contains(value.toLowerCase(Locale.ROOT))) - - setDefault(impurity -> "variance") - - /** - * @deprecated This method is deprecated and will be removed in 3.0.0. - * @group setParam - */ - @deprecated("This method is deprecated and will be removed in 3.0.0.", "2.1.0") - def setImpurity(value: String): this.type = set(impurity, value) - - /** @group getParam */ - final def getImpurity: String = $(impurity).toLowerCase(Locale.ROOT) - - /** Convert new impurity to old impurity. */ - private[ml] def getOldImpurity: OldImpurity = { - getImpurity match { - case "variance" => OldVariance - case _ => - // Should never happen because of check in setter method. - throw new RuntimeException( - s"TreeRegressorParams was given unrecognized impurity: $impurity") - } - } -} - -private[ml] object TreeRegressorParams { - // These options should be lowercase. - final val supportedImpurities: Array[String] = - Array("variance").map(_.toLowerCase(Locale.ROOT)) -} - -private[ml] trait DecisionTreeRegressorParams extends DecisionTreeParams - with TreeRegressorParams with HasVarianceCol { - - override protected def validateAndTransformSchema( - schema: StructType, - fitting: Boolean, - featuresDataType: DataType): StructType = { - val newSchema = super.validateAndTransformSchema(schema, fitting, featuresDataType) - if (isDefined(varianceCol) && $(varianceCol).nonEmpty) { - SchemaUtils.appendColumn(newSchema, $(varianceCol), DoubleType) - } else { - newSchema - } - } -} - -private[spark] object TreeEnsembleParams { - // These options should be lowercase. - final val supportedFeatureSubsetStrategies: Array[String] = - Array("auto", "all", "onethird", "sqrt", "log2").map(_.toLowerCase(Locale.ROOT)) -} - -/** - * Parameters for Decision Tree-based ensemble algorithms. - * - * Note: Marked as private and DeveloperApi since this may be made public in the future. - */ -private[ml] trait TreeEnsembleParams extends DecisionTreeParams { - - /** - * Fraction of the training data used for learning each decision tree, in range (0, 1]. 
- * (default = 1.0) - * @group param - */ - final val subsamplingRate: DoubleParam = new DoubleParam(this, "subsamplingRate", - "Fraction of the training data used for learning each decision tree, in range (0, 1].", - ParamValidators.inRange(0, 1, lowerInclusive = false, upperInclusive = true)) - - setDefault(subsamplingRate -> 1.0) - - /** - * @deprecated This method is deprecated and will be removed in 3.0.0. - * @group setParam - */ - @deprecated("This method is deprecated and will be removed in 3.0.0.", "2.1.0") - def setSubsamplingRate(value: Double): this.type = set(subsamplingRate, value) - - /** @group getParam */ - final def getSubsamplingRate: Double = $(subsamplingRate) - - /** - * Create a Strategy instance to use with the old API. - * NOTE: The caller should set impurity and seed. - */ - private[ml] def getOldStrategy( - categoricalFeatures: Map[Int, Int], - numClasses: Int, - oldAlgo: OldAlgo.Algo, - oldImpurity: OldImpurity): OldStrategy = { - super.getOldStrategy(categoricalFeatures, numClasses, oldAlgo, oldImpurity, getSubsamplingRate) - } - - /** - * The number of features to consider for splits at each tree node. - * Supported options: - * - "auto": Choose automatically for task: - * If numTrees == 1, set to "all." - * If numTrees > 1 (forest), set to "sqrt" for classification and - * to "onethird" for regression. - * - "all": use all features - * - "onethird": use 1/3 of the features - * - "sqrt": use sqrt(number of features) - * - "log2": use log2(number of features) - * - "n": when n is in the range (0, 1.0], use n * number of features. When n - * is in the range (1, number of features), use n features. - * (default = "auto") - * - * These various settings are based on the following references: - * - log2: tested in Breiman (2001) - * - sqrt: recommended by Breiman manual for random forests - * - The defaults of sqrt (classification) and onethird (regression) match the R randomForest - * package. - * @see Breiman (2001) - * @see - * Breiman manual for random forests - * - * @group param - */ - final val featureSubsetStrategy: Param[String] = new Param[String](this, "featureSubsetStrategy", - "The number of features to consider for splits at each tree node." + - s" Supported options: ${TreeEnsembleParams.supportedFeatureSubsetStrategies.mkString(", ")}" + - s", (0.0-1.0], [1-n].", - (value: String) => - TreeEnsembleParams.supportedFeatureSubsetStrategies.contains( - value.toLowerCase(Locale.ROOT)) - || Try(value.toInt).filter(_ > 0).isSuccess - || Try(value.toDouble).filter(_ > 0).filter(_ <= 1.0).isSuccess) - - setDefault(featureSubsetStrategy -> "auto") - - /** - * @deprecated This method is deprecated and will be removed in 3.0.0 - * @group setParam - */ - @deprecated("This method is deprecated and will be removed in 3.0.0.", "2.1.0") - def setFeatureSubsetStrategy(value: String): this.type = set(featureSubsetStrategy, value) - - /** @group getParam */ - final def getFeatureSubsetStrategy: String = $(featureSubsetStrategy).toLowerCase(Locale.ROOT) -} - - - -/** - * Parameters for Random Forest algorithms. - */ -private[ml] trait RandomForestParams extends TreeEnsembleParams { - - /** - * Number of trees to train (>= 1). - * If 1, then no bootstrapping is used. If > 1, then bootstrapping is done. - * TODO: Change to always do bootstrapping (simpler). SPARK-7130 - * (default = 20) - * - * Note: The reason that we cannot add this to both GBT and RF (i.e. in TreeEnsembleParams) - * is the param `maxIter` controls how many trees a GBT has. 
The semantics in the algorithms - * are a bit different. - * @group param - */ - final val numTrees: IntParam = new IntParam(this, "numTrees", "Number of trees to train (>= 1)", - ParamValidators.gtEq(1)) - - setDefault(numTrees -> 20) - - /** - * @deprecated This method is deprecated and will be removed in 3.0.0. - * @group setParam - */ - @deprecated("This method is deprecated and will be removed in 3.0.0.", "2.1.0") - def setNumTrees(value: Int): this.type = set(numTrees, value) - - /** @group getParam */ - final def getNumTrees: Int = $(numTrees) -} - -private[ml] trait RandomForestClassifierParams - extends RandomForestParams with TreeClassifierParams - -private[ml] trait RandomForestRegressorParams - extends RandomForestParams with TreeRegressorParams - -/** - * Parameters for Gradient-Boosted Tree algorithms. - * - * Note: Marked as private and DeveloperApi since this may be made public in the future. - */ -private[ml] trait GBTParams extends TreeEnsembleParams with HasMaxIter with HasStepSize { - - /* TODO: Add this doc when we add this param. SPARK-7132 - * Threshold for stopping early when runWithValidation is used. - * If the error rate on the validation input changes by less than the validationTol, - * then learning will stop early (before [[numIterations]]). - * This parameter is ignored when run is used. - * (default = 1e-5) - * @group param - */ - // final val validationTol: DoubleParam = new DoubleParam(this, "validationTol", "") - // validationTol -> 1e-5 - - /** - * @deprecated This method is deprecated and will be removed in 3.0.0. - * @group setParam - */ - @deprecated("This method is deprecated and will be removed in 3.0.0.", "2.1.0") - def setMaxIter(value: Int): this.type = set(maxIter, value) - - /** - * Param for Step size (a.k.a. learning rate) in interval (0, 1] for shrinking - * the contribution of each estimator. - * (default = 0.1) - * @group param - */ - final override val stepSize: DoubleParam = new DoubleParam(this, "stepSize", "Step size " + - "(a.k.a. learning rate) in interval (0, 1] for shrinking the contribution of each estimator.", - ParamValidators.inRange(0, 1, lowerInclusive = false, upperInclusive = true)) - - /** - * @deprecated This method is deprecated and will be removed in 3.0.0. - * @group setParam - */ - @deprecated("This method is deprecated and will be removed in 3.0.0.", "2.1.0") - def setStepSize(value: Double): this.type = set(stepSize, value) - - setDefault(maxIter -> 20, stepSize -> 0.1) - - setDefault(featureSubsetStrategy -> "all") - - /** (private[ml]) Create a BoostingStrategy instance to use with the old API. */ - private[ml] def getOldBoostingStrategy( - categoricalFeatures: Map[Int, Int], - oldAlgo: OldAlgo.Algo): OldBoostingStrategy = { - val strategy = super.getOldStrategy(categoricalFeatures, numClasses = 2, oldAlgo, OldVariance) - // NOTE: The old API does not support "seed" so we ignore it. - new OldBoostingStrategy(strategy, getOldLossType, getMaxIter, getStepSize) - } - - /** Get old Gradient Boosting Loss type */ - private[ml] def getOldLossType: OldLoss - - final val doUseAcc: BooleanParam = new BooleanParam(this, "doUseAcc", - "If true, use the optimized algorithm; otherwise, use the raw version") - - var setUseAccFlag = false - - /** Set whether to use the optimized algorithm (true) or the raw version (false). */ - def setDoUseAcc(value: Boolean): this.type = { - setUseAccFlag = true - set(doUseAcc, value) - } - setDefault(doUseAcc -> true) - - /** Get algorithm type.
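- * A hedged illustration (editor's note, not from the original source): the second element - * reports whether the flag was set explicitly, so after setDoUseAcc(false) this returns - * (false, true), while under the default alone it returns (true, false).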
*/ - def getDoUseAcc: (Boolean, Boolean) = ($(doUseAcc), setUseAccFlag) -} - -private[ml] object GBTClassifierParams { - // The losses below should be lowercase. - /** Accessor for supported loss settings: logistic */ - final val supportedLossTypes: Array[String] = - Array("logistic").map(_.toLowerCase(Locale.ROOT)) -} - -private[ml] trait GBTClassifierParams extends GBTParams with TreeClassifierParams { - - /** - * Loss function which GBT tries to minimize. (case-insensitive) - * Supported: "logistic" - * (default = logistic) - * @group param - */ - val lossType: Param[String] = new Param[String](this, "lossType", "Loss function which GBT" + - " tries to minimize (case-insensitive). Supported options:" + - s" ${GBTClassifierParams.supportedLossTypes.mkString(", ")}", - (value: String) => - GBTClassifierParams.supportedLossTypes.contains(value.toLowerCase(Locale.ROOT))) - - setDefault(lossType -> "logistic") - - /** @group getParam */ - def getLossType: String = $(lossType).toLowerCase(Locale.ROOT) - - /** (private[ml]) Convert new loss to old loss. */ - override private[ml] def getOldLossType: OldClassificationLoss = { - getLossType match { - case "logistic" => OldLogLoss - case _ => - // Should never happen because of check in setter method. - throw new RuntimeException(s"GBTClassifier was given bad loss type: $getLossType") - } - } -} - -private[ml] object GBTRegressorParams { - // The losses below should be lowercase. - /** Accessor for supported loss settings: squared (L2), absolute (L1) */ - final val supportedLossTypes: Array[String] = - Array("squared", "absolute").map(_.toLowerCase(Locale.ROOT)) -} - -private[ml] trait GBTRegressorParams extends GBTParams with TreeRegressorParams { - - /** - * Loss function which GBT tries to minimize. (case-insensitive) - * Supported: "squared" (L2) and "absolute" (L1) - * (default = squared) - * @group param - */ - val lossType: Param[String] = new Param[String](this, "lossType", "Loss function which GBT" + - " tries to minimize (case-insensitive). Supported options:" + - s" ${GBTRegressorParams.supportedLossTypes.mkString(", ")}", - (value: String) => - GBTRegressorParams.supportedLossTypes.contains(value.toLowerCase(Locale.ROOT))) - - setDefault(lossType -> "squared") - - /** @group getParam */ - def getLossType: String = $(lossType).toLowerCase(Locale.ROOT) - - /** (private[ml]) Convert new loss to old loss. */ - override private[ml] def getOldLossType: OldLoss = { - getLossType match { - case "squared" => OldSquaredError - case "absolute" => OldAbsoluteError - case _ => - // Should never happen because of check in setter method. - throw new RuntimeException(s"GBTRegressorParams was given bad loss type: $getLossType") - } - } -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/mllib/clustering/KMACCm.scala b/ml-accelerator/src/main/scala/org/apache/spark/mllib/clustering/KMACCm.scala deleted file mode 100644 index 2f9094c..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/mllib/clustering/KMACCm.scala +++ /dev/null @@ -1,124 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.spark.mllib.clustering - -import org.apache.spark.broadcast.Broadcast -import org.apache.spark.mllib.linalg.BLAS.{axpy, scal} -import org.apache.spark.mllib.linalg.Vectors -import org.apache.spark.rdd.RDD -import org.apache.spark.sql.SparkSession - -object KMACCm { - val DEFAULT_SAMPLE_RATE = 0.05 - - def generateNewCenters( - data: RDD[VectorWithNorm], - bcs: Broadcast[Array[Double]], - bcCenters: Broadcast[Array[VectorWithNorm]]): Map[Int, VectorWithNorm] = { - val newCenters = data - .mapPartitions { points => - val thisS = bcs.value - val thisCenters = bcCenters.value - val dims = thisCenters.head.vector.size - val sums = Array.fill(thisCenters.length)(Vectors.zeros(dims)) - val counts = Array.fill(thisCenters.length)(0L) - - points.foreach { point => - val (bestCenter, cost) = KmeansUtil.findClosest(thisCenters, point, thisS) - val sum = sums(bestCenter) - axpy(1.0, point.vector, sum) - counts(bestCenter) += 1 - } - counts.indices.filter(counts(_) > 0).map(j => (j, (sums(j), counts(j)))).iterator - }.reduceByKey { case ((sum1, count1), (sum2, count2)) => - axpy(1.0, sum2, sum1) - (sum1, count1 + count2) - }.mapValues { case (sum, count) => - scal(1.0 / count, sum) - new VectorWithNorm(sum) - }.collectAsMap() - newCenters.toMap - } - - def compute( - data: RDD[VectorWithNorm], - centers: Array[VectorWithNorm], - maxIterations: Int, - epsilon: Double, - enableMiniBatch: Boolean): Unit = { - var converged = false - var iteration = 0 - val cl = centers.length - val p = Array.fill(cl)(0.0) - val sc = data.sparkContext - - var sampleRate = DEFAULT_SAMPLE_RATE - try { - sampleRate = sc.getConf.getDouble("spark.boostkit.Kmeans.sampleRate", - DEFAULT_SAMPLE_RATE) - if (sampleRate < 0.0) { - throw new Exception - } - } - catch { - case x: Exception => - throw new Exception("'spark.boostkit.Kmeans.sampleRate' value is invalid") - } - - val DEFAULT_PAR_LEVEL = 100 - var customParLevel = DEFAULT_PAR_LEVEL - try{ - customParLevel = SparkSession.builder().getOrCreate() - .sparkContext.getConf.getInt("spark.boostkit.Kmeans.parLevel", - DEFAULT_PAR_LEVEL) - if (customParLevel < 1) { - throw new Exception - } - } - catch { - case x: Exception => - throw new Exception("'spark.boostkit.Kmeans.parLevel' value is invalid") - } - - while (iteration < maxIterations && !converged) { - val s = KmeansUtil.generateDisMatrix(centers, customParLevel) - val bcCenters = sc.broadcast(centers) - val bcs = sc.broadcast(s) - - // Find the new centers - val newCenters = if (!enableMiniBatch) generateNewCenters(data, bcs, bcCenters) - else generateNewCenters(data.sample(false, sampleRate), bcs, bcCenters) - converged = true - newCenters.foreach { case (j, newCenter) => - p(j) = KmeansUtil.fastDistance(newCenter, centers(j)) - if (converged && p(j) > epsilon) { - converged = false - } - centers(j) = newCenter - } - bcCenters.destroy(blocking = false) - bcs.destroy(blocking = false) - iteration += 1 - } - } -} diff --git 
a/ml-accelerator/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala b/ml-accelerator/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala deleted file mode 100644 index 4ecbcc3..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala +++ /dev/null @@ -1,616 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.mllib.clustering - -import scala.collection.mutable.ArrayBuffer - -import org.apache.spark.annotation.Since -import org.apache.spark.broadcast.Broadcast -import org.apache.spark.internal.Logging -import org.apache.spark.ml.clustering.{KMeans => NewKMeans} -import org.apache.spark.ml.util.Instrumentation -import org.apache.spark.mllib.linalg.{Vector, Vectors} -import org.apache.spark.mllib.linalg.BLAS.{axpy, scal} -import org.apache.spark.mllib.util.MLUtils -import org.apache.spark.rdd.RDD -import org.apache.spark.storage.StorageLevel -import org.apache.spark.util.Utils -import org.apache.spark.util.random.XORShiftRandom - -/** - * K-means clustering with a k-means++ like initialization mode - * (the k-means|| algorithm by Bahmani et al). - * - * This is an iterative algorithm that will make multiple passes over the data, so any RDDs given - * to it should be cached by the user. - */ -@Since("0.8.0") -class KMeans private( - private var k: Int, - private var maxIterations: Int, - private var initializationMode: String, - private var initializationSteps: Int, - private var epsilon: Double, - private var seed: Long) extends Serializable with Logging { - - /** - * Constructs a KMeans instance with default parameters: {k: 2, maxIterations: 20, - * initializationMode: "k-means||", initializationSteps: 2, epsilon: 1e-4, seed: random}. - */ - @Since("0.8.0") - def this() = this(2, 20, KMeans.K_MEANS_PARALLEL, 2, 1e-4, Utils.random.nextLong()) - - /** - * Number of clusters to create (k). - * - * @note It is possible for fewer than k clusters to - * be returned, for example, if there are fewer than k distinct points to cluster. - */ - @Since("1.4.0") - def getK: Int = k - - /** - * Set the number of clusters to create (k). - * - * @note It is possible for fewer than k clusters to - * be returned, for example, if there are fewer than k distinct points to cluster. Default: 2. 
- */ - @Since("0.8.0") - def setK(k: Int): this.type = { - require(k > 0 && k < Math.sqrt(Int.MaxValue), - s"Number of clusters must be positive and less than sqrt of Int.MaxValue, but got ${k}") - this.k = k - this - } - - /** - * Maximum number of iterations allowed. - */ - @Since("1.4.0") - def getMaxIterations: Int = maxIterations - - /** - * Set maximum number of iterations allowed. Default: 20. - */ - @Since("0.8.0") - def setMaxIterations(maxIterations: Int): this.type = { - require(maxIterations >= 0, - s"Maximum number of iterations must be nonnegative but got ${maxIterations}") - this.maxIterations = maxIterations - this - } - - /** - * The initialization algorithm. This can be either "random" or "k-means||". - */ - @Since("1.4.0") - def getInitializationMode: String = initializationMode - - /** - * Set the initialization algorithm. This can be either "random" to choose random points as - * initial cluster centers, or "k-means||" to use a parallel variant of k-means++ - * (Bahmani et al., Scalable K-Means++, VLDB 2012). Default: k-means||. - */ - @Since("0.8.0") - def setInitializationMode(initializationMode: String): this.type = { - KMeans.validateInitMode(initializationMode) - this.initializationMode = initializationMode - this - } - - /** - * This function has no effect since Spark 2.0.0. - */ - @Since("1.4.0") - @deprecated("This has no effect and always returns 1", "2.1.0") - def getRuns: Int = { - logWarning("Getting number of runs has no effect since Spark 2.0.0.") - 1 - } - - /** - * This function has no effect since Spark 2.0.0. - */ - @Since("0.8.0") - @deprecated("This has no effect", "2.1.0") - def setRuns(runs: Int): this.type = { - logWarning("Setting number of runs has no effect since Spark 2.0.0.") - this - } - - /** - * Number of steps for the k-means|| initialization mode. - */ - @Since("1.4.0") - def getInitializationSteps: Int = initializationSteps - - /** - * Set the number of steps for the k-means|| initialization mode. This is an advanced - * setting -- the default of 2 is almost always enough. Default: 2. - */ - @Since("0.8.0") - def setInitializationSteps(initializationSteps: Int): this.type = { - require(initializationSteps > 0, - s"Number of initialization steps must be positive but got ${initializationSteps}") - this.initializationSteps = initializationSteps - this - } - - /** - * The distance threshold within which we consider centers to have converged. - */ - @Since("1.4.0") - def getEpsilon: Double = epsilon - - /** - * Set the distance threshold within which we consider centers to have converged. - * If all centers move less than this Euclidean distance, we stop iterating one run. - */ - @Since("0.8.0") - def setEpsilon(epsilon: Double): this.type = { - require(epsilon >= 0, - s"Distance threshold must be nonnegative but got ${epsilon}") - this.epsilon = epsilon - this - } - - /** - * The random seed for cluster initialization. - */ - @Since("1.4.0") - def getSeed: Long = seed - - /** - * Set the random seed for cluster initialization. - */ - @Since("1.4.0") - def setSeed(seed: Long): this.type = { - this.seed = seed - this - } - - // Initial cluster centers can be provided as a KMeansModel object rather than using the - // random or k-means|| initializationMode - private var initialModel: Option[KMeansModel] = None - - /** - * Set the initial starting point, bypassing the random initialization or k-means||. - * The condition model.k == this.k must be met; failure results - * in an IllegalArgumentException.
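- * - * A hedged usage sketch (editor's illustration; warmStart is a hypothetical KMeansModel - * with warmStart.k == 2): new KMeans().setK(2).setInitialModel(warmStart).run(data) - * skips the random/k-means|| seeding and refines the provided centers instead.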
- */ - @Since("1.4.0") - def setInitialModel(model: KMeansModel): this.type = { - require(model.k == k, "mismatched cluster count") - initialModel = Some(model) - this - } - - /** - * Train a K-means model on the given set of points; `data` should be cached for high - * performance, because this is an iterative algorithm. - */ - @Since("0.8.0") - def run(data: RDD[Vector]): KMeansModel = { - run(data, None) - } - - private[spark] def run( - data: RDD[Vector], - instr: Option[Instrumentation[NewKMeans]]): KMeansModel = { - - if (data.getStorageLevel == StorageLevel.NONE) { - logWarning("The input data is not directly cached, which may hurt performance if its" - + " parent RDDs are also uncached.") - } - - // Compute squared norms and cache them. - val norms = data.map(Vectors.norm(_, 2.0)) - norms.persist() - val zippedData = data.zip(norms).map { case (v, norm) => - new VectorWithNorm(v, norm) - } - val model = runAlgorithm(zippedData, instr) - norms.unpersist() - - // Warn at the end of the run as well, for increased visibility. - if (data.getStorageLevel == StorageLevel.NONE) { - logWarning("The input data was not directly cached, which may hurt performance if its" - + " parent RDDs are also uncached.") - } - model - } - - /** - * Implementation of K-Means algorithm. - */ - private def runAlgorithm( - data: RDD[VectorWithNorm], - instr: Option[Instrumentation[NewKMeans]]): KMeansModel = { - - val sc = data.sparkContext - val initStartTime = System.nanoTime() - val centers = initialModel match { - case Some(kMeansCenters) => - kMeansCenters.clusterCenters.map(new VectorWithNorm(_)) - case None => - if (initializationMode == KMeans.RANDOM) { - initRandom(data) - } else { - initKMeansParallel(data) - } - } - val centersR = centers.clone() - val cl = centers.length - val initTimeInSeconds = (System.nanoTime() - initStartTime) / 1e9 - logInfo(f"Initialization with $initializationMode took $initTimeInSeconds%.3f seconds.") - var converged = false - var cost = 0.0 - var iteration = 0 - val iterationStartTime = System.nanoTime() - instr.foreach(_.logNumFeatures(centers.head.vector.size)) - - // Execute iterations of Lloyd's algorithm until converged - if (cl > 1) { - val methodEnum = Array("default", "allData") - val method = sc.getConf.get("spark.boostkit.Kmeans.optMethod", "default") - if (!methodEnum.contains(method)) { - throw new Exception("'spark.boostkit.Kmeans.optMethod' value is invalid") - } - if (method == "allData") { - KMACCm.compute(data, centers, maxIterations, epsilon, false) - } else { - KMACCm.compute(data, centers, maxIterations, epsilon, true) - } - } else { - iteration = 0 - converged = false - cost = 0.0 - while (iteration < maxIterations && !converged) { - val costAccum = sc.doubleAccumulator - val bcCenters = sc.broadcast(centersR) - - // Find the new centers - val newCenters = data.mapPartitions { points => - val thisCenters = bcCenters.value - val dims = thisCenters.head.vector.size - val sums = Array.fill(thisCenters.length)(Vectors.zeros(dims)) - val counts = Array.fill(thisCenters.length)(0L) - points.foreach { point => - val (bestCenter, cost) = KMeans.findClosest(thisCenters, point) - costAccum.add(cost) - val sum = sums(bestCenter) - axpy(1.0, point.vector, sum) - counts(bestCenter) += 1 - } - counts.indices.filter(counts(_) > 0).map(j => (j, (sums(j), counts(j)))).iterator - }.reduceByKey { case ((sum1, count1), (sum2, count2)) => - axpy(1.0, sum2, sum1) - (sum1, count1 + count2) - }.mapValues { case (sum, count) => - scal(1.0 / count, sum) - new 
VectorWithNorm(sum) - }.collectAsMap() - bcCenters.destroy(blocking = false) - - // Update the cluster centers and costs - converged = true - newCenters.foreach { case (j, newCenter) => - if (converged && KMeans.fastSquaredDistance(newCenter, centersR(j)) > epsilon * epsilon) { - converged = false - } - centersR(j) = newCenter - } - cost = costAccum.value - iteration += 1 - } - } - - val iterationTimeInSeconds = (System.nanoTime() - iterationStartTime) / 1e9 - logInfo(f"Iterations took $iterationTimeInSeconds%.3f seconds.") - if (iteration == maxIterations) { - logInfo(s"KMeansX reached the max number of iterations: $maxIterations.") - } else { - logInfo(s"KMeansX converged in $iteration iterations.") - } - if (cl > 1) { - new KMeansModel(centers.map(_.vector)) - } - else { - new KMeansModel(centersR.map(_.vector)) - } - } - - /** - * Initialize a set of cluster centers at random. - */ - private def initRandom(data: RDD[VectorWithNorm]): Array[VectorWithNorm] = { - // Select without replacement; may still produce duplicates if the data has < k distinct - // points, so deduplicate the centroids to match the behavior of k-means|| in the same situation - data.takeSample(false, k, new XORShiftRandom(this.seed).nextInt()) - .map(_.vector).distinct.map(new VectorWithNorm(_)) - } - - - /** - * Initialize a set of cluster centers using the k-means|| algorithm by Bahmani et al. - * (Bahmani et al., Scalable K-Means++, VLDB 2012). This is a variant of k-means++ that tries - * to find dissimilar cluster centers by starting with a random center and then doing - * passes where more centers are chosen with probability proportional to their squared distance - * to the current cluster set. It results in a provable approximation to an optimal clustering. - * - * The original paper can be found at http://theory.stanford.edu/~sergei/papers/vldb12-kmpar.pdf. - */ - private[clustering] def initKMeansParallel(data: RDD[VectorWithNorm]): Array[VectorWithNorm] = { - // Initialize empty centers and point costs. - var costs = data.map(_ => Double.PositiveInfinity) - - // Initialize the first center to a random point. - val seed = new XORShiftRandom(this.seed).nextInt() - val sample = data.takeSample(false, 1, seed) - // Could be empty if data is empty; fail with a better message early: - require(sample.nonEmpty, s"No samples available from $data") - - val centers = ArrayBuffer[VectorWithNorm]() - var newCenters = Seq(sample.head.toDense) - centers ++= newCenters - - // On each step, sample 2 * k points on average with probability proportional - // to their squared distance from the centers. Note that only distances between points - // and new centers are computed in each iteration. 
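- // A hedged numeric illustration (editor's note, not from the original source): with - // k = 3, sumCosts = 300.0 and a point whose current cost is 10.0, the filter below - // keeps that point with probability 2.0 * 10.0 * 3 / 300.0 = 0.2 on this step.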
- var step = 0 - val bcNewCentersList = ArrayBuffer[Broadcast[_]]() - while (step < initializationSteps) { - val bcNewCenters = data.context.broadcast(newCenters) - bcNewCentersList += bcNewCenters - val preCosts = costs - costs = data.zip(preCosts).map { case (point, cost) => - math.min(KMeans.pointCost(bcNewCenters.value, point), cost) - }.persist(StorageLevel.MEMORY_AND_DISK) - val sumCosts = costs.sum() - - bcNewCenters.unpersist(blocking = false) - preCosts.unpersist(blocking = false) - - val chosen = data.zip(costs).mapPartitionsWithIndex { (index, pointCosts) => - val rand = new XORShiftRandom(seed ^ (step << 16) ^ index) - pointCosts.filter { case (_, c) => rand.nextDouble() < 2.0 * c * k / sumCosts }.map(_._1) - }.collect() - newCenters = chosen.map(_.toDense) - centers ++= newCenters - step += 1 - } - - costs.unpersist(blocking = false) - bcNewCentersList.foreach(_.destroy(false)) - - val distinctCenters = centers.map(_.vector).distinct.map(new VectorWithNorm(_)) - - if (distinctCenters.size <= k) { - distinctCenters.toArray - } else { - // Finally, we might have a set of more than k distinct candidate centers; weight each - // candidate by the number of points in the dataset mapping to it and run a local k-means++ - // on the weighted centers to pick k of them - val bcCenters = data.context.broadcast(distinctCenters) - val countMap = data.map(KMeans.findClosest(bcCenters.value, _)._1).countByValue() - - bcCenters.destroy(blocking = false) - - val myWeights = distinctCenters.indices.map(countMap.getOrElse(_, 0L).toDouble).toArray - LocalKMeansX.kMeansPlusPlus(0, distinctCenters.toArray, myWeights, k, 30) - } - } -} - - -/** - * Top-level methods for calling K-means clustering. - */ -@Since("0.8.0") -object KMeans { - - // Initialization mode names - @Since("0.8.0") - val RANDOM = "random" - @Since("0.8.0") - val K_MEANS_PARALLEL = "k-means||" - - /** - * Trains a k-means model using the given set of parameters. - * - * @param data Training points as an `RDD` of `Vector` types. - * @param k Number of clusters to create. - * @param maxIterations Maximum number of iterations allowed. - * @param initializationMode The initialization algorithm. This can either be "random" or - * "k-means||". (default: "k-means||") - * @param seed Random seed for cluster initialization. Default is to generate seed based - * on system time. - */ - @Since("2.1.0") - def train( - data: RDD[Vector], - k: Int, - maxIterations: Int, - initializationMode: String, - seed: Long): KMeansModel = { - new KMeans().setK(k) - .setMaxIterations(maxIterations) - .setInitializationMode(initializationMode) - .setSeed(seed) - .run(data) - } - - /** - * Trains a k-means model using the given set of parameters. - * - * @param data Training points as an `RDD` of `Vector` types. - * @param k Number of clusters to create. - * @param maxIterations Maximum number of iterations allowed. - * @param initializationMode The initialization algorithm. This can either be "random" or - * "k-means||". (default: "k-means||") - */ - @Since("2.1.0") - def train( - data: RDD[Vector], - k: Int, - maxIterations: Int, - initializationMode: String): KMeansModel = { - new KMeans().setK(k) - .setMaxIterations(maxIterations) - .setInitializationMode(initializationMode) - .run(data) - } - - /** - * Trains a k-means model using the given set of parameters. - * - * @param data Training points as an `RDD` of `Vector` types. - * @param k Number of clusters to create. - * @param maxIterations Maximum number of iterations allowed. 
- * @param runs This param has no effect since Spark 2.0.0. - * @param initializationMode The initialization algorithm. This can either be "random" or - * "k-means||". (default: "k-means||") - * @param seed Random seed for cluster initialization. Default is to generate seed based - * on system time. - */ - @Since("1.3.0") - @deprecated("Use train method without 'runs'", "2.1.0") - def train( - data: RDD[Vector], - k: Int, - maxIterations: Int, - runs: Int, - initializationMode: String, - seed: Long): KMeansModel = { - new KMeans().setK(k) - .setMaxIterations(maxIterations) - .setInitializationMode(initializationMode) - .setSeed(seed) - .run(data) - } - - /** - * Trains a k-means model using the given set of parameters. - * - * @param data Training points as an `RDD` of `Vector` types. - * @param k Number of clusters to create. - * @param maxIterations Maximum number of iterations allowed. - * @param runs This param has no effect since Spark 2.0.0. - * @param initializationMode The initialization algorithm. This can either be "random" or - * "k-means||". (default: "k-means||") - */ - @Since("0.8.0") - @deprecated("Use train method without 'runs'", "2.1.0") - def train( - data: RDD[Vector], - k: Int, - maxIterations: Int, - runs: Int, - initializationMode: String): KMeansModel = { - new KMeans().setK(k) - .setMaxIterations(maxIterations) - .setInitializationMode(initializationMode) - .run(data) - } - - /** - * Trains a k-means model using specified parameters and the default values for unspecified. - */ - @Since("0.8.0") - def train( - data: RDD[Vector], - k: Int, - maxIterations: Int): KMeansModel = { - new KMeans().setK(k) - .setMaxIterations(maxIterations) - .run(data) - } - - /** - * Trains a k-means model using specified parameters and the default values for unspecified. - */ - @Since("0.8.0") - @deprecated("Use train method without 'runs'", "2.1.0") - def train( - data: RDD[Vector], - k: Int, - maxIterations: Int, - runs: Int): KMeansModel = { - new KMeans().setK(k) - .setMaxIterations(maxIterations) - .run(data) - } - - /** - * Returns the index of the closest center to the given point, as well as the squared distance. - */ - private[mllib] def findClosest( - centers: TraversableOnce[VectorWithNorm], - point: VectorWithNorm): (Int, Double) = { - var bestDistance = Double.PositiveInfinity - var bestIndex = 0 - var i = 0 - centers.foreach { center => - // Since `\|a - b\| \geq |\|a\| - \|b\||`, we can use this lower bound to avoid unnecessary - // distance computation. - var lowerBoundOfSqDist = center.norm - point.norm - lowerBoundOfSqDist = lowerBoundOfSqDist * lowerBoundOfSqDist - if (lowerBoundOfSqDist < bestDistance) { - val distance: Double = fastSquaredDistance(center, point) - if (distance < bestDistance) { - bestDistance = distance - bestIndex = i - } - } - i += 1 - } - (bestIndex, bestDistance) - } - - /** - * Returns the K-means cost of a given point against the given cluster centers. - */ - private[mllib] def pointCost( - centers: TraversableOnce[VectorWithNorm], - point: VectorWithNorm): Double = - findClosest(centers, point)._2 - - /** - * Returns the squared Euclidean distance between two vectors computed by - * [[org.apache.spark.mllib.util.MLUtils#fastSquaredDistance]]. 
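- * - * Editor's hedged worked note (not from the original source): findClosest above skips this - * call when the cheap norm bound already rules a center out, e.g. for norms 5.0 and 3.0 the - * squared distance is at least (5.0 - 3.0)^2 = 4.0, so a bestDistance of 4.0 or less means - * the exact computation can be avoided.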
- */ - private[clustering] def fastSquaredDistance( - v1: VectorWithNorm, - v2: VectorWithNorm): Double = { - MLUtils.fastSquaredDistance(v1.vector, v1.norm, v2.vector, v2.norm) - } - - private[spark] def validateInitMode(initMode: String): Boolean = { - initMode match { - case KMeans.RANDOM => true - case KMeans.K_MEANS_PARALLEL => true - case _ => false - } - } -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/mllib/clustering/LDA.scala b/ml-accelerator/src/main/scala/org/apache/spark/mllib/clustering/LDA.scala index 35f087f..ea40a85 100644 --- a/ml-accelerator/src/main/scala/org/apache/spark/mllib/clustering/LDA.scala +++ b/ml-accelerator/src/main/scala/org/apache/spark/mllib/clustering/LDA.scala @@ -1,9 +1,3 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -27,7 +21,7 @@ import java.util.Locale import breeze.linalg.{DenseVector => BDV} -import org.apache.spark.annotation.{DeveloperApi, Since} +import org.apache.spark.annotation.Since import org.apache.spark.api.java.JavaPairRDD import org.apache.spark.graphx._ import org.apache.spark.internal.Logging @@ -288,21 +282,15 @@ class LDA private ( /** - * :: DeveloperApi :: - * * LDAOptimizer used to perform the actual calculation */ @Since("1.4.0") - @DeveloperApi def getOptimizer: LDAOptimizer = ldaOptimizer /** - * :: DeveloperApi :: - * * LDAOptimizer used to perform the actual calculation (default = EMLDAOptimizer) */ @Since("1.4.0") - @DeveloperApi def setOptimizer(optimizer: LDAOptimizer): this.type = { this.ldaOptimizer = optimizer this @@ -341,7 +329,7 @@ class LDA private ( val state = ldaOptimizer.initialize(documents, this) timer.stop("initialize") var iter = 0 - val iterationTimes = Array.fill[Double](maxIterations)(0) + val iterationTimes = Array.ofDim[Double](maxIterations) timer.start("train") LDAUtilsXOpt.init(documents.sparkContext) diff --git a/ml-accelerator/src/main/scala/org/apache/spark/mllib/clustering/LDAOptimizer.scala b/ml-accelerator/src/main/scala/org/apache/spark/mllib/clustering/LDAOptimizer.scala index 64edba0..42fd9de 100644 --- a/ml-accelerator/src/main/scala/org/apache/spark/mllib/clustering/LDAOptimizer.scala +++ b/ml-accelerator/src/main/scala/org/apache/spark/mllib/clustering/LDAOptimizer.scala @@ -1,9 +1,3 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -29,20 +23,18 @@ import breeze.linalg.{all, sum, DenseMatrix => BDM, DenseVector => BDV} import breeze.numerics.{abs, exp, trigamma} import breeze.stats.distributions.{Gamma, RandBasis} -import org.apache.spark.annotation.{DeveloperApi, Since} +import org.apache.spark.annotation.Since import org.apache.spark.internal.Logging import org.apache.spark.ml.tree.impl.TimeTracker import org.apache.spark.mllib.linalg.{DenseVector, Matrices, SparseVector, Vector, Vectors} import org.apache.spark.rdd.RDD +import org.apache.spark.storage.StorageLevel /** - * :: DeveloperApi :: - * * An LDAOptimizer specifies which optimization/learning/inference algorithm to use, and it can * hold optimizer-specific parameters for users to set. */ @Since("1.4.0") -@DeveloperApi trait LDAOptimizer { /* @@ -67,8 +59,6 @@ trait LDAOptimizer { } /** - * :: DeveloperApi :: - * * An online optimizer for LDA. The Optimizer implements the Online variational Bayes LDA * algorithm, which processes a subset of the corpus on each iteration, and updates the term-topic * distribution adaptively. @@ -77,7 +67,6 @@ trait LDAOptimizer { * Hoffman, Blei and Bach, "Online Learning for Latent Dirichlet Allocation." NIPS, 2010. */ @Since("1.4.0") -@DeveloperApi final class OnlineLDAOptimizer extends LDAOptimizer with Logging { // LDA common parameters @@ -254,6 +243,10 @@ final class OnlineLDAOptimizer extends LDAOptimizer with Logging { this.randomGenerator = new Random(lda.getSeed) this.docs = docs + if (this.docs.getStorageLevel == StorageLevel.NONE) { + logWarning("The input data is not directly cached, which may hurt performance if its" + + " parent RDDs are also uncached.") + } // Initialize the variational distribution q(beta|lambda) this.lambda = getGammaMatrix(k, vocabSize) @@ -296,9 +289,10 @@ final class OnlineLDAOptimizer extends LDAOptimizer with Logging { val alpha = this.alpha.asBreeze val gammaShape = this.gammaShape val optimizeDocConcentration = this.optimizeDocConcentration + val seed = randomGenerator.nextLong() // If and only if optimizeDocConcentration is set true, // we calculate logphat in the same pass as other statistics. - // No calculation of loghat happens otherwise. + // No calculation of logphat happens otherwise. 
val logphatPartOptionBase = () => if (optimizeDocConcentration) { Some(BDV.zeros[Double](k)) } else { @@ -309,9 +303,10 @@ final class OnlineLDAOptimizer extends LDAOptimizer with Logging { val stats: RDD[(BDM[Double], Option[BDV[Double]], Long)] = if (LDAUtilsXOpt.useOptimizedCalc()) { LDAUtilsXOpt.optimizedCalcStats(batch, expElogbetaBc, k, vocabSize, logphatPartOptionBase, - alpha, gammaShape) + alpha, gammaShape, seed) } else { - batch.mapPartitions { docs => + batch.mapPartitionsWithIndex { + (index, docs) => val nonEmptyDocs = docs.filter(_._2.numNonzeros > 0) val stat = BDM.zeros[Double](k, vocabSize) @@ -321,7 +316,7 @@ final class OnlineLDAOptimizer extends LDAOptimizer with Logging { nonEmptyDocs.foreach { case (_, termCounts: Vector) => nonEmptyDocCount += 1 val (gammad, sstats, ids) = OnlineLDAOptimizerXObj.variationalTopicInference( - termCounts, expElogbetaBcValue, alpha, gammaShape, k) + termCounts, expElogbetaBcValue, alpha, gammaShape, k, seed + index) stat(::, ids) := stat(::, ids) + sstats logphatPartOption.foreach(_ += LDAUtilsX.dirichletExpectation(gammad)) } @@ -357,6 +352,7 @@ final class OnlineLDAOptimizer extends LDAOptimizer with Logging { if (nonEmptyDocsN == 0) { logWarning("No non-empty documents were submitted in the batch.") + timer.stop("update-lambda") // Therefore, there is no need to update any of the model parameters return this } @@ -439,7 +435,8 @@ final class OnlineLDAOptimizer extends LDAOptimizer with Logging { } override private[clustering] def getLDAModel(iterationTimes: Array[Double]): LDAModel = { - new LocalLDAModel(Matrices.fromBreeze(lambda).transpose, alpha, eta, gammaShape) + new LocalLDAModel(Matrices.fromBreeze(lambda).transpose, alpha, eta) + .setSeed(randomGenerator.nextLong()) } } @@ -448,7 +445,7 @@ final class OnlineLDAOptimizer extends LDAOptimizer with Logging { * Serializable companion object containing helper methods and shared code for * [[OnlineLDAOptimizer]] and [[LocalLDAModel]]. */ -private[clustering] object OnlineLDAOptimizer { +private[spark] object OnlineLDAOptimizer { /** * Uses variational inference to infer the topic distribution `gammad` given the term counts * for a document. `termCounts` must contain at least one non-zero entry, otherwise Breeze will @@ -461,25 +458,24 @@ private[clustering] object OnlineLDAOptimizer { * @return Returns a tuple of `gammad` - estimate of gamma, the topic distribution, `sstatsd` - * statistics for updating lambda and `ids` - list of termCounts vector indices. 
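* A hedged call sketch (editor's illustration with hypothetical values): a document with * term counts 1.0 and 3.0 at indices 0 and 2 would be passed as * variationalTopicInference(List(0, 2), Array(1.0, 3.0), expElogbeta, alpha, gammaShape, * k, seed), yielding its gamma estimate, the sufficient statistics and the ids list.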
*/ - private[clustering] def variationalTopicInference( - termCounts: Vector, + private[spark] def variationalTopicInference( + indices: List[Int], + values: Array[Double], expElogbeta: BDM[Double], alpha: breeze.linalg.Vector[Double], gammaShape: Double, - k: Int): (BDV[Double], BDM[Double], List[Int]) = { - val (ids: List[Int], cts: Array[Double]) = termCounts match { - case v: DenseVector => ((0 until v.size).toList, v.values) - case v: SparseVector => (v.indices.toList, v.values) - } + k: Int, + seed: Long): (BDV[Double], BDM[Double], List[Int]) = { // Initialize the variational distribution q(theta|gamma) for the mini-batch + val randBasis = new RandBasis(new org.apache.commons.math3.random.MersenneTwister(seed)) val gammad: BDV[Double] = - new Gamma(gammaShape, 1.0 / gammaShape).samplesVector(k) // K + new Gamma(gammaShape, 1.0 / gammaShape)(randBasis).samplesVector(k) // K val expElogthetad: BDV[Double] = exp(LDAUtils.dirichletExpectation(gammad)) // K - val expElogbetad = expElogbeta(ids, ::).toDenseMatrix // ids * K + val expElogbetad = expElogbeta(indices, ::).toDenseMatrix // ids * K val phiNorm: BDV[Double] = expElogbetad * expElogthetad +:+ 1e-100 // ids var meanGammaChange = 1D - val ctsVector = new BDV[Double](cts) // ids + val ctsVector = new BDV[Double](values) // ids // Iterate between gamma and phi until convergence while (meanGammaChange > 1e-3) { @@ -493,6 +489,20 @@ private[clustering] object OnlineLDAOptimizer { } val sstatsd = expElogthetad.asDenseMatrix.t * (ctsVector /:/ phiNorm).asDenseMatrix - (gammad, sstatsd, ids) + (gammad, sstatsd, indices) + } + + private[clustering] def variationalTopicInference( + termCounts: Vector, + expElogbeta: BDM[Double], + alpha: breeze.linalg.Vector[Double], + gammaShape: Double, + k: Int, + seed: Long): (BDV[Double], BDM[Double], List[Int]) = { + val (ids: List[Int], cts: Array[Double]) = termCounts match { + case v: DenseVector => (List.range(0, v.size), v.values) + case v: SparseVector => (v.indices.toList, v.values) + } + variationalTopicInference(ids, cts, expElogbeta, alpha, gammaShape, k, seed) } } diff --git a/ml-accelerator/src/main/scala/org/apache/spark/mllib/clustering/LocalKMeansX.scala b/ml-accelerator/src/main/scala/org/apache/spark/mllib/clustering/LocalKMeansX.scala deleted file mode 100644 index 4d7c18a..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/mllib/clustering/LocalKMeansX.scala +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.spark.mllib.clustering - -import scala.util.Random - -import org.apache.spark.internal.Logging -import org.apache.spark.mllib.linalg.BLAS.{axpy, scal} -import org.apache.spark.mllib.linalg.Vectors -import org.apache.spark.sql.SparkSession - -/** - * An utility object to run K-means locally. This is private to the ML package because it's used - * in the initialization of KMeans but not meant to be publicly exposed. - */ -private[mllib] object LocalKMeansX extends Logging { - - /** - * Run K-means++ on the weighted point set `points`. This first does the K-means++ - * initialization procedure and then rounds of Lloyd's algorithm. - */ - def kMeansPlusPlus( - seed: Int, - points: Array[VectorWithNorm], - weights: Array[Double], - k: Int, - maxIterations: Int - ): Array[VectorWithNorm] = { - val DEFAULT_PAR_LEVEL = 100 - val rand = new Random(seed) - val dimensions = points(0).vector.size - val centers = new Array[VectorWithNorm](k) - - // Initialize centers by sampling using the k-means++ procedure. - centers(0) = pickWeighted(rand, points, weights).toDense - var costArray = points.map(KMeans.fastSquaredDistance(_, centers(0))) - - var customParLevel = DEFAULT_PAR_LEVEL - try{ - customParLevel = SparkSession.builder().getOrCreate() - .sparkContext.getConf.getInt("spark.boostkit.Kmeans.parLevel", - DEFAULT_PAR_LEVEL) - if (customParLevel < 1) { - throw new Exception - } - } - catch { - case x: Exception => - throw new Exception("'spark.boostkit.Kmeans.parLevel' value is invalid") - } - val customForkJoinPool = new scala.concurrent.forkjoin.ForkJoinPool(customParLevel) - val customTaskSupport = new scala.collection.parallel.ForkJoinTaskSupport(customForkJoinPool) - - for (i <- 1 until k) { - val sum = costArray.zip(weights).map(p => p._1 * p._2).sum - val r = rand.nextDouble() * sum - var cumulativeScore = 0.0 - var j = 0 - while (j < points.length && cumulativeScore < r) { - cumulativeScore += weights(j) * costArray(j) - j += 1 - } - if (j == 0) { - logWarning("kMeansPlusPlus initialization ran out of distinct points for centers." 
+ - s" Using duplicate point for center k = $i.") - centers(i) = points(0).toDense - } else { - centers(i) = points(j - 1).toDense - } - - val costArrayPar = points.zipWithIndex.par - costArrayPar.tasksupport = customTaskSupport - costArray = costArrayPar.map{ - t => - math.min(KMeans.fastSquaredDistance(points(t._2), centers(i)), costArray(t._2)) - }.toArray - } - - // Run up to maxIterations iterations of Lloyd's algorithm - val oldClosest = Array.fill(points.length)(-1) - var iteration = 0 - var moved = true - while (moved && iteration < maxIterations) { - moved = false - val counts = Array.fill(k)(0.0) - val sums = Array.fill(k)(Vectors.zeros(dimensions)) - - val assignPar = points.par - assignPar.tasksupport = customTaskSupport - val assign = assignPar.map{ - case point => - val index = KMeans.findClosest(centers, point)._1 - (point, index) - }.toArray - - var idx = 0 - while(idx < assign.size) { - val index = assign(idx)._2 - if (index != oldClosest(idx)) { - moved = true - oldClosest(idx) = index - } - axpy(weights(idx), assign(idx)._1.vector, sums(index)) - counts(index) += weights(idx) - idx += 1 - } - - // Update centers - var j = 0 - while (j < k) { - if (counts(j) == 0.0) { - // Assign center to a random point - centers(j) = points(rand.nextInt(points.length)).toDense - } else { - scal(1.0 / counts(j), sums(j)) - centers(j) = new VectorWithNorm(sums(j)) - } - j += 1 - } - iteration += 1 - } - - if (iteration == maxIterations) { - logInfo(s"Local KMeans++ reached the max number of iterations: $maxIterations.") - } else { - logInfo(s"Local KMeans++ converged in $iteration iterations.") - } - centers - } - - private def pickWeighted[T](rand: Random, data: Array[T], weights: Array[Double]): T = { - val r = rand.nextDouble() * weights.sum - var i = 0 - var curWeight = 0.0 - while (i < data.length && curWeight < r) { - curWeight += weights(i) - i += 1 - } - data(i - 1) - } -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/mllib/feature/IDF.scala b/ml-accelerator/src/main/scala/org/apache/spark/mllib/feature/IDF.scala deleted file mode 100644 index f633c1b..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/mllib/feature/IDF.scala +++ /dev/null @@ -1,231 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.mllib.feature - -import breeze.linalg.{DenseVector => BDV} - -import org.apache.spark.annotation.Since -import org.apache.spark.api.java.JavaRDD -import org.apache.spark.mllib.linalg.{DenseVector, SparseVector, Vector, Vectors} -import org.apache.spark.rdd.RDD - -/** - * Inverse document frequency (IDF). 
- * The standard formulation is used: `idf = log((m + 1) / (d(t) + 1))`, where `m` is the total - * number of documents and `d(t)` is the number of documents that contain term `t`. - * - * This implementation supports filtering out terms which do not appear in a minimum number - * of documents (controlled by the variable `minDocFreq`). For terms that are not in - * at least `minDocFreq` documents, the IDF is found as 0, resulting in TF-IDFs of 0. - * - * @param minDocFreq minimum of documents in which a term - * should appear for filtering - */ -@Since("1.1.0") -class IDF @Since("1.2.0") (@Since("1.2.0") val minDocFreq: Int) { - - @Since("1.1.0") - def this() = this(0) - - // TODO: Allow different IDF formulations. - - /** - * Computes the inverse document frequency. - * @param dataset an RDD of term frequency vectors - */ - @Since("1.1.0") - def fit(dataset: RDD[Vector]): IDFModel = { - val (idf, docFreq, numDocs) = IDFUtils.train(dataset, minDocFreq) - new IDFModel(idf) - } - - /** - * Computes the inverse document frequency. - * @param dataset a JavaRDD of term frequency vectors - */ - @Since("1.1.0") - def fit(dataset: JavaRDD[Vector]): IDFModel = { - fit(dataset.rdd) - } -} - -private object IDF { - - /** Document frequency aggregator. */ - class DocumentFrequencyAggregator(val minDocFreq: Int) extends Serializable { - - /** number of documents */ - private var m = 0L - /** document frequency vector */ - private var df: BDV[Long] = _ - - - def this() = this(0) - - /** Adds a new document. */ - def add(doc: Vector): this.type = { - if (isEmpty) { - df = BDV.zeros(doc.size) - } - doc match { - case SparseVector(size, indices, values) => - val nnz = indices.length - var k = 0 - while (k < nnz) { - if (values(k) > 0) { - df(indices(k)) += 1L - } - k += 1 - } - case DenseVector(values) => - val n = values.length - var j = 0 - while (j < n) { - if (values(j) > 0.0) { - df(j) += 1L - } - j += 1 - } - case other => - throw new UnsupportedOperationException( - s"Only sparse and dense vectors are supported but got ${other.getClass}.") - } - m += 1L - this - } - - /** Merges another. */ - def merge(other: DocumentFrequencyAggregator): this.type = { - if (!other.isEmpty) { - m += other.m - if (df == null) { - df = other.df.copy - } else { - df += other.df - } - } - this - } - - private def isEmpty: Boolean = m == 0L - - /** Returns the current IDF vector. */ - def idf(): Vector = { - if (isEmpty) { - throw new IllegalStateException("Haven't seen any document yet.") - } - val n = df.length - val inv = new Array[Double](n) - var j = 0 - while (j < n) { - /* - * If the term is not present in the minimum - * number of documents, set IDF to 0. This - * will cause multiplication in IDFModel to - * set TF-IDF to 0. - * - * Since arrays are initialized to 0 by default, - * we just omit changing those entries. - */ - if (df(j) >= minDocFreq) { - inv(j) = math.log((m + 1.0) / (df(j) + 1.0)) - } - j += 1 - } - Vectors.dense(inv) - } - } -} - -/** - * Represents an IDF model that can transform term frequency vectors. - */ -@Since("1.1.0") -class IDFModel private[spark] (@Since("1.1.0") val idf: Vector) extends Serializable { - - /** - * Transforms term frequency (TF) vectors to TF-IDF vectors. - * - * If `minDocFreq` was set for the IDF calculation, - * the terms which occur in fewer than `minDocFreq` - * documents will have an entry of 0. 
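A tiny local sketch of the IDF formula documented above, including the minDocFreq gating that zeroes out rare terms; plain Scala, no Spark required, and the helper name idf is illustrative:

// idf(t) = log((m + 1) / (df(t) + 1)) when df(t) >= minDocFreq, and 0 otherwise.
def idf(m: Long, docFreq: Array[Long], minDocFreq: Int): Array[Double] =
  docFreq.map(df => if (df >= minDocFreq) math.log((m + 1.0) / (df + 1.0)) else 0.0)

// idf(4, Array(4L, 1L, 0L), minDocFreq = 1)
//   == Array(log(5.0 / 5.0), log(5.0 / 2.0), 0.0) == Array(0.0, 0.916..., 0.0)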
- * - * @param dataset an RDD of term frequency vectors - * @return an RDD of TF-IDF vectors - */ - @Since("1.1.0") - def transform(dataset: RDD[Vector]): RDD[Vector] = { - val bcIdf = dataset.context.broadcast(idf) - dataset.mapPartitions(iter => iter.map(v => IDFModel.transform(bcIdf.value, v))) - } - - /** - * Transforms a term frequency (TF) vector to a TF-IDF vector - * - * @param v a term frequency vector - * @return a TF-IDF vector - */ - @Since("1.3.0") - def transform(v: Vector): Vector = IDFModel.transform(idf, v) - - /** - * Transforms term frequency (TF) vectors to TF-IDF vectors (Java version). - * @param dataset a JavaRDD of term frequency vectors - * @return a JavaRDD of TF-IDF vectors - */ - @Since("1.1.0") - def transform(dataset: JavaRDD[Vector]): JavaRDD[Vector] = { - transform(dataset.rdd).toJavaRDD() - } -} - -private object IDFModel { - - /** - * Transforms a term frequency (TF) vector to a TF-IDF vector with a IDF vector - * - * @param idf an IDF vector - * @param v a term frequency vector - * @return a TF-IDF vector - */ - def transform(idf: Vector, v: Vector): Vector = { - val n = v.size - v match { - case SparseVector(size, indices, values) => - val nnz = indices.length - val newValues = new Array[Double](nnz) - var k = 0 - while (k < nnz) { - newValues(k) = values(k) * idf(indices(k)) - k += 1 - } - Vectors.sparse(n, indices, newValues) - case DenseVector(values) => - val newValues = new Array[Double](n) - var j = 0 - while (j < n) { - newValues(j) = values(j) * idf(j) - j += 1 - } - Vectors.dense(newValues) - case other => - throw new UnsupportedOperationException( - s"Only sparse and dense vectors are supported but got ${other.getClass}.") - } - } -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/mllib/fpm/PrefixSpan.scala b/ml-accelerator/src/main/scala/org/apache/spark/mllib/fpm/PrefixSpan.scala index 0a30a2e..23fabc6 100644 --- a/ml-accelerator/src/main/scala/org/apache/spark/mllib/fpm/PrefixSpan.scala +++ b/ml-accelerator/src/main/scala/org/apache/spark/mllib/fpm/PrefixSpan.scala @@ -1,9 +1,3 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -40,12 +34,11 @@ import org.apache.spark.storage.StorageLevel * A parallel PrefixSpan algorithm to mine frequent sequential patterns. * The PrefixSpan algorithm is described in J. Pei, et al., PrefixSpan: Mining Sequential Patterns * Efficiently by Prefix-Projected Pattern Growth - * (see here). + * (see here). * * @param minSupport the minimal support level of the sequential pattern, any pattern that appears * more than (minSupport * size-of-the-dataset) times will be output - * @param maxPatternLength the maximal length of the sequential pattern, any pattern that appears - * less than maxPatternLength will be output + * @param maxPatternLength the maximal length of the sequential pattern * @param maxLocalProjDBSize The maximum number of items (including delimiters used in the internal * storage format) allowed in a projected database before local * processing. 
If a projected database exceeds this size, another @@ -170,6 +163,13 @@ class PrefixSpan private ( val freqSequences = results.map { case (seq: Array[Int], count: Long) => new FreqSequence(toPublicRepr(seq), count) } + // Cache the final RDD to the same storage level as input + if (data.getStorageLevel != StorageLevel.NONE) { + freqSequences.persist(data.getStorageLevel) + freqSequences.count() + } + dataInternalRepr.unpersist() + new PrefixSpanModel(freqSequences) } diff --git a/ml-accelerator/src/main/scala/org/apache/spark/mllib/linalg/EigenValueDecomposition.scala b/ml-accelerator/src/main/scala/org/apache/spark/mllib/linalg/EigenValueDecomposition.scala deleted file mode 100644 index d121ad1..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/mllib/linalg/EigenValueDecomposition.scala +++ /dev/null @@ -1,285 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.mllib.linalg - -import breeze.linalg.{DenseMatrix => BDM, DenseMatrixUtil, DenseVector => BDV} -import breeze.linalg.blas.Dgemv -import com.github.fommil.netlib.ARPACK -import org.netlib.util.{doubleW, intW} - -/** - * Compute eigen-decomposition. - */ -private[mllib] object EigenValueDecomposition { - - private val DEFAULT_THREAD_NUM = 35 - - /** - * Compute the leading k eigenvalues and eigenvectors on a symmetric square matrix using ARPACK. - * The caller needs to ensure that the input matrix is real symmetric. This function requires - * memory for `n*(4*k+4)` doubles. - * - * @param mul a function that multiplies the symmetric matrix with a DenseVector. - * @param n dimension of the square matrix (maximum Int.MaxValue). - * @param k number of leading eigenvalues required, where k must be positive and less than n. - * @param tol tolerance of the eigs computation. - * @param maxIterations the maximum number of Arnoldi update iterations. - * @return a dense vector of eigenvalues in descending order and a dense matrix of eigenvectors - * (columns of the matrix). - * @note The number of computed eigenvalues might be smaller than k when some Ritz values do not - * satisfy the convergence criterion specified by tol (see ARPACK Users Guide, Chapter 4.6 - * for more details). The maximum number of Arnoldi update iterations is set to 300 in this - * function. 
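symmetricEigs, documented above, only needs a matrix-free multiply v => A^T A v, so the Gram matrix itself is never materialized. A hypothetical usage sketch (the names a and mul are illustrative, and since symmetricEigs is private[mllib] this assumes calling code living under org.apache.spark.mllib):

import breeze.linalg.{DenseMatrix => BDM, DenseVector => BDV}

val a: BDM[Double] = BDM.rand(100, 10)                    // 100 x 10 data matrix
val mul: BDV[Double] => BDV[Double] = v => a.t * (a * v)  // v => A^T A v without forming A^T A
val (eigenvalues, eigenvectors) =
  EigenValueDecomposition.symmetricEigs(mul, n = 10, k = 3, tol = 1e-10, maxIterations = 300)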
- */ - def symmetricEigs( - mul: BDV[Double] => BDV[Double], - n: Int, - k: Int, - tol: Double, - maxIterations: Int): (BDV[Double], BDM[Double]) = { - // TODO: remove this function and use eigs in breeze when switching breeze version - require(n > k, s"Number of required eigenvalues $k must be smaller than matrix dimension $n") - - val arpack = ARPACK.getInstance() - - // tolerance used in stopping criterion - val tolW = new doubleW(tol) - // number of desired eigenvalues, 0 < nev < n - val nev = new intW(k) - // nev Lanczos vectors are generated in the first iteration - // ncv-nev Lanczos vectors are generated in each subsequent iteration - // ncv must be smaller than n - val ncv = math.min(2 * k, n) - - // "I" for standard eigenvalue problem, "G" for generalized eigenvalue problem - val bmat = "I" - // "LM" : compute the NEV largest (in magnitude) eigenvalues - val which = "LM" - - var iparam = new Array[Int](11) - // use exact shift in each iteration - iparam(0) = 1 - // maximum number of Arnoldi update iterations, or the actual number of iterations on output - iparam(2) = maxIterations - // Mode 1: A*x = lambda*x, A symmetric - iparam(6) = 1 - - require(n * ncv.toLong <= Integer.MAX_VALUE && ncv * (ncv.toLong + 8) <= Integer.MAX_VALUE, - s"k = $k and/or n = $n are too large to compute an eigendecomposition") - - val ido = new intW(0) - val info = new intW(0) - val resid = new Array[Double](n) - val v = new Array[Double](n * ncv) - val workd = new Array[Double](n * 3) - val workl = new Array[Double](ncv * (ncv + 8)) - val ipntr = new Array[Int](11) - - // call ARPACK's reverse communication, first iteration with ido = 0 - arpack.dsaupd(ido, bmat, n, which, nev.`val`, tolW, resid, ncv, v, n, iparam, ipntr, workd, - workl, workl.length, info) - - val w = BDV(workd) - - // ido = 99 : done flag in reverse communication - while (ido.`val` != 99) { - if (ido.`val` != -1 && ido.`val` != 1) { - throw new IllegalStateException("ARPACK returns ido = " + ido.`val` + - " This flag is not compatible with Mode 1: A*x = lambda*x, A symmetric.") - } - // multiply working vector with the matrix - val inputOffset = ipntr(0) - 1 - val outputOffset = ipntr(1) - 1 - val x = w.slice(inputOffset, inputOffset + n) - val y = w.slice(outputOffset, outputOffset + n) - y := mul(x) - // call ARPACK's reverse communication - arpack.dsaupd(ido, bmat, n, which, nev.`val`, tolW, resid, ncv, v, n, iparam, ipntr, - workd, workl, workl.length, info) - } - - if (info.`val` != 0) { - info.`val` match { - case 1 => throw new IllegalStateException("ARPACK returns non-zero info = " + info.`val` + - " Maximum number of iterations taken. (Refer ARPACK user guide for details)") - case 3 => throw new IllegalStateException("ARPACK returns non-zero info = " + info.`val` + - " No shifts could be applied. Try to increase NCV. 
" + - "(Refer ARPACK user guide for details)") - case _ => throw new IllegalStateException("ARPACK returns non-zero info = " + info.`val` + - " Please refer ARPACK user guide for error message.") - } - } - - val d = new Array[Double](nev.`val`) - val select = new Array[Boolean](ncv) - // copy the Ritz vectors - val z = java.util.Arrays.copyOfRange(v, 0, nev.`val` * n) - - // call ARPACK's post-processing for eigenvectors - arpack.dseupd(true, "A", select, d, z, n, 0.0, bmat, n, which, nev, tol, resid, ncv, v, n, - iparam, ipntr, workd, workl, workl.length, info) - - // number of computed eigenvalues, might be smaller than k - val computed = iparam(4) - - val eigenPairs = java.util.Arrays.copyOfRange(d, 0, computed).zipWithIndex.map { r => - (r._1, java.util.Arrays.copyOfRange(z, r._2 * n, r._2 * n + n)) - } - - // sort the eigen-pairs in descending order - val sortedEigenPairs = eigenPairs.sortBy(- _._1) - - // copy eigenvectors in descending order of eigenvalues - val sortedU = BDM.zeros[Double](n, computed) - sortedEigenPairs.zipWithIndex.foreach { r => - val b = r._2 * n - var i = 0 - while (i < n) { - sortedU.data(b + i) = r._1._2(i) - i += 1 - } - } - - (BDV[Double](sortedEigenPairs.map(_._1)), sortedU) - } - - def symmetricEigsLocal( - matrix: BDM[Double], - n: Int, - k: Int, - tol: Double, - maxIterations: Int, - driverCores: Int): (BDV[Double], BDM[Double]) = { - // TODO: remove this function and use eigs in breeze when switching breeze version - require(n > k, s"Number of required eigenvalues $k must be smaller than matrix dimension $n") - - val threadNum = math.min( - if (driverCores < 2) DEFAULT_THREAD_NUM else driverCores, matrix.rows) - val blocks = DenseMatrixUtil.blockByRow(matrix, threadNum) - - val arpack = ARPACK.getInstance() - - // tolerance used in stopping criterion - val tolW = new doubleW(tol) - // number of desired eigenvalues, 0 < nev < n - val nev = new intW(k) - // nev Lanczos vectors are generated in the first iteration - // ncv-nev Lanczos vectors are generated in each subsequent iteration - // ncv must be smaller than n - val ncv = math.min(2 * k, n) - - // "I" for standard eigenvalue problem, "G" for generalized eigenvalue problem - val bmat = "I" - // "LM" : compute the NEV largest (in magnitude) eigenvalues - val which = "LM" - - var iparam = new Array[Int](11) - // use exact shift in each iteration - iparam(0) = 1 - // maximum number of Arnoldi update iterations, or the actual number of iterations on output - iparam(2) = maxIterations - // Mode 1: A*x = lambda*x, A symmetric - iparam(6) = 1 - - require(n * ncv.toLong <= Integer.MAX_VALUE && ncv * (ncv.toLong + 8) <= Integer.MAX_VALUE, - s"k = $k and/or n = $n are too large to compute an eigendecomposition") - - val ido = new intW(0) - val info = new intW(0) - val resid = new Array[Double](n) - val v = new Array[Double](n * ncv) - val workd = new Array[Double](n * 3) - val workl = new Array[Double](ncv * (ncv + 8)) - val ipntr = new Array[Int](11) - - // call ARPACK's reverse communication, first iteration with ido = 0 - arpack.dsaupd(ido, bmat, n, which, nev.`val`, tolW, resid, ncv, v, n, iparam, ipntr, workd, - workl, workl.length, info) - - val w = BDV(workd) - - // ido = 99 : done flag in reverse communication - while (ido.`val` != 99) { - if (ido.`val` != -1 && ido.`val` != 1) { - throw new IllegalStateException("ARPACK returns ido = " + ido.`val` + - " This flag is not compatible with Mode 1: A*x = lambda*x, A symmetric.") - } - - // multiply working vector with the matrix - val inputOffset = 
ipntr(0) - 1 - val outputOffset = ipntr(1) - 1 - val input = w.slice(inputOffset, inputOffset + n) - val output = Dgemv.compute(blocks, input) - System.arraycopy(output.data, 0, workd, outputOffset, n) - - // call ARPACK's reverse communication - arpack.dsaupd(ido, bmat, n, which, nev.`val`, tolW, resid, ncv, v, n, iparam, ipntr, - workd, workl, workl.length, info) - } - - if (info.`val` != 0) { - info.`val` match { - case 1 => throw new IllegalStateException("ARPACK returns non-zero info = " + info.`val` + - " Maximum number of iterations taken. (Refer ARPACK user guide for details)") - case 3 => throw new IllegalStateException("ARPACK returns non-zero info = " + info.`val` + - " No shifts could be applied. Try to increase NCV. " + - "(Refer ARPACK user guide for details)") - case _ => throw new IllegalStateException("ARPACK returns non-zero info = " + info.`val` + - " Please refer ARPACK user guide for error message.") - } - } - - val d = new Array[Double](nev.`val`) - val select = new Array[Boolean](ncv) - // copy the Ritz vectors - val z = java.util.Arrays.copyOfRange(v, 0, nev.`val` * n) - - // call ARPACK's post-processing for eigenvectors - arpack.dseupd(true, "A", select, d, z, n, 0.0, bmat, n, which, nev, tol, resid, ncv, v, n, - iparam, ipntr, workd, workl, workl.length, info) - - // number of computed eigenvalues, might be smaller than k - val computed = iparam(4) - - val eigenPairs = java.util.Arrays.copyOfRange(d, 0, computed).zipWithIndex.map { r => - (r._1, java.util.Arrays.copyOfRange(z, r._2 * n, r._2 * n + n)) - } - - // sort the eigen-pairs in descending order - val sortedEigenPairs = eigenPairs.sortBy(- _._1) - - // copy eigenvectors in descending order of eigenvalues - val sortedU = BDM.zeros[Double](n, computed) - sortedEigenPairs.zipWithIndex.foreach { r => - val b = r._2 * n - var i = 0 - while (i < n) { - sortedU.data(b + i) = r._1._2(i) - i += 1 - } - } - - (BDV[Double](sortedEigenPairs.map(_._1)), sortedU) - } -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/mllib/linalg/distributed/RowMatrix.scala b/ml-accelerator/src/main/scala/org/apache/spark/mllib/linalg/distributed/RowMatrix.scala deleted file mode 100644 index 0d62720..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/mllib/linalg/distributed/RowMatrix.scala +++ /dev/null @@ -1,910 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.spark.mllib.linalg.distributed - - -import java.util.Arrays - -import scala.collection.mutable.ListBuffer - -import breeze.linalg.{axpy => brzAxpy, inv, DenseMatrix => BDM, DenseVector => BDV, MatrixSingularException, SparseVector => BSV} -import breeze.linalg.blas.Gramian -import breeze.linalg.lapack.EigenDecomposition -import breeze.linalg.lapack.EigenDecomposition.Eigen -import breeze.numerics.{sqrt => brzSqrt} -import com.github.fommil.netlib.BLAS.{getInstance => blas} - -import org.apache.spark.annotation.Since -import org.apache.spark.internal.Logging -import org.apache.spark.ml.StaticUtils -import org.apache.spark.mllib.feature.SPCA -import org.apache.spark.mllib.linalg._ -import org.apache.spark.mllib.stat.{MultivariateOnlineSummarizer, MultivariateStatisticalSummary, Statistics} -import org.apache.spark.rdd.RDD -import org.apache.spark.storage.StorageLevel -import org.apache.spark.util.random.XORShiftRandom - - - -/** - * Represents a row-oriented distributed Matrix with no meaningful row indices. - * - * @param rows rows stored as an RDD[Vector] - * @param nRows number of rows. A non-positive value means unknown, and then the number of rows will - * be determined by the number of records in the RDD `rows`. - * @param nCols number of columns. A non-positive value means unknown, and then the number of - * columns will be determined by the size of the first row. - */ -@Since("1.0.0") -class RowMatrix @Since("1.0.0")( - @Since("1.0.0") val rows: RDD[Vector], - private var nRows: Long, - private var nCols: Int) extends DistributedMatrix with Logging { - - /** Alternative constructor leaving matrix dimensions to be determined automatically. */ - @Since("1.0.0") - def this(rows: RDD[Vector]) = this(rows, 0L, 0) - - /** Gets or computes the number of columns. */ - @Since("1.0.0") - override def numCols(): Long = { - if (nCols <= 0) { - try { - // Calling `first` will throw an exception if `rows` is empty. - nCols = rows.first().size - } catch { - case err: UnsupportedOperationException => - sys.error("Cannot determine the number of cols because it is not specified in the " + - "constructor and the rows RDD is empty.") - } - } - nCols - } - - /** Gets or computes the number of rows. */ - @Since("1.0.0") - override def numRows(): Long = { - if (nRows <= 0L) { - nRows = rows.count() - if (nRows == 0L) { - sys.error("Cannot determine the number of rows because it is not specified in the " + - "constructor and the rows RDD is empty.") - } - } - nRows - } - - /** - * Multiplies the Gramian matrix `A^T A` by a dense vector on the right without computing `A^T A`. - * - * @param v a dense vector whose length must match the number of columns of this matrix - * @return a dense vector representing the product - */ - private[mllib] def multiplyGramianMatrixBy(v: BDV[Double]): BDV[Double] = { - val n = numCols().toInt - val vbr = rows.context.broadcast(v) - rows.treeAggregate(BDV.zeros[Double](n))( - seqOp = (U, r) => { - val rBrz = r.asBreeze - val a = rBrz.dot(vbr.value) - rBrz match { - // use specialized axpy for better performance - case _: BDV[_] => brzAxpy(a, rBrz.asInstanceOf[BDV[Double]], U) - case _: BSV[_] => brzAxpy(a, rBrz.asInstanceOf[BSV[Double]], U) - case _ => throw new UnsupportedOperationException( - s"Do not support vector operation from type ${rBrz.getClass.getName}.") - } - U - }, combOp = (U1, U2) => U1 += U2) - } - - /** - * Computes the Gramian matrix `A^T A`. - * - * @note This cannot be computed on matrices with more than 65535 columns. 
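multiplyGramianMatrixBy above relies on the identity (A^T A) v = sum over rows r of (r . v) * r, which is what lets a single treeAggregate pass over the rows replace an explicit Gram matrix. A minimal local check of that identity in plain Breeze:

import breeze.linalg.{DenseMatrix => BDM, DenseVector => BDV}

val rows = Seq(BDV(1.0, 2.0), BDV(3.0, 4.0))
val v = BDV(0.5, -1.0)
val viaRows = rows.map(r => r * (r dot v)).reduce(_ + _) // sum_i (r_i . v) r_i
val a = BDM((1.0, 2.0), (3.0, 4.0))
assert(viaRows == a.t * (a * v))                         // both equal (A^T A) v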
- */ - @Since("1.0.0") - def computeGramianMatrix(): Matrix = { - if (rows.map(_.isInstanceOf[SparseVector]).reduce((x, y) => x && y)) { - RowMatrixUtil.computeGramMatrixAsDenseMatrix( - rows.map(_.asInstanceOf[SparseVector]), numCols().toInt) - } else { - computeDenseGramianMatrix() - } - } - - - /** - * Compute the leading k eigenvalues and eigenvectors on a symmetric square sparse matrix. - * - * @param n dimension of the square matrix (maximum Int.MaxValue). - * @param k number of leading eigenvalues required, where k must be positive and less than n. - * @param tol tolerance of the eigs computation. - * @param maxIter the maximum number of Arnoldi update iterations. - * @return a dense vector of eigenvalues in descending order and a dense matrix of eigenvectors - * (columns of the matrix). - * @note The number of computed eigenvalues might be smaller than k when some Ritz values do not - * satisfy the convergence criterion specified by tol (see ARPACK Users Guide, Chapter 4.6 - * for more details). The maximum number of Arnoldi update iterations is set to 300 in this - * function. - */ - def eigenValueDecompositionOnSparseMatrix( - n: Int, - k: Int, - tol: Double, - maxIter: Int): (BDV[Double], BDM[Double]) = { - val result = RowMatrixUtil.computeGramMatrix( - rows.map(_.asInstanceOf[SparseVector]), n) - EigenValueDecomposition.symmetricEigs( - RowMatrixUtil.multiplySparseGramMatrixBy(result), - n, k, tol, maxIter) - } - - /** - * Compute the leading k eigenvalues and eigenvectors on a symmetric square dense matrix. - * - * @param n dimension of the square matrix (maximum Int.MaxValue). - * @param k number of leading eigenvalues required, where k must be positive and less than n. - * @param tol tolerance of the eigs computation. - * @param maxIter the maximum number of Arnoldi update iterations. - * @return a dense vector of eigenvalues in descending order and a dense matrix of eigenvectors - * (columns of the matrix). - */ - def eigenValueDecompositionOnDenseMatrix( - n: Int, - k: Int, - tol: Double, - maxIter: Int): (BDV[Double], BDM[Double]) = { - val result = RowMatrixUtil.computeGramMatrix( - rows.map(_.asInstanceOf[SparseVector]), n) - val resultDenseMatrix = result._3.map{case ((i, j), sp) => - ((i, j), new BDM[Double](sp.numRows, sp.numCols, sp.toArray))} - val newResult = (result._1, result._2, resultDenseMatrix) - EigenValueDecomposition.symmetricEigs( - RowMatrixUtil.multiplyDenseGramMatrixBy(newResult), - n, k, tol, maxIter) - } - - /** - * Computes the Gramian matrix `A^T A` of dense matrix. 
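The dense Gramian below is accumulated in packed upper-triangular form of length n(n + 1)/2, where row i starts at offset (2n - i + 1) * i / 2, the same srcOffset expression used when the full square matrix is filled in afterwards. A quick sketch of that index mapping (rowOffset is an illustrative name):

// Element (i, j) with j >= i lives at rowOffset(n, i) + (j - i) in the packed array.
def rowOffset(n: Int, i: Int): Int = (2 * n - i + 1) * i / 2
// For n = 3: (0,0) -> 0, (0,1) -> 1, (0,2) -> 2, (1,1) -> 3, (1,2) -> 4, (2,2) -> 5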
- * @return Gramian matrix - */ - def computeDenseGramianMatrix(): Matrix = { - val n = numCols().toInt - checkNumColumns(n) - - // compute the upper triangular matrix - val gramianLen = n * (n + 1) / 2 - val gramian = rows.mapPartitions(iter => { - val subMatrixValues = iter.map(_.toArray).toArray - val subMatrixRow = subMatrixValues.length - val localCovariance = new Array[Double](gramianLen) - Gramian.compute(subMatrixValues.flatten, localCovariance, subMatrixRow, n) - Array(localCovariance).iterator - }).treeReduce((cov1, cov2) => { - blas.daxpy(cov1.length, 1.0, cov2, 1, cov1, 1) - cov1 - }, depth = 4) - - // full fill the gramian matrix - val fullGramian = new Array[Double](n * n) - for(i <- 0 until n) { - val srcOffset = (2 * n - i + 1) * i / 2 - fullGramian(i * n + i) = gramian(srcOffset) - for(j <- i until n) { - val v = gramian(srcOffset + j - i) - fullGramian(i * n + j) = v - fullGramian(j * n + i) = v - } - } - - new DenseMatrix(n, n, fullGramian) - } - - - private def checkNumColumns(cols: Int): Unit = { - if (cols > 65535) { - throw new IllegalArgumentException(s"Argument with more than 65535 cols: $cols") - } - if (cols > 10000) { - val memMB = (cols.toLong * cols) / 125000 - logWarning(s"$cols columns will require at least $memMB megabytes of memory!") - } - } - - /** - * Computes singular value decomposition of this matrix. Denote this matrix by A (m x n). This - * will compute matrices U, S, V such that A ~= U * S * V', where S contains the leading k - * singular values, U and V contain the corresponding singular vectors. - * - * At most k largest non-zero singular values and associated vectors are returned. If there are k - * such values, then the dimensions of the return will be: - * - U is a RowMatrix of size m x k that satisfies U' * U = eye(k), - * - s is a Vector of size k, holding the singular values in descending order, - * - V is a Matrix of size n x k that satisfies V' * V = eye(k). - * - * We assume n is smaller than m, though this is not strictly required. - * The singular values and the right singular vectors are derived - * from the eigenvalues and the eigenvectors of the Gramian matrix A' * A. U, the matrix - * storing the right singular vectors, is computed via matrix multiplication as - * U = A * (V * S^-1^), if requested by user. The actual method to use is determined - * automatically based on the cost: - * - If n is small (n < 100) or k is large compared with n (k > n / 2), we compute - * the Gramian matrix first and then compute its top eigenvalues and eigenvectors locally - * on the driver. This requires a single pass with O(n^2^) storage on each executor and - * on the driver, and O(n^2^ k) time on the driver. - * - Otherwise, we compute (A' * A) * v in a distributive way and send it to ARPACK's DSAUPD to - * compute (A' * A)'s top eigenvalues and eigenvectors on the driver node. This requires O(k) - * passes, O(n) storage on each executor, and O(n k) storage on the driver. - * - * Several internal parameters are set to default values. The reciprocal condition number rCond - * is set to 1e-9. All singular values smaller than rCond * sigma(0) are treated as zeros, where - * sigma(0) is the largest singular value. The maximum number of Arnoldi update iterations for - * ARPACK is set to 300 or k * 3, whichever is larger. The numerical tolerance for ARPACK's - * eigen-decomposition is set to 1e-10. - * - * @param k number of leading singular values to keep (0 < k <= n). 
- * It might return less than k if - * there are numerically zero singular values or there are not enough Ritz values - * converged before the maximum number of Arnoldi update iterations is reached (in case - * that matrix A is ill-conditioned). - * @param computeU whether to compute U - * @param rCond the reciprocal condition number. All singular values smaller than rCond * sigma(0) - * are treated as zero, where sigma(0) is the largest singular value. - * @return SingularValueDecomposition(U, s, V). U = null if computeU = false. - * @note The conditions that decide which method to use internally and the default parameters are - * subject to change. - */ - @Since("1.0.0") - def computeSVD( - k: Int, - computeU: Boolean = false, - rCond: Double = 1e-9): SingularValueDecomposition[RowMatrix, Matrix] = { - // maximum number of Arnoldi update iterations for invoking ARPACK - val maxIter = math.max(300, k * 3) - // numerical tolerance for invoking ARPACK - val tol = 1e-10 - computeSVD(k, computeU, rCond, maxIter, tol, "auto") - } - - /** - * The actual SVD implementation, visible for testing. - * - * @param k number of leading singular values to keep (0 < k <= n) - * @param computeU whether to compute U - * @param rCond the reciprocal condition number - * @param maxIter max number of iterations (if ARPACK is used) - * @param tol termination tolerance (if ARPACK is used) - * @param mode computation mode (auto: determine automatically which mode to use, - * local-svd: compute gram matrix and computes its full SVD locally, - * local-eigs: compute gram matrix and computes its top eigenvalues locally, - * dist-eigs: compute the top eigenvalues of the gram matrix distributively) - * @return SingularValueDecomposition(U, s, V). U = null if computeU = false. - */ - private[mllib] def computeSVD( - k: Int, - computeU: Boolean, - rCond: Double, - maxIter: Int, - tol: Double, - mode: String): SingularValueDecomposition[RowMatrix, Matrix] = { - val n = numCols().toInt - require(k > 0 && k <= n, s"Requested k singular values but got k=$k and numCols=$n.") - - object SVDMode extends Enumeration { - val LocalARPACK, LocalLAPACK, DistARPACK = Value - } - - val modeStr = if (mode == "auto") RowMatrixUtil.selectSVDBranch(n, k) else mode - val computeMode = modeStr match { - case "local-svd" => SVDMode.LocalLAPACK - case "local-eigs" => SVDMode.LocalARPACK - case "dist-eigs" => SVDMode.DistARPACK - case _ => throw new IllegalArgumentException(s"Do not support mode $mode.") - } - - val isSparse: Boolean = rows.map(_.isInstanceOf[SparseVector]).reduce((x, y) => x && y) - - // Compute the eigen-decomposition of A' * A. 
- val (sigmaSquares: BDV[Double], u: BDM[Double]) = computeMode match { - case SVDMode.LocalARPACK => - require(k < n, s"k must be smaller than n in local-eigs mode but got k=$k and n=$n.") - if (isSparse) { - eigenValueDecompositionOnDenseMatrix(n, k, tol, maxIter) - } else { - val G = computeDenseGramianMatrix().asBreeze.asInstanceOf[BDM[Double]] - val driverCores = RowMatrixUtil.parseExtraParams(rows.sparkContext, -1) - EigenValueDecomposition.symmetricEigsLocal(G, n, k, tol, maxIter, driverCores) - } - case SVDMode.LocalLAPACK => - // svd latent constraint, 2 * n * n + 6 * n + 1 < Int.MaxValue - require(n < 32767, s"$n exceeds the breeze svd capability") - val G = computeGramianMatrix().asBreeze.asInstanceOf[BDM[Double]] - val Eigen(uFull, sigmaSquaresFull) = EigenDecomposition.symmetricEigenDecomposition(G) - (sigmaSquaresFull, uFull) - case SVDMode.DistARPACK => - if (rows.getStorageLevel == StorageLevel.NONE) { - logWarning("The input data is not directly cached, which may hurt performance if its" - + " parent RDDs are also uncached.") - } - require(k < n, s"k must be smaller than n in dist-eigs mode but got k=$k and n=$n.") - if (isSparse) { - eigenValueDecompositionOnSparseMatrix(n, k, tol, maxIter) - } else { - EigenValueDecomposition.symmetricEigs(multiplyGramianMatrixBy, n, k, tol, maxIter) - } - } - - val sigmas: BDV[Double] = brzSqrt(sigmaSquares) - - // Determine the effective rank. - val sigma0 = sigmas(0) - val threshold = rCond * sigma0 - var i = 0 - // sigmas might have a length smaller than k, if some Ritz values do not satisfy the convergence - // criterion specified by tol after max number of iterations. - // Thus use i < min(k, sigmas.length) instead of i < k. - if (sigmas.length < k) { - logWarning(s"Requested $k singular values but only found ${sigmas.length} converged.") - } - while (i < math.min(k, sigmas.length) && sigmas(i) >= threshold) { - i += 1 - } - val sk = i - - if (sk < k) { - logWarning(s"Requested $k singular values but only found $sk nonzeros.") - } - - // Warn at the end of the run as well, for increased visibility. - if (computeMode == SVDMode.DistARPACK && rows.getStorageLevel == StorageLevel.NONE) { - logWarning("The input data was not directly cached, which may hurt performance if its" - + " parent RDDs are also uncached.") - } - - val s = Vectors.dense(Arrays.copyOfRange(sigmas.data, 0, sk)) - val V = Matrices.dense(n, sk, Arrays.copyOfRange(u.data, 0, n * sk)) - - if (computeU) { - // N = Vk * Sk^{-1} - val N = new BDM[Double](n, sk, Arrays.copyOfRange(u.data, 0, n * sk)) - var i = 0 - var j = 0 - while (j < sk) { - i = 0 - val sigma = sigmas(j) - while (i < n) { - N(i, j) /= sigma - i += 1 - } - j += 1 - } - val U = this.multiply(Matrices.fromBreeze(N)) - SingularValueDecomposition(U, s, V) - } else { - SingularValueDecomposition(null, s, V) - } - } - - /** - * Distributed algorithm of computing covariance matrix for a dense matrix with dimension (m,n). 
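Both covariance paths below reduce to the same identity: cov(i, j) = G(i, j) / (m - 1) - (m / (m - 1)) * mean(i) * mean(j), where G = X^T X is the Gram matrix of the m x n data. A small Breeze check with illustrative values:

import breeze.linalg.{sum, DenseMatrix => BDM, DenseVector => BDV}

val x = BDM((1.0, 2.0), (3.0, 5.0), (5.0, 11.0)) // m = 3 samples, n = 2 features
val m = x.rows.toDouble
val g = x.t * x                                   // Gram matrix X^T X
val mean = BDV(sum(x(::, 0)) / m, sum(x(::, 1)) / m)
val cov = g / (m - 1.0) - (mean * mean.t) * (m / (m - 1.0))
// cov(0, 1) == 9.0, matching sum_i (x_i - 3)(y_i - 6) / (m - 1) computed directly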
- * @param mean Mean value vector of size n - * @param n Column number - * @param m Row number - * @return Covariance matrix - */ - private def computeDenseVectorCovariance(mean: Vector, n: Int, m: Long): Matrix = { - val meanBroadcast = rows.context.broadcast(mean) - - // centralize matrix - val centralizedRows = rows.map(row => { - val mean = meanBroadcast.value - val centralizedRow = new Array[Double](n) - for (idx <- 0 until n) - centralizedRow(idx) = row(idx) - mean(idx) - Vectors.dense(centralizedRow) - }) - - // compute the upper triangular matrix - val covarianceLen = n * (n + 1) / 2 - val covariance = centralizedRows.mapPartitions(iter => { - val subMatrixValues = iter.map(_.toArray).toArray - val subMatrixRow = subMatrixValues.length - val localCovariance = new Array[Double](covarianceLen) - Gramian.compute(subMatrixValues.flatten, localCovariance, subMatrixRow, n) - Array(localCovariance).iterator - }).treeReduce((cov1, cov2) => { - blas.daxpy(cov1.length, 1.0, cov2, 1, cov1, 1) - cov1 - }, depth = 4) - - // full fill the covariance matrix - val fullCovariance = new Array[Double](n * n) - val m1 = m - 1.0 - for(i <- StaticUtils.ZERO_INT until n) { - val srcOffset = (2 * n - i + 1) * i / 2 - fullCovariance(i * n + i) = covariance(srcOffset) / m1 - for(j <- i + 1 until n) { - val v = covariance(srcOffset + j - i) / m1 - fullCovariance(i * n + j) = v - fullCovariance(j * n + i) = v - } - } - - new DenseMatrix(n, n, fullCovariance) - } - - /** - * Distributed algorithm of computing covariance matrix for a sparse matrix with dimension (m,n). - * @param mean Mean value vector of size n - * @param n Column number - * @param m Row number - * @return Covariance matrix - */ - def computeSparseVectorCovariance(mean: Vector, n: Int, m: Long): Matrix = { - val G = RowMatrixUtil.computeGramMatrixAsDenseMatrix( - rows.map(_.asInstanceOf[SparseVector]), n) - var i = 0 - var j = 0 - val m1 = m - 1.0 - var alpha = 0.0 - while (i < n) { - alpha = m / m1 * mean(i) - j = i - while (j < n) { - val Gij = G(i, j) / m1 - alpha * mean(j) - G(i, j) = Gij - G(j, i) = Gij - j += 1 - } - i += 1 - } - G - } - - /** - * Compute covariance matrix with formula Cov(X, Y) = E[(X-E(X))(Y-E(Y))] - * @return Covariance matrix - */ - def computeCovariance(): Matrix = { - val isSparse = rows.map(_.isInstanceOf[SparseVector]).reduce((x, y) => x && y) - - val n = numCols().toInt - checkNumColumns(n) - val summary = computeColumnSummaryStatistics() - val m = summary.count - require(m > 1, s"RowMatrix.computeCovariance called on matrix with only $m rows." + - " Cannot compute the covariance of a RowMatrix with <= 1 row.") - val mean = summary.mean - - if (isSparse) { - computeSparseVectorCovariance(mean, n, m) - } else { - computeDenseVectorCovariance(mean, n, m) - } - } - - /** - * Computes the top k principal components and a vector of proportions of - * variance explained by each principal component. - * Rows correspond to observations and columns correspond to variables. - * The principal components are stored a local matrix of size n-by-k. - * Each column corresponds for one principal component, - * and the columns are in descending order of component variance. - * The row data do not need to be "centered" first; it is not necessary for - * the mean of each column to be 0. - * - * @param k number of top principal components. - * @param mode number of top principal components. 
- * @return a matrix of size n-by-k, whose columns are principal components, and - * a vector of values which indicate how much variance each principal component - * explains - */ - @Since("1.6.0") - def computePrincipalComponentsAndExplainedVarianceBody( - k: Int, - mode: String = "auto"): (Matrix, Vector) = { - val n = numCols().toInt - require(k > 0 && k <= n, s"k = $k out of range (0, n = $n]") - - object PCAMode extends Enumeration { - val Correlation, SVD, SparseSVD = Value - } - val checkSparseBranch = if (rows.map(_.isInstanceOf[SparseVector]) - .reduce((x, y) => x && y)) { - PCAMode.SparseSVD - } else { - PCAMode.SVD - } - val computeMode = mode match { - case "Correlation" => PCAMode.Correlation - case "SVD" => checkSparseBranch - case _ => - if (n == k || n < 1500) { - PCAMode.Correlation - } else { - checkSparseBranch - } - } - computeMode match { - case PCAMode.Correlation => - val cov = computeCovariance().asBreeze.asInstanceOf[BDM[Double]] - val Eigen(u, s) = EigenDecomposition.symmetricEigenDecomposition(cov) - - val eigenSum = s.data.sum - val explainedVariance = s.data.map(_ / eigenSum) - if (k == n) { - (Matrices.dense(n, k, u.data), Vectors.dense(explainedVariance)) - } else { - (Matrices.dense(n, k, Arrays.copyOfRange(u.data, 0, n * k)), - Vectors.dense(Arrays.copyOfRange(explainedVariance, 0, k))) - } - case PCAMode.SVD => - val stas = Statistics.colStats(rows) - val meanVector = stas.mean.asBreeze - val centredMatrix = new RowMatrix(rows.map { rowVector => - Vectors.fromBreeze(rowVector.asBreeze - meanVector) - }) - val svd = centredMatrix.computeSVD(k) - val s = svd.s.toArray.map(eigValue => eigValue * eigValue / (numRows().toInt - 1)) - val eigenSum = stas.variance.toArray.sum - val explainedVariance = s.map(_ / eigenSum) - (svd.V, Vectors.dense(explainedVariance)) - case PCAMode.SparseSVD => - val model = new SPCA(k).fit(rows) - (model.pc.asInstanceOf[Matrix], model.explainedVariance.asInstanceOf[Vector]) - } - } - - /** - * Computes the top k principal components and a vector of proportions of - * variance explained by each principal component. - * - * @param k number of top principal components. - * @return a matrix of size n-by-k, whose columns are principal components, and - * a vector of values which indicate how much variance each principal component - * explains - */ - def computePrincipalComponentsAndExplainedVariance(k: Int): (Matrix, Vector) = { - computePrincipalComponentsAndExplainedVarianceBody(k) - } - - /** - * Computes the top k principal components only. - * - * @param k number of top principal components. - * @return a matrix of size n-by-k, whose columns are principal components - * @see computePrincipalComponentsAndExplainedVariance - */ - @Since("1.0.0") - def computePrincipalComponents(k: Int): Matrix = { - computePrincipalComponentsAndExplainedVariance(k)._1 - } - - /** - * Computes column-wise summary statistics. - */ - @Since("1.0.0") - def computeColumnSummaryStatistics(): MultivariateStatisticalSummary = { - val summary = rows.treeAggregate(new MultivariateOnlineSummarizer)( - (aggregator, data) => aggregator.add(data), - (aggregator1, aggregator2) => aggregator1.merge(aggregator2)) - updateNumRows(summary.count) - summary - } - - /** - * Multiply this matrix by a local matrix on the right. 
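A hypothetical usage sketch for the multiply method below, assuming an existing SparkContext sc; B is broadcast once and each row of A is multiplied against it locally, so the product preserves A's partitioning:

import org.apache.spark.mllib.linalg.{Matrices, Vectors}
import org.apache.spark.mllib.linalg.distributed.RowMatrix

val mat = new RowMatrix(sc.parallelize(Seq(
  Vectors.dense(1.0, 2.0),
  Vectors.dense(3.0, 4.0))))
val b = Matrices.dense(2, 1, Array(0.5, 0.5)) // 2 x 1, column-major values
val product = mat.multiply(b)                 // 2 x 1 RowMatrix with rows [1.5] and [3.5]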
- * - * @param B a local matrix whose number of rows must match the number of columns of this matrix - * @return a [[org.apache.spark.mllib.linalg.distributed.RowMatrix]] representing the product, - * which preserves partitioning - */ - @Since("1.0.0") - def multiply(B: Matrix): RowMatrix = { - val n = numCols().toInt - val k = B.numCols - require(n == B.numRows, s"Dimension mismatch: $n vs ${B.numRows}") - - require(B.isInstanceOf[DenseMatrix], - s"Only support dense matrix at this time but found ${B.getClass.getName}.") - - val Bb = rows.context.broadcast(B.asBreeze.asInstanceOf[BDM[Double]].toDenseVector.toArray) - val AB = rows.mapPartitions { iter => - val Bi = Bb.value - iter.map { row => - val v = BDV.zeros[Double](k) - var i = 0 - while (i < k) { - v(i) = row.asBreeze.dot(new BDV(Bi, i * n, 1, n)) - i += 1 - } - Vectors.fromBreeze(v) - } - } - - new RowMatrix(AB, nRows, B.numCols) - } - - /** - * Compute all cosine similarities between columns of this matrix using the brute-force - * approach of computing normalized dot products. - * - * @return An n x n sparse upper-triangular matrix of cosine similarities between - * columns of this matrix. - */ - @Since("1.2.0") - def columnSimilarities(): CoordinateMatrix = { - columnSimilarities(0.0) - } - - /** - * Compute similarities between columns of this matrix using a sampling approach. - * - * The threshold parameter is a trade-off knob between estimate quality and computational cost. - * - * Setting a threshold of 0 guarantees deterministic correct results, but comes at exactly - * the same cost as the brute-force approach. Setting the threshold to positive values - * incurs strictly less computational cost than the brute-force approach, however the - * similarities computed will be estimates. - * - * The sampling guarantees relative-error correctness for those pairs of columns that have - * similarity greater than the given similarity threshold. - * - * To describe the guarantee, we set some notation: - * Let A be the smallest in magnitude non-zero element of this matrix. - * Let B be the largest in magnitude non-zero element of this matrix. - * Let L be the maximum number of non-zeros per row. - * - * For example, for {0,1} matrices: A=B=1. - * Another example, for the Netflix matrix: A=1, B=5 - * - * For those column pairs that are above the threshold, - * the computed similarity is correct to within 20% relative error with probability - * at least 1 - (0.981)^10/B^ - * - * The shuffle size is bounded by the *smaller* of the following two expressions: - * - * O(n log(n) L / (threshold * A)) - * O(m L^2^) - * - * The latter is the cost of the brute-force approach, so for non-zero thresholds, - * the cost is always cheaper than the brute-force approach. - * - * @param threshold Set to 0 for deterministic guaranteed correctness. - * Similarities above this threshold are estimated - * with the cost vs estimate quality trade-off described above. - * @return An n x n sparse upper-triangular matrix of cosine similarities - * between columns of this matrix. 
- */ - @Since("1.2.0") - def columnSimilarities(threshold: Double): CoordinateMatrix = { - require(threshold >= 0, s"Threshold cannot be negative: $threshold") - - if (threshold > 1) { - logWarning(s"Threshold is greater than 1: $threshold " + - "Computation will be more efficient with promoted sparsity, " + - " however there is no correctness guarantee.") - } - - val gamma = if (threshold < 1e-6) { - Double.PositiveInfinity - } else { - 10 * math.log(numCols()) / threshold - } - - columnSimilaritiesDIMSUM(computeColumnSummaryStatistics().normL2.toArray, gamma) - } - - /** - * Compute QR decomposition for [[RowMatrix]]. The implementation is designed to optimize the QR - * decomposition (factorization) for the [[RowMatrix]] of a tall and skinny shape. - * Reference: - * Paul G. Constantine, David F. Gleich. "Tall and skinny QR factorizations in MapReduce - * architectures" (see here) - * - * @param computeQ whether to computeQ - * @return QRDecomposition(Q, R), Q = null if computeQ = false. - */ - @Since("1.5.0") - def tallSkinnyQR(computeQ: Boolean = false): QRDecomposition[RowMatrix, Matrix] = { - val col = numCols().toInt - // split rows horizontally into smaller matrices, and compute QR for each of them - val blockQRs = rows.retag(classOf[Vector]).glom().filter(_.length != 0).map { partRows => - val bdm = BDM.zeros[Double](partRows.length, col) - var i = 0 - partRows.foreach { row => - bdm(i, ::) := row.asBreeze.t - i += 1 - } - breeze.linalg.qr.reduced(bdm).r - } - - // combine the R part from previous results vertically into a tall matrix - val combinedR = blockQRs.treeReduce { (r1, r2) => - val stackedR = BDM.vertcat(r1, r2) - breeze.linalg.qr.reduced(stackedR).r - } - - val finalR = Matrices.fromBreeze(combinedR.toDenseMatrix) - val finalQ = if (computeQ) { - try { - val invR = inv(combinedR) - this.multiply(Matrices.fromBreeze(invR)) - } catch { - case err: MatrixSingularException => - logWarning("R is not invertible and return Q as null") - null - } - } else { - null - } - QRDecomposition(finalQ, finalR) - } - - /** - * Find all similar columns using the DIMSUM sampling algorithm, described in two papers - * - * http://arxiv.org/abs/1206.2082 - * http://arxiv.org/abs/1304.1467 - * - * @param colMags A vector of column magnitudes - * @param gamma The oversampling parameter. For provable results, set to 10 * log(n) / s, - * where s is the smallest similarity score to be estimated, - * and n is the number of columns - * @return An n x n sparse upper-triangular matrix of cosine similarities - * between columns of this matrix. 
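The DIMSUM scheme documented above keeps a product term involving column i with probability proportional to sqrt(gamma) / ||c_i|| and divides sampled values by q(i) = min(sqrt(gamma), ||c_i||), which keeps the estimator unbiased for the column pairs above the threshold. A tiny sketch of those two knobs with illustrative magnitudes:

val gamma = 10 * math.log(1000) / 0.5      // oversampling for n = 1000 columns, threshold 0.5
val sg = math.sqrt(gamma)
val colMagnitudes = Array(0.9, 4.2, 37.0)  // hypothetical column L2 norms
val p = colMagnitudes.map(sg / _)          // per-column sampling probability (values above 1 always pass)
val q = colMagnitudes.map(math.min(sg, _)) // per-column rescaling divisor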
- */ - private[mllib] def columnSimilaritiesDIMSUM( - colMags: Array[Double], - gamma: Double): CoordinateMatrix = { - require(gamma > 1.0, s"Oversampling should be greater than 1: $gamma") - require(colMags.size == this.numCols(), "Number of magnitudes didn't match column dimension") - val sg = math.sqrt(gamma) // sqrt(gamma) used many times - - // Don't divide by zero for those columns with zero magnitude - val colMagsCorrected = colMags.map(x => if (x == 0) 1.0 else x) - - val sc = rows.context - val pBV = sc.broadcast(colMagsCorrected.map(c => sg / c)) - val qBV = sc.broadcast(colMagsCorrected.map(c => math.min(sg, c))) - - val sims = rows.mapPartitionsWithIndex { (indx, iter) => - val p = pBV.value - val q = qBV.value - - val rand = new XORShiftRandom(indx) - val scaled = new Array[Double](p.size) - iter.flatMap { row => - row match { - case SparseVector(size, indices, values) => - val nnz = indices.size - var k = 0 - while (k < nnz) { - scaled(k) = values(k) / q(indices(k)) - k += 1 - } - - Iterator.tabulate(nnz) { k => - val buf = new ListBuffer[((Int, Int), Double)]() - val i = indices(k) - val iVal = scaled(k) - if (iVal != 0 && rand.nextDouble() < p(i)) { - var l = k + 1 - while (l < nnz) { - val j = indices(l) - val jVal = scaled(l) - if (jVal != 0 && rand.nextDouble() < p(j)) { - buf += (((i, j), iVal * jVal)) - } - l += 1 - } - } - buf - }.flatten - case DenseVector(values) => - val n = values.size - var i = 0 - while (i < n) { - scaled(i) = values(i) / q(i) - i += 1 - } - Iterator.tabulate(n) { i => - val buf = new ListBuffer[((Int, Int), Double)]() - val iVal = scaled(i) - if (iVal != 0 && rand.nextDouble() < p(i)) { - var j = i + 1 - while (j < n) { - val jVal = scaled(j) - if (jVal != 0 && rand.nextDouble() < p(j)) { - buf += (((i, j), iVal * jVal)) - } - j += 1 - } - } - buf - }.flatten - } - } - }.reduceByKey(_ + _).map { case ((i, j), sim) => - MatrixEntry(i.toLong, j.toLong, sim) - } - new CoordinateMatrix(sims, numCols(), numCols()) - } - - private[mllib] override def toBreeze(): BDM[Double] = { - val m = numRows().toInt - val n = numCols().toInt - val mat = BDM.zeros[Double](m, n) - var i = 0 - rows.collect().foreach { vector => - vector.foreachActive { case (j, v) => - mat(i, j) = v - } - i += 1 - } - mat - } - - /** Updates or verifies the number of rows. */ - private def updateNumRows(m: Long) { - if (nRows <= 0) { - nRows = m - } else { - require(nRows == m, - s"The number of rows $m is different from what specified or previously computed: ${nRows}.") - } - } -} - -@Since("1.0.0") -object RowMatrix { - - /** - * Fills a full square matrix from its upper triangular part. - */ - private def triuToFull(n: Int, U: Array[Double]): Matrix = { - val G = new BDM[Double](n, n) - - var row = 0 - var col = 0 - var idx = 0 - var value = 0.0 - while (col < n) { - row = 0 - while (row < col) { - value = U(idx) - G(row, col) = value - G(col, row) = value - idx += 1 - row += 1 - } - G(col, col) = U(idx) - idx += 1 - col += 1 - } - - Matrices.dense(n, n, G.data) - } -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/mllib/stat/correlation/Correlation.scala b/ml-accelerator/src/main/scala/org/apache/spark/mllib/stat/correlation/Correlation.scala deleted file mode 100644 index 5d58eed..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/mllib/stat/correlation/Correlation.scala +++ /dev/null @@ -1,103 +0,0 @@ -// scalastyle:off header.matches -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. 
-* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.mllib.stat.correlation - -import org.apache.spark.mllib.linalg.{DenseVector, Matrix, Vector} -import org.apache.spark.rdd.RDD - -/** - * Trait for correlation algorithms. - */ -private[stat] trait Correlation { - - /** - * Compute correlation for two datasets. - */ - def computeCorrelation(x: RDD[Double], y: RDD[Double]): Double - - /** - * Compute the correlation matrix S, for the input matrix, where S(i, j) is the correlation - * between column i and j. S(i, j) can be NaN if the correlation is undefined for column i and j. - */ - def computeCorrelationMatrix(X: RDD[Vector]): Matrix - - /** - * Combine the two input RDD[Double]s into an RDD[Vector] and compute the correlation using the - * correlation implementation for RDD[Vector]. Can be NaN if correlation is undefined for the - * input vectors. - */ - def computeCorrelationWithMatrixImpl(x: RDD[Double], y: RDD[Double]): Double = { - val mat: RDD[Vector] = x.zip(y).map { case (xi, yi) => new DenseVector(Array(xi, yi)) } - computeCorrelationMatrix(mat)(0, 1) - } - -} - -/** - * Delegates computation to the specific correlation object based on the input method name. - */ -private[stat] object Correlations { - - def corr(x: RDD[Double], - y: RDD[Double], - method: String = CorrelationNames.defaultCorrName): Double = { - val correlation = getCorrelationFromName(method) - correlation.computeCorrelation(x, y) - } - - def corrMatrix(X: RDD[Vector], - method: String = CorrelationNames.defaultCorrName): Matrix = { - val correlation = getCorrelationFromName(method) - correlation.computeCorrelationMatrix(X) - } - - // Match input correlation name with a known name via simple string matching. - def getCorrelationFromName(method: String): Correlation = { - try { - CorrelationNames.nameToObjectMap(method) - } catch { - case nse: NoSuchElementException => - throw new IllegalArgumentException("Unrecognized method name. Supported correlations: " - + CorrelationNames.nameToObjectMap.keys.mkString(", ")) - } - } -} - -/** - * Maintains supported and default correlation names. - * - * Currently supported correlations: `pearson`, `spearman`. - * Current default correlation: `pearson`. - * - * After new correlation algorithms are added, please update the documentation here and in - * Statistics.scala for the correlation APIs. - */ -private[mllib] object CorrelationNames { - - // Note: after new types of correlations are implemented, please update this map. 
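// Editor's sketch (assumes a live SparkContext `sc`): the Correlations dispatcher
// above is package-private and is normally reached through the public
// org.apache.spark.mllib.stat.Statistics facade. Unknown method names raise an
// IllegalArgumentException listing the supported keys ("pearson", "spearman").
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.stat.Statistics

val vecs = sc.parallelize(Seq(
  Vectors.dense(1.0, 10.0, 100.0),
  Vectors.dense(2.0, 22.0, 91.0),
  Vectors.dense(3.0, 29.0, 85.0)))
println(Statistics.corr(vecs))              // Pearson (the default)
println(Statistics.corr(vecs, "spearman"))  // rank-based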
- val nameToObjectMap = Map(("pearson", PearsonCorrelation), ("spearman", SpearmanCorrelation)) - val defaultCorrName: String = "pearson" - -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/mllib/stat/correlation/PearsonCorrelation.scala b/ml-accelerator/src/main/scala/org/apache/spark/mllib/stat/correlation/PearsonCorrelation.scala deleted file mode 100644 index 5bee973..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/mllib/stat/correlation/PearsonCorrelation.scala +++ /dev/null @@ -1,119 +0,0 @@ -// scalastyle:off header.matches -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.mllib.stat.correlation - -import breeze.linalg.{DenseMatrix => BDM} - -import org.apache.spark.internal.Logging -import org.apache.spark.mllib.linalg.{DenseVector, Matrices, Matrix, SparseVector, Vector} -import org.apache.spark.mllib.linalg.distributed.RowMatrix -import org.apache.spark.rdd.RDD - -/** - * Compute Pearson correlation for two RDDs of the type RDD[Double] or the correlation matrix - * for an RDD of the type RDD[Vector]. - * - * Definition of Pearson correlation can be found at - * http://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient - */ -private[stat] object PearsonCorrelation extends Correlation with Logging { - - /** - * Compute the Pearson correlation for two datasets. NaN if either vector has 0 variance. - */ - override def computeCorrelation(x: RDD[Double], y: RDD[Double]): Double = { - computeCorrelationWithMatrixImpl(x, y) - } - - /** - * Compute the Pearson correlation matrix S, for the input matrix, where S(i, j) is the - * correlation between column i and j. 0 covariance results in a correlation value of Double.NaN. - */ - override def computeCorrelationMatrix(X: RDD[Vector]): Matrix = { - val isSparse: Boolean = X.map(_.isInstanceOf[SparseVector]).treeReduce((x, y) => x && y) - if (isSparse) { - val rowMatrix = new RowMatrix(X) - val cov = rowMatrix.computeCovariance() - computeCorrelationMatrixFromCovariance(cov) - } else { - PearsonCorrelationUtil.computeDenseVectorCorrelation(X.map(_.asInstanceOf[DenseVector])) - } - } - - /** - * Compute the Pearson correlation matrix from the covariance matrix. - * 0 variance results in a correlation value of Double.NaN. 
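// Editor's sketch of the identity implemented by the method below:
// corr(i, j) = cov(i, j) / (sigma_i * sigma_j), with Double.NaN whenever either
// column has (numerically) zero variance. Plain 2-D arrays stand in for Breeze.
def covToCorr(cov: Array[Array[Double]]): Array[Array[Double]] = {
  val n = cov.length
  val sigma = Array.tabulate(n) { i =>
    if (math.abs(cov(i)(i)) <= 1e-12) 0.0 else math.sqrt(cov(i)(i))
  }
  Array.tabulate(n, n) { (i, j) =>
    if (i == j) 1.0
    else if (sigma(i) == 0.0 || sigma(j) == 0.0) Double.NaN
    else cov(i)(j) / (sigma(i) * sigma(j))
  }
}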
- */ - def computeCorrelationMatrixFromCovariance(covarianceMatrix: Matrix): Matrix = { - val cov = covarianceMatrix.asBreeze.asInstanceOf[BDM[Double]] - val n = cov.cols - - // Compute the standard deviation on the diagonals first - var i = 0 - while (i < n) { - // TODO remove once covariance numerical issue resolved. - cov(i, i) = if (closeToZero(cov(i, i))) 0.0 else math.sqrt(cov(i, i)) - i +=1 - } - - // Loop through columns since cov is column major - var j = 0 - var sigma = 0.0 - var containNaN = false - while (j < n) { - sigma = cov(j, j) - i = 0 - while (i < j) { - val corr = if (sigma == 0.0 || cov(i, i) == 0.0) { - containNaN = true - Double.NaN - } else { - cov(i, j) / (sigma * cov(i, i)) - } - cov(i, j) = corr - cov(j, i) = corr - i += 1 - } - j += 1 - } - - // put 1.0 on the diagonals - i = 0 - while (i < n) { - cov(i, i) = 1.0 - i +=1 - } - - if (containNaN) { - logWarning("Pearson correlation matrix contains NaN values.") - } - - Matrices.fromBreeze(cov) - } - - private def closeToZero(value: Double, threshold: Double = 1e-12): Boolean = { - math.abs(value) <= threshold - } -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/mllib/stat/correlation/SpearmanCorrelation.scala b/ml-accelerator/src/main/scala/org/apache/spark/mllib/stat/correlation/SpearmanCorrelation.scala deleted file mode 100644 index eaf0afc..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/mllib/stat/correlation/SpearmanCorrelation.scala +++ /dev/null @@ -1,65 +0,0 @@ -// scalastyle:off header.matches -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.mllib.stat.correlation - -import org.apache.spark.internal.Logging -import org.apache.spark.mllib.linalg.{Matrix, Vector} -import org.apache.spark.mllib.linalg.distributed.RowMatrix -import org.apache.spark.rdd.RDD -import org.apache.spark.storage.StorageLevel - -/** - * Compute Spearman's correlation for two RDDs of the type RDD[Double] or the correlation matrix - * for an RDD of the type RDD[Vector]. - * - * Definition of Spearman's correlation can be found at - * http://en.wikipedia.org/wiki/Spearman's_rank_correlation_coefficient - */ -private[stat] object SpearmanCorrelation extends Correlation with Logging { - - /** - * Compute Spearman's correlation for two datasets. 
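// Editor's sketch of the rank transform underlying Spearman's correlation: replace
// each value by its (tie-averaged, 1-based) rank, then compute Pearson on the ranks.
// The distributed implementation below does this per column via
// SpearmanCorrelationUtil.getRanks; this local version is only illustrative.
def ranks(xs: Array[Double]): Array[Double] = {
  val out = new Array[Double](xs.length)
  val sorted = xs.zipWithIndex.sortBy(_._1)
  var i = 0
  while (i < sorted.length) {
    var j = i
    while (j + 1 < sorted.length && sorted(j + 1)._1 == sorted(i)._1) j += 1
    val avgRank = (i + j) / 2.0 + 1.0  // average rank over the tie group
    (i to j).foreach(k => out(sorted(k)._2) = avgRank)
    i = j + 1
  }
  out
}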
- */ - override def computeCorrelation(x: RDD[Double], y: RDD[Double]): Double = { - computeCorrelationWithMatrixImpl(x, y) - } - - /** - * Compute Spearman's correlation matrix S, for the input matrix, where S(i, j) is the - * correlation between column i and j. - */ - override def computeCorrelationMatrix(X: RDD[Vector]): Matrix = { - val groupedRanks = SpearmanCorrelationUtil.getRanks(X) - .persist(StorageLevel.MEMORY_AND_DISK) - .setName("groupedRanks") - groupedRanks.foreach(_ => {}) - X.sparkContext.getPersistentRDDs.foreach{case (_, rdd) => - if (!rdd.name.contains("groupedRanks")) rdd.unpersist() - } - val rowMatrix = new RowMatrix(groupedRanks) - val cov = rowMatrix.computeCovariance() - PearsonCorrelation.computeCorrelationMatrixFromCovariance(cov) - } -} diff --git a/ml-accelerator/src/main/scala/org/apache/spark/mllib/tree/DecisionTree.scala b/ml-accelerator/src/main/scala/org/apache/spark/mllib/tree/DecisionTree.scala deleted file mode 100644 index bf402c3..0000000 --- a/ml-accelerator/src/main/scala/org/apache/spark/mllib/tree/DecisionTree.scala +++ /dev/null @@ -1,291 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.mllib.tree - -import scala.collection.JavaConverters._ - -import org.apache.spark.annotation.Since -import org.apache.spark.api.java.JavaRDD -import org.apache.spark.internal.Logging -import org.apache.spark.ml.tree.impl.{DecisionForest, DTUtils} -import org.apache.spark.mllib.regression.LabeledPoint -import org.apache.spark.mllib.tree.configuration.Algo._ -import org.apache.spark.mllib.tree.configuration.QuantileStrategy._ -import org.apache.spark.mllib.tree.configuration.Strategy -import org.apache.spark.mllib.tree.impurity._ -import org.apache.spark.mllib.tree.model._ -import org.apache.spark.rdd.RDD - - -/** - * A class which implements a decision tree learning algorithm for classification and regression. - * It supports both continuous and categorical features. - * - * @param strategy The configuration parameters for the tree algorithm which specify the type - * of decision tree (classification or regression), feature type (continuous, - * categorical), depth of the tree, quantile calculation strategy, etc. - * @param seed Random seed. 
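// Editor's sketch (assumes a SparkContext `sc`; the LIBSVM path is hypothetical):
// driving the class below through its recommended entry point. trainClassifier
// builds a Strategy and ultimately calls DecisionForest.run with a single tree.
import org.apache.spark.mllib.tree.DecisionTree
import org.apache.spark.mllib.util.MLUtils

val data = MLUtils.loadLibSVMFile(sc, "data/sample_libsvm_data.txt")
val model = DecisionTree.trainClassifier(data, numClasses = 2,
  categoricalFeaturesInfo = Map.empty[Int, Int],
  impurity = "gini", maxDepth = 5, maxBins = 32)
println(model.toDebugString)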
- */ -@Since("1.0.0") -class DecisionTree private[spark] (private val strategy: Strategy, private val seed: Int) - extends Serializable with Logging { - - /** - * @param strategy The configuration parameters for the tree algorithm which specify the type - * of decision tree (classification or regression), feature type (continuous, - * categorical), depth of the tree, quantile calculation strategy, etc. - */ - @Since("1.0.0") - def this(strategy: Strategy) = this(strategy, seed = 0) - - strategy.assertValid() - - /** - * Method to train a decision tree model over an RDD - * - * @param input Training data: RDD of [[org.apache.spark.mllib.regression.LabeledPoint]]. - * @return DecisionTreeModel that can be used for prediction. - */ - @Since("1.2.0") - def run(input: RDD[LabeledPoint]): DecisionTreeModel = { - val trees = DecisionForest.run(input.map(_.asML), strategy, numTrees = 1, - featureSubsetStrategy = "all", seed = seed.toLong, None) - val rfModel = new RandomForestModel(strategy.algo, trees.map(_.toOld)) - rfModel.trees(0) - } -} - -@Since("1.0.0") -object DecisionTree extends Serializable with Logging { - - /** - * Method to train a decision tree model. - * The method supports binary and multiclass classification and regression. - * - * @param input Training dataset: RDD of [[org.apache.spark.mllib.regression.LabeledPoint]]. - * For classification, labels should take values {0, 1, ..., numClasses-1}. - * For regression, labels are real numbers. - * @param strategy The configuration parameters for the tree algorithm which specify the type - * of decision tree (classification or regression), feature type (continuous, - * categorical), depth of the tree, quantile calculation strategy, etc. - * @return DecisionTreeModel that can be used for prediction. - * - * @note Using `org.apache.spark.mllib.tree.DecisionTree.trainClassifier` - * and `org.apache.spark.mllib.tree.DecisionTree.trainRegressor` - * is recommended to clearly separate classification and regression. - */ - @Since("1.0.0") - def train(input: RDD[LabeledPoint], strategy: Strategy): DecisionTreeModel = { - new DecisionTree(strategy).run(input) - } - - /** - * Method to train a decision tree model. - * The method supports binary and multiclass classification and regression. - * - * @param input Training dataset: RDD of [[org.apache.spark.mllib.regression.LabeledPoint]]. - * For classification, labels should take values {0, 1, ..., numClasses-1}. - * For regression, labels are real numbers. - * @param algo Type of decision tree, either classification or regression. - * @param impurity Criterion used for information gain calculation. - * @param maxDepth Maximum depth of the tree (e.g. depth 0 means 1 leaf node, depth 1 means - * 1 internal node + 2 leaf nodes). - * @return DecisionTreeModel that can be used for prediction. - * - * @note Using `org.apache.spark.mllib.tree.DecisionTree.trainClassifier` - * and `org.apache.spark.mllib.tree.DecisionTree.trainRegressor` - * is recommended to clearly separate classification and regression. - */ - @Since("1.0.0") - def train( - input: RDD[LabeledPoint], - algo: Algo, - impurity: Impurity, - maxDepth: Int): DecisionTreeModel = { - val (_, maxMemInMB) = DTUtils.getInvisibleParamsForMLLib(input) - val strategy = - new Strategy(algo, impurity, maxDepth, maxMemoryInMB = maxMemInMB) - new DecisionTree(strategy).run(input) - } - - /** - * Method to train a decision tree model. - * The method supports binary and multiclass classification and regression. 
- * - * @param input Training dataset: RDD of [[org.apache.spark.mllib.regression.LabeledPoint]]. - * For classification, labels should take values {0, 1, ..., numClasses-1}. - * For regression, labels are real numbers. - * @param algo Type of decision tree, either classification or regression. - * @param impurity Criterion used for information gain calculation. - * @param maxDepth Maximum depth of the tree (e.g. depth 0 means 1 leaf node, depth 1 means - * 1 internal node + 2 leaf nodes). - * @param numClasses Number of classes for classification. Default value of 2. - * @return DecisionTreeModel that can be used for prediction. - * - * @note Using `org.apache.spark.mllib.tree.DecisionTree.trainClassifier` - * and `org.apache.spark.mllib.tree.DecisionTree.trainRegressor` - * is recommended to clearly separate classification and regression. - */ - @Since("1.2.0") - def train( - input: RDD[LabeledPoint], - algo: Algo, - impurity: Impurity, - maxDepth: Int, - numClasses: Int): DecisionTreeModel = { - val (_, maxMemInMB) = DTUtils.getInvisibleParamsForMLLib(input) - val strategy = new Strategy(algo, impurity, maxDepth, numClasses, - maxMemoryInMB = maxMemInMB) - new DecisionTree(strategy).run(input) - } - - /** - * Method to train a decision tree model. - * The method supports binary and multiclass classification and regression. - * - * @param input Training dataset: RDD of [[org.apache.spark.mllib.regression.LabeledPoint]]. - * For classification, labels should take values {0, 1, ..., numClasses-1}. - * For regression, labels are real numbers. - * @param algo Type of decision tree, either classification or regression. - * @param impurity Criterion used for information gain calculation. - * @param maxDepth Maximum depth of the tree (e.g. depth 0 means 1 leaf node, depth 1 means - * 1 internal node + 2 leaf nodes). - * @param numClasses Number of classes for classification. Default value of 2. - * @param maxBins Maximum number of bins used for splitting features. - * @param quantileCalculationStrategy Algorithm for calculating quantiles. - * @param categoricalFeaturesInfo Map storing arity of categorical features. An entry (n to k) - * indicates that feature n is categorical with k categories - * indexed from 0: {0, 1, ..., k-1}. - * @return DecisionTreeModel that can be used for prediction. - * - * @note Using `org.apache.spark.mllib.tree.DecisionTree.trainClassifier` - * and `org.apache.spark.mllib.tree.DecisionTree.trainRegressor` - * is recommended to clearly separate classification and regression. - */ - @Since("1.0.0") - def train( - input: RDD[LabeledPoint], - algo: Algo, - impurity: Impurity, - maxDepth: Int, - numClasses: Int, - maxBins: Int, - quantileCalculationStrategy: QuantileStrategy, - categoricalFeaturesInfo: Map[Int, Int]): DecisionTreeModel = { - val (_, maxMemInMB) = DTUtils.getInvisibleParamsForMLLib(input) - val strategy = new Strategy(algo, impurity, maxDepth, numClasses, maxBins, - quantileCalculationStrategy, categoricalFeaturesInfo, maxMemoryInMB = maxMemInMB) - new DecisionTree(strategy).run(input) - } - - /** - * Method to train a decision tree model for binary or multiclass classification. - * - * @param input Training dataset: RDD of [[org.apache.spark.mllib.regression.LabeledPoint]]. - * Labels should take values {0, 1, ..., numClasses-1}. - * @param numClasses Number of classes for classification. - * @param categoricalFeaturesInfo Map storing arity of categorical features. 
An entry (n to k) - * indicates that feature n is categorical with k categories - * indexed from 0: {0, 1, ..., k-1}. - * @param impurity Criterion used for information gain calculation. - * Supported values: "gini" (recommended) or "entropy". - * @param maxDepth Maximum depth of the tree (e.g. depth 0 means 1 leaf node, depth 1 means - * 1 internal node + 2 leaf nodes). - * (suggested value: 5) - * @param maxBins Maximum number of bins used for splitting features. - * (suggested value: 32) - * @return DecisionTreeModel that can be used for prediction. - */ - @Since("1.1.0") - def trainClassifier( - input: RDD[LabeledPoint], - numClasses: Int, - categoricalFeaturesInfo: Map[Int, Int], - impurity: String, - maxDepth: Int, - maxBins: Int): DecisionTreeModel = { - val impurityType = Impurities.fromString(impurity) - train(input, Classification, impurityType, maxDepth, numClasses, maxBins, Sort, - categoricalFeaturesInfo) - } - - /** - * Java-friendly API for `org.apache.spark.mllib.tree.DecisionTree.trainClassifier` - */ - @Since("1.1.0") - def trainClassifier( - input: JavaRDD[LabeledPoint], - numClasses: Int, - categoricalFeaturesInfo: java.util.Map[java.lang.Integer, java.lang.Integer], - impurity: String, - maxDepth: Int, - maxBins: Int): DecisionTreeModel = { - trainClassifier(input.rdd, numClasses, - categoricalFeaturesInfo.asInstanceOf[java.util.Map[Int, Int]].asScala.toMap, - impurity, maxDepth, maxBins) - } - - /** - * Method to train a decision tree model for regression. - * - * @param input Training dataset: RDD of [[org.apache.spark.mllib.regression.LabeledPoint]]. - * Labels are real numbers. - * @param categoricalFeaturesInfo Map storing arity of categorical features. An entry (n to k) - * indicates that feature n is categorical with k categories - * indexed from 0: {0, 1, ..., k-1}. - * @param impurity Criterion used for information gain calculation. - * The only supported value for regression is "variance". - * @param maxDepth Maximum depth of the tree (e.g. depth 0 means 1 leaf node, depth 1 means - * 1 internal node + 2 leaf nodes). - * (suggested value: 5) - * @param maxBins Maximum number of bins used for splitting features. - * (suggested value: 32) - * @return DecisionTreeModel that can be used for prediction. 
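// Editor's note: a hypothetical categoricalFeaturesInfo for the training methods
// above. Map(0 -> 2, 4 -> 10) declares feature 0 binary (categories {0, 1}) and
// feature 4 categorical with 10 categories ({0, ..., 9}); features absent from the
// map are treated as continuous.
val categoricalFeaturesInfo: Map[Int, Int] = Map(0 -> 2, 4 -> 10)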
- */ - @Since("1.1.0") - def trainRegressor( - input: RDD[LabeledPoint], - categoricalFeaturesInfo: Map[Int, Int], - impurity: String, - maxDepth: Int, - maxBins: Int): DecisionTreeModel = { - val impurityType = Impurities.fromString(impurity) - train(input, Regression, impurityType, maxDepth, 0, maxBins, Sort, categoricalFeaturesInfo) - } - - /** - * Java-friendly API for `org.apache.spark.mllib.tree.DecisionTree.trainRegressor` - */ - @Since("1.1.0") - def trainRegressor( - input: JavaRDD[LabeledPoint], - categoricalFeaturesInfo: java.util.Map[java.lang.Integer, java.lang.Integer], - impurity: String, - maxDepth: Int, - maxBins: Int): DecisionTreeModel = { - trainRegressor(input.rdd, - categoricalFeaturesInfo.asInstanceOf[java.util.Map[Int, Int]].asScala.toMap, - impurity, maxDepth, maxBins) - } -} diff --git a/ml-core/pom.xml b/ml-core/pom.xml index 6aad5f9..443d9b3 100644 --- a/ml-core/pom.xml +++ b/ml-core/pom.xml @@ -6,7 +6,7 @@ 4.0.0 - boostkit-ml-core_2.11 + boostkit-ml-core_2.12 2.1.0 ${project.artifactId} Spark ml core @@ -14,7 +14,7 @@ org.apache.spark - boostkit-ml-kernel-client-core_2.11 + boostkit-ml-kernel-client-core_2.12 2.1.0 ${spark.version} compile diff --git a/ml-core/src/main/scala/breeze/numerics/DigammaX.scala b/ml-core/src/main/scala/breeze/numerics/DigammaX.scala index 4fd0f02..7dce00d 100644 --- a/ml-core/src/main/scala/breeze/numerics/DigammaX.scala +++ b/ml-core/src/main/scala/breeze/numerics/DigammaX.scala @@ -1,10 +1,4 @@ // scalastyle:off header.matches -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ /* Copyright 2012 David Hall diff --git a/ml-core/src/main/scala/org/apache/spark/ml/tree/Node.scala b/ml-core/src/main/scala/org/apache/spark/ml/tree/Node.scala deleted file mode 100644 index bbaaff8..0000000 --- a/ml-core/src/main/scala/org/apache/spark/ml/tree/Node.scala +++ /dev/null @@ -1,635 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.ml.tree - -import org.apache.spark.ml.linalg.Vector -import org.apache.spark.ml.tree.impl.BinnedFeature -import org.apache.spark.mllib.tree.impurity.ImpurityCalculator -import org.apache.spark.mllib.tree.model.{ImpurityStats, - InformationGainStats => OldInformationGainStats, Node => OldNode, Predict => OldPredict} - -/** - * Decision tree node interface. 
- */ -sealed abstract class Node extends Serializable { - - // TODO: Add aggregate stats (once available). This will happen after we move the DecisionTree - // code into the new API and deprecate the old API. SPARK-3727 - - /** Prediction a leaf node makes, or which an internal node would make if it were a leaf node */ - def prediction: Double - - /** Impurity measure at this node (for training data) */ - def impurity: Double - - /** - * Statistics aggregated from training data at this node, used to compute prediction, impurity, - * and probabilities. - * For classification, the array of class counts must be normalized to a probability distribution. - */ - private[ml] def impurityStats: ImpurityCalculator - - /** Recursive prediction helper method */ - private[ml] def predictImpl(features: Vector): LeafNode - - private[ml] def predictImplX(binnedFeatures: Array[Int], splits: Array[Array[Split]]): LeafNode - - /** - * Get the number of nodes in tree below this node, including leaf nodes. - * E.g., if this is a leaf, returns 0. If both children are leaves, returns 2. - */ - private[tree] def numDescendants: Int - - /** - * Recursive print function. - * @param indentFactor The number of spaces to add to each level of indentation. - */ - private[tree] def subtreeToString(indentFactor: Int = 0): String - - /** - * Get depth of tree from this node. - * E.g.: Depth 0 means this is a leaf node. Depth 1 means 1 internal and 2 leaf nodes. - */ - private[tree] def subtreeDepth: Int - - /** - * Create a copy of this node in the old Node format, recursively creating child nodes as needed. - * @param id Node ID using old format IDs - */ - private[ml] def toOld(id: Int): OldNode - - /** - * Trace down the tree, and return the largest feature index used in any split. - * @return Max feature index used in a split, or -1 if there are no splits (single leaf node). - */ - private[ml] def maxSplitFeatureIndex(): Int - - /** Returns a deep copy of the subtree rooted at this node. */ - private[tree] def deepCopy(): Node -} - -private[ml] object Node { - - /** - * Create a new Node from the old Node format, recursively creating child nodes as needed. - */ - def fromOld(oldNode: OldNode, categoricalFeatures: Map[Int, Int]): Node = { - if (oldNode.isLeaf) { - // TODO: Once the implementation has been moved to this API, then include sufficient - // statistics here. - new LeafNode(prediction = oldNode.predict.predict, - impurity = oldNode.impurity, impurityStats = null) - } else { - val gain = if (oldNode.stats.nonEmpty) { - oldNode.stats.get.gain - } else { - 0.0 - } - new InternalNode(prediction = oldNode.predict.predict, impurity = oldNode.impurity, - gain = gain, leftChild = fromOld(oldNode.leftNode.get, categoricalFeatures), - rightChild = fromOld(oldNode.rightNode.get, categoricalFeatures), - split = Split.fromOld(oldNode.split.get, categoricalFeatures), impurityStats = null) - } - } -} - -/** - * Decision tree leaf node. 
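// Editor's sketch of the recursive descent that Node.predictImpl performs: walk from
// the root, following the split test until a leaf is reached. A toy ADT stands in
// for the sealed Node hierarchy above; only continuous "<= threshold" tests shown.
object ToyTreeSketch {
  sealed trait ToyNode
  final case class ToyLeaf(prediction: Double) extends ToyNode
  final case class ToyInternal(featureIndex: Int, threshold: Double,
      left: ToyNode, right: ToyNode) extends ToyNode

  @annotation.tailrec
  def predict(node: ToyNode, features: Array[Double]): Double = node match {
    case ToyLeaf(p) => p
    case ToyInternal(f, t, l, r) =>
      if (features(f) <= t) predict(l, features) else predict(r, features)
  }

  def main(args: Array[String]): Unit = {
    val tree = ToyInternal(0, 0.5, ToyLeaf(0.0), ToyLeaf(1.0))
    println(predict(tree, Array(0.7)))  // 1.0: feature 0 > 0.5, go right
  }
}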
- * @param prediction Prediction this node makes - * @param impurity Impurity measure at this node (for training data) - */ -class LeafNode private[ml] ( - override val prediction: Double, - override val impurity: Double, - override private[ml] val impurityStats: ImpurityCalculator) extends Node { - - override def toString: String = - s"LeafNode(prediction = $prediction, impurity = $impurity)" - - override private[ml] def predictImpl(features: Vector): LeafNode = this - - override private[ml] def predictImplX(binnedFeatures: Array[Int], splits: Array[Array[Split]] - ): LeafNode = this - - override private[tree] def numDescendants: Int = 0 - - override private[tree] def subtreeToString(indentFactor: Int = 0): String = { - val prefix: String = " " * indentFactor - s"$prefix" + s"Predict: $prediction\n" - } - - override private[tree] def subtreeDepth: Int = 0 - - override private[ml] def toOld(id: Int): OldNode = { - new OldNode(id, new OldPredict(prediction, prob = impurityStats.prob(prediction)), - impurity, isLeaf = true, None, None, None, None) - } - - override private[ml] def maxSplitFeatureIndex(): Int = -1 - - override private[tree] def deepCopy(): Node = { - new LeafNode(prediction, impurity, impurityStats) - } -} - -/** - * Internal Decision Tree node. - * @param prediction Prediction this node would make if it were a leaf node - * @param impurity Impurity measure at this node (for training data) - * @param gain Information gain value. Values less than 0 indicate missing values; - * this quirk will be removed with future updates. - * @param leftChild Left-hand child node - * @param rightChild Right-hand child node - * @param split Information about the test used to split to the left or right child. - */ -class InternalNode private[ml] ( - override val prediction: Double, - override val impurity: Double, - val gain: Double, - val leftChild: Node, - val rightChild: Node, - val split: Split, - override private[ml] val impurityStats: ImpurityCalculator) extends Node { - - // Note to developers: The constructor argument impurityStats should be reconsidered before we - // make the constructor public. We may be able to improve the representation. 
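// Editor's sketch of the quantity stored in InternalNode.gain: the impurity
// reduction gain = I(parent) - (nL / n) * I(left) - (nR / n) * I(right). Gini
// impurity over class counts is used here purely as a concrete example.
def gini(counts: Array[Double]): Double = {
  val total = counts.sum
  if (total == 0) 0.0 else 1.0 - counts.map(c => (c / total) * (c / total)).sum
}

def infoGain(parent: Array[Double], left: Array[Double], right: Array[Double]): Double = {
  val n = parent.sum
  gini(parent) - (left.sum / n) * gini(left) - (right.sum / n) * gini(right)
}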
- - override def toString: String = { - s"InternalNode(prediction = $prediction, impurity = $impurity, split = $split)" - } - - override private[ml] def predictImpl(features: Vector): LeafNode = { - if (split.shouldGoLeft(features)) { - leftChild.predictImpl(features) - } else { - rightChild.predictImpl(features) - } - } - - private[ml] def predictImplX(binnedFeatures: Array[Int], splits: Array[Array[Split]] - ): LeafNode = { - if (split.shouldGoLeft(binnedFeatures(split.featureIndex).toChar, splits(split.featureIndex))) { - leftChild.predictImplX(binnedFeatures, splits) - } else { - rightChild.predictImplX(binnedFeatures, splits) - } - } - - override private[tree] def numDescendants: Int = { - 2 + leftChild.numDescendants + rightChild.numDescendants - } - - override private[tree] def subtreeToString(indentFactor: Int = 0): String = { - val prefix: String = " " * indentFactor - s"$prefix If (${InternalNode.splitToString(split, left = true)})\n" + - leftChild.subtreeToString(indentFactor + 1) + - s"$prefix Else (${InternalNode.splitToString(split, left = false)})\n" + - rightChild.subtreeToString(indentFactor + 1) - } - - override private[tree] def subtreeDepth: Int = { - 1 + math.max(leftChild.subtreeDepth, rightChild.subtreeDepth) - } - - override private[ml] def toOld(id: Int): OldNode = { - assert(id.toLong * 2 < Int.MaxValue, "Decision Tree could not be converted from new to old API" - + " since the old API does not support deep trees.") - new OldNode(id, new OldPredict(prediction, prob = impurityStats.prob(prediction)), impurity, - isLeaf = false, Some(split.toOld), Some(leftChild.toOld(OldNode.leftChildIndex(id))), - Some(rightChild.toOld(OldNode.rightChildIndex(id))), - Some(new OldInformationGainStats(gain, impurity, leftChild.impurity, rightChild.impurity, - new OldPredict(leftChild.prediction, prob = 0.0), - new OldPredict(rightChild.prediction, prob = 0.0)))) - } - - override private[ml] def maxSplitFeatureIndex(): Int = { - math.max(split.featureIndex, - math.max(leftChild.maxSplitFeatureIndex(), rightChild.maxSplitFeatureIndex())) - } - - override private[tree] def deepCopy(): Node = { - new InternalNode(prediction, impurity, gain, leftChild.deepCopy(), rightChild.deepCopy(), - split, impurityStats) - } -} - -private object InternalNode { - - /** - * Helper method for [[Node.subtreeToString()]]. - * @param split Split to print - * @param left Indicates whether this is the part of the split going to the left, - * or that going to the right. - */ - private def splitToString(split: Split, left: Boolean): String = { - val featureStr = s"feature ${split.featureIndex}" - split match { - case contSplit: ContinuousSplit => - if (left) { - s"$featureStr <= ${contSplit.threshold}" - } else { - s"$featureStr > ${contSplit.threshold}" - } - case catSplit: CategoricalSplit => - val categoriesStr = catSplit.leftCategories.mkString("{", ",", "}") - if (left) { - s"$featureStr in $categoriesStr" - } else { - s"$featureStr not in $categoriesStr" - } - } - } -} - -/** - * Version of a node used in learning. This uses vars so that we can modify nodes as we split the - * tree by adding children, etc. - * - * For now, we use node IDs. These will be kept internal since we hope to remove node IDs - * in the future, or at least change the indexing (so that we can support much deeper trees). 
- * - * This node can either be: - * - a leaf node, with leftChild, rightChild, split set to null, or - * - an internal node, with all values set - * - * @param id We currently use the same indexing as the old implementation in - * [[org.apache.spark.mllib.tree.model.Node]], but this will change later. - * @param isLeaf Indicates whether this node will definitely be a leaf in the learned tree, - * so that we do not need to consider splitting it further. - * @param stats Impurity statistics for this node. - */ -private[tree] class LearningNodeX( - var id: Int, - var leftChild: Option[LearningNodeX], - var rightChild: Option[LearningNodeX], - var split: Option[SplitBase], - var isLeaf: Boolean, - var stats: ImpurityStats) extends Serializable { - - /** - * Convert this [[LearningNodeX]] to a regular [[Node]], and recurse on any children. - */ - def toNode(splits: Array[Array[Split]]): Node = { - if (leftChild.nonEmpty) { - assert(rightChild.nonEmpty && split.nonEmpty && stats != null, - "Unknown error during Decision Tree learning. Could not convert LearningNodeX to Node.") - val normalSplit = Split.fromBase(split.get, splits) - new InternalNode(stats.impurityCalculator.predict, stats.impurity, stats.gain, - leftChild.get.toNode(splits), rightChild.get.toNode(splits), - normalSplit, stats.impurityCalculator) - } else { - if (stats.valid) { - new LeafNode(stats.impurityCalculator.predict, stats.impurity, - stats.impurityCalculator) - } else { - // Here we want to keep same behavior with the old mllib.DecisionTreeModel - new LeafNode(stats.impurityCalculator.predict, -1.0, stats.impurityCalculator) - } - - } - } - - /** - * Get the node index corresponding to this data point. - * This function mimics prediction, passing an example from the root node down to a leaf - * or unsplit node; that node's index is returned. - * - * @param binnedFeatures Binned feature vector for data point. - * @param splits possible splits for all features, indexed (numFeatures)(numSplits) - * @return Leaf index if the data point reaches a leaf. - * Otherwise, last node reachable in tree matching this example. - * Note: This is the global node index, i.e., the index used in the tree. - * This index is different from the index used during training a particular - * group of nodes on one call to - * [[org.apache.spark.ml.tree.impl.RandomForest.findBestSplits()]]. - */ - def predictImpl(binnedFeatures: BinnedFeature, splits: Array[Array[SplitBase]]): Int = { - if (this.isLeaf || this.split.isEmpty) { - this.id - } else { - val split = this.split.get - val featureIndex = split.featureIndex - val splitLeft = split.shouldGoLeft(binnedFeatures.get(featureIndex), splits(featureIndex)) - if (this.leftChild.isEmpty) { - // Not yet split. Return next layer of nodes to train - if (splitLeft) { - LearningNodeX.leftChildIndex(this.id) - } else { - LearningNodeX.rightChildIndex(this.id) - } - } else { - if (splitLeft) { - this.leftChild.get.predictImpl(binnedFeatures, splits) - } else { - this.rightChild.get.predictImpl(binnedFeatures, splits) - } - } - } - } - -} - -private[tree] object LearningNodeX { - - /** Create a node with some of its fields set. */ - def apply( - id: Int, - isLeaf: Boolean, - stats: ImpurityStats): LearningNodeX = { - // todo: this is a bug at spark 2.3.2 (isLeaf are always assigned false) - new LearningNodeX(id, None, None, None, isLeaf, stats) - } - - /** Create an empty node with the given node index. Values must be set later on. 
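// Editor's sketch of the 1-based heap numbering used by the indexing helpers below
// (leftChildIndex, rightChildIndex, parentIndex, indexToLevel): root = 1, children
// of i are 2i and 2i + 1, parent of i is i / 2, and the level of i is the position
// of its highest set bit. Checked here for the first few indices.
object NodeIndexSketch {
  def leftChildIndex(i: Int): Int = i << 1
  def rightChildIndex(i: Int): Int = (i << 1) + 1
  def parentIndex(i: Int): Int = i >> 1
  def indexToLevel(i: Int): Int =
    java.lang.Integer.numberOfTrailingZeros(java.lang.Integer.highestOneBit(i))

  def main(args: Array[String]): Unit = {
    assert(parentIndex(leftChildIndex(5)) == 5)
    assert(parentIndex(rightChildIndex(5)) == 5)
    assert(indexToLevel(1) == 0 && indexToLevel(2) == 1 && indexToLevel(7) == 2)
    println((1 to 7).map(i => s"$i -> level ${indexToLevel(i)}").mkString(", "))
  }
}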
*/ - def emptyNode(nodeIndex: Int): LearningNodeX = { - new LearningNodeX(nodeIndex, None, None, None, false, null) - } - - // The below indexing methods were copied from spark.mllib.tree.model.Node - - /** - * Return the index of the left child of this node. - */ - def leftChildIndex(nodeIndex: Int): Int = nodeIndex << 1 - - /** - * Return the index of the right child of this node. - */ - def rightChildIndex(nodeIndex: Int): Int = (nodeIndex << 1) + 1 - - /** - * Get the parent index of the given node, or 0 if it is the root. - */ - def parentIndex(nodeIndex: Int): Int = nodeIndex >> 1 - - /** - * Return the level of a tree which the given node is in. - */ - def indexToLevel(nodeIndex: Int): Int = if (nodeIndex == 0) { - throw new IllegalArgumentException(s"0 is not a valid node index.") - } else { - java.lang.Integer.numberOfTrailingZeros(java.lang.Integer.highestOneBit(nodeIndex)) - } - - /** - * Returns true if this is a left child. - * Note: Returns false for the root. - */ - def isLeftChild(nodeIndex: Int): Boolean = nodeIndex > 1 && nodeIndex % 2 == 0 - - /** - * Return the maximum number of nodes which can be in the given level of the tree. - * @param level Level of tree (0 = root). - */ - def maxNodesInLevel(level: Int): Int = 1 << level - - /** - * Return the index of the first node in the given level. - * @param level Level of tree (0 = root). - */ - def startIndexInLevel(level: Int): Int = 1 << level - - /** - * Traces down from a root node to get the node with the given node index. - * This assumes the node exists. - */ - def getNode(nodeIndex: Int, rootNode: LearningNodeX): LearningNodeX = { - var tmpNode: LearningNodeX = rootNode - var levelsToGo = indexToLevel(nodeIndex) - while (levelsToGo > 0) { - if ((nodeIndex & (1 << levelsToGo - 1)) == 0) { - tmpNode = tmpNode.leftChild.get - } else { - tmpNode = tmpNode.rightChild.get - } - levelsToGo -= 1 - } - tmpNode - } - -} - -/** - * Version of a node used in learning. This uses vars so that we can modify nodes as we split the - * tree by adding children, etc. - * - * For now, we use node IDs. These will be kept internal since we hope to remove node IDs - * in the future, or at least change the indexing (so that we can support much deeper trees). - * - * This node can either be: - * - a leaf node, with leftChild, rightChild, split set to null, or - * - an internal node, with all values set - * - * @param id We currently use the same indexing as the old implementation in - * [[org.apache.spark.mllib.tree.model.Node]], but this will change later. - * @param isLeaf Indicates whether this node will definitely be a leaf in the learned tree, - * so that we do not need to consider splitting it further. - * @param stats Impurity statistics for this node. - */ -private[tree] class LearningNode( - var id: Int, - var leftChild: Option[LearningNode], - var rightChild: Option[LearningNode], - var split: Option[Split], - var isLeaf: Boolean, - var stats: ImpurityStats) extends Serializable { - - /** - * Convert this [[LearningNode]] to a regular [[Node]], and recurse on any children. - */ - def toNode: Node = { - if (leftChild.nonEmpty) { - assert(rightChild.nonEmpty && split.nonEmpty && stats != null, - "Unknown error during Decision Tree learning. 
Could not convert LearningNode to Node.") - new InternalNode(stats.impurityCalculator.predict, stats.impurity, stats.gain, - leftChild.get.toNode, rightChild.get.toNode, split.get, stats.impurityCalculator) - } else { - if (stats.valid) { - new LeafNode(stats.impurityCalculator.predict, stats.impurity, - stats.impurityCalculator) - } else { - // Here we want to keep same behavior with the old mllib.DecisionTreeModel - new LeafNode(stats.impurityCalculator.predict, -1.0, stats.impurityCalculator) - } - - } - } - - /** - * Get the node index corresponding to this data point. - * This function mimics prediction, passing an example from the root node down to a leaf - * or unsplit node; that node's index is returned. - * - * @param binnedFeatures Binned feature vector for data point. - * @param splits possible splits for all features, indexed (numFeatures)(numSplits) - * @return Leaf index if the data point reaches a leaf. - * Otherwise, last node reachable in tree matching this example. - * Note: This is the global node index, i.e., the index used in the tree. - * This index is different from the index used during training a particular - * group of nodes on one call to - * [[org.apache.spark.ml.tree.impl.RandomForest.findBestSplits()]]. - */ - def predictImpl(binnedFeatures: Array[Int], splits: Array[Array[Split]]): Int = { - if (this.isLeaf || this.split.isEmpty) { - this.id - } else { - val split = this.split.get - val featureIndex = split.featureIndex - val splitLeft = split.shouldGoLeft(binnedFeatures(featureIndex), splits(featureIndex)) - if (this.leftChild.isEmpty) { - // Not yet split. Return next layer of nodes to train - if (splitLeft) { - LearningNode.leftChildIndex(this.id) - } else { - LearningNode.rightChildIndex(this.id) - } - } else { - if (splitLeft) { - this.leftChild.get.predictImpl(binnedFeatures, splits) - } else { - this.rightChild.get.predictImpl(binnedFeatures, splits) - } - } - } - } - - /** - * Get the node index corresponding to this data point. - * This function mimics prediction, passing an example from the root node down to a leaf - * or unsplit node; that node's index is returned. - * - * @param binnedFeatures Binned feature vector for data point. - * @param splits possible splits for all features, indexed (numFeatures)(numSplits) - * @return Leaf index if the data point reaches a leaf. - * Otherwise, last node reachable in tree matching this example. - * Note: This is the global node index, i.e., the index used in the tree. - * This index is different from the index used during training a particular - * group of nodes on one call to - * [[org.apache.spark.ml.tree.impl.RandomForest.findBestSplits()]]. - */ - def predictImpl(binnedFeatures: BinnedFeature, splits: Array[Array[Split]]): Int = { - if (this.isLeaf || this.split.isEmpty) { - this.id - } else { - val split = this.split.get - val featureIndex = split.featureIndex - val splitLeft = split.shouldGoLeft(binnedFeatures.get(featureIndex), splits(featureIndex)) - if (this.leftChild.isEmpty) { - // Not yet split. Return next layer of nodes to train - if (splitLeft) { - LearningNode.leftChildIndex(this.id) - } else { - LearningNode.rightChildIndex(this.id) - } - } else { - if (splitLeft) { - this.leftChild.get.predictImpl(binnedFeatures, splits) - } else { - this.rightChild.get.predictImpl(binnedFeatures, splits) - } - } - } - } - -} - - -private[tree] object LearningNode { - - /** Create a node with some of its fields set. 
*/ - def apply( - id: Int, - isLeaf: Boolean, - stats: ImpurityStats): LearningNode = { - // todo: this is a bug at spark 2.3.2 (isLeaf are always assigned false) - new LearningNode(id, None, None, None, isLeaf, stats) - } - - /** Create an empty node with the given node index. Values must be set later on. */ - def emptyNode(nodeIndex: Int): LearningNode = { - new LearningNode(nodeIndex, None, None, None, false, null) - } - - // The below indexing methods were copied from spark.mllib.tree.model.Node - - /** - * Return the index of the left child of this node. - */ - def leftChildIndex(nodeIndex: Int): Int = nodeIndex << 1 - - /** - * Return the index of the right child of this node. - */ - def rightChildIndex(nodeIndex: Int): Int = (nodeIndex << 1) + 1 - - /** - * Get the parent index of the given node, or 0 if it is the root. - */ - def parentIndex(nodeIndex: Int): Int = nodeIndex >> 1 - - /** - * Return the level of a tree which the given node is in. - */ - def indexToLevel(nodeIndex: Int): Int = if (nodeIndex == 0) { - throw new IllegalArgumentException(s"0 is not a valid node index.") - } else { - java.lang.Integer.numberOfTrailingZeros(java.lang.Integer.highestOneBit(nodeIndex)) - } - - /** - * Returns true if this is a left child. - * Note: Returns false for the root. - */ - def isLeftChild(nodeIndex: Int): Boolean = nodeIndex > 1 && nodeIndex % 2 == 0 - - /** - * Return the maximum number of nodes which can be in the given level of the tree. - * @param level Level of tree (0 = root). - */ - def maxNodesInLevel(level: Int): Int = 1 << level - - /** - * Return the index of the first node in the given level. - * @param level Level of tree (0 = root). - */ - def startIndexInLevel(level: Int): Int = 1 << level - - /** - * Traces down from a root node to get the node with the given node index. - * This assumes the node exists. - */ - def getNode(nodeIndex: Int, rootNode: LearningNode): LearningNode = { - var tmpNode: LearningNode = rootNode - var levelsToGo = indexToLevel(nodeIndex) - while (levelsToGo > 0) { - if ((nodeIndex & (1 << levelsToGo - 1)) == 0) { - tmpNode = tmpNode.leftChild.get - } else { - tmpNode = tmpNode.rightChild.get - } - levelsToGo -= 1 - } - tmpNode - } -} diff --git a/ml-core/src/main/scala/org/apache/spark/ml/tree/Split.scala b/ml-core/src/main/scala/org/apache/spark/ml/tree/Split.scala deleted file mode 100644 index dbea593..0000000 --- a/ml-core/src/main/scala/org/apache/spark/ml/tree/Split.scala +++ /dev/null @@ -1,274 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.ml.tree - -import java.util.Objects - -import org.apache.spark.annotation.Since -import org.apache.spark.ml.linalg.Vector -import org.apache.spark.mllib.tree.configuration.{FeatureType => OldFeatureType} -import org.apache.spark.mllib.tree.model.{Split => OldSplit} - - -/** - * Interface for a "Split," which specifies a test made at a decision tree node - * to choose the left or right path. - */ -sealed trait SplitBase extends Serializable { - - /** Index of feature which this split tests */ - def featureIndex: Int - - /** - * Return true (split to left) or false (split to right). - * @param binnedFeature Binned feature value. - * @param splits All splits for the given feature. - */ - private[tree] def shouldGoLeft(binnedFeature: Char, splits: Array[SplitBase]): Boolean -} - -sealed trait Split extends SplitBase { - /** - * Return true (split to left) or false (split to right). - * @param features Vector of features (original values, not binned). - */ - private[ml] def shouldGoLeft(features: Vector): Boolean - - /** - * Return true (split to left) or false (split to right). - * @param binnedFeature Binned feature value. - * @param splits All splits for the given feature. - */ - private[tree] def shouldGoLeft(binnedFeature: Int, splits: Array[Split]): Boolean - - /** Convert to old Split format */ - private[tree] def toOld: OldSplit -} - -private[tree] object Split { - - def fromOld(oldSplit: OldSplit, categoricalFeatures: Map[Int, Int]): Split = { - oldSplit.featureType match { - case OldFeatureType.Categorical => - new CategoricalSplit(featureIndex = oldSplit.feature, - _leftCategories = oldSplit.categories.toArray, categoricalFeatures(oldSplit.feature)) - case OldFeatureType.Continuous => - new ContinuousSplit(featureIndex = oldSplit.feature, threshold = oldSplit.threshold) - } - } - - def toBase(split: Split, binIdx: Int): SplitBase = { - split match { - case value: CategoricalSplit => - value - case value: ContinuousSplit => - new ContinuousSplitLearning(value.featureIndex, binIdx) - } - } - - def fromBase(baseSplit: SplitBase, splits: Array[Array[Split]]): Split = { - baseSplit match { - case value: CategoricalSplit => - value - case value: ContinuousSplit => - value - case value: ContinuousSplitLearning => - val thresh = - splits(value.featureIndex)(value.binIndex).asInstanceOf[ContinuousSplit].threshold - new ContinuousSplit(value.featureIndex, thresh) - } - } -} - -/** - * Split which tests a categorical feature. - * @param featureIndex Index of the feature to test - * @param _leftCategories If the feature value is in this set of categories, then the split goes - * left. Otherwise, it goes right. - * @param numCategories Number of categories for this feature. - */ -class CategoricalSplit private[ml] ( - override val featureIndex: Int, - _leftCategories: Array[Double], - @Since("2.0.0") val numCategories: Int) - extends Split { - - require(_leftCategories.forall(cat => 0 <= cat && cat < numCategories), "Invalid leftCategories" + - s" (should be in range [0, $numCategories)): ${_leftCategories.mkString(",")}") - - /** - * If true, then "categories" is the set of categories for splitting to the left, and vice versa. - */ - private val isLeft: Boolean = _leftCategories.length <= numCategories / 2 - - /** Set of categories determining the splitting rule, along with [[isLeft]]. 
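// Editor's sketch of the side-normalisation described above: the split stores
// whichever category set has at most half the categories (isLeft) and recovers the
// other side as the complement of [0, numCategories), mirroring the private
// setComplement helper at the end of this class.
def setComplement(numCategories: Int, cats: Set[Double]): Set[Double] =
  (0 until numCategories).map(_.toDouble).filterNot(cats.contains).toSet

// e.g. numCategories = 4: complement of Set(0.0, 2.0) is Set(1.0, 3.0)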
*/ - private val categories: Set[Double] = { - if (isLeft) { - _leftCategories.toSet - } else { - setComplement(_leftCategories.toSet) - } - } - - override private[ml] def shouldGoLeft(features: Vector): Boolean = { - if (isLeft) { - categories.contains(features(featureIndex)) - } else { - !categories.contains(features(featureIndex)) - } - } - - override private[tree] def shouldGoLeft(binnedFeature: Int, splits: Array[Split]): Boolean = { - if (isLeft) { - categories.contains(binnedFeature.toDouble) - } else { - !categories.contains(binnedFeature.toDouble) - } - } - - override private[tree] def shouldGoLeft( - binnedFeature: Char, - splits: Array[SplitBase]): Boolean = { - if (isLeft) { - categories.contains(binnedFeature.toDouble) - } else { - !categories.contains(binnedFeature.toDouble) - } - } - - override def hashCode(): Int = { - val state = Seq(featureIndex, isLeft, categories) - state.map(Objects.hashCode).foldLeft(0)((a, b) => 31 * a + b) - } - - override def equals(o: Any): Boolean = o match { - case other: CategoricalSplit => featureIndex == other.featureIndex && - isLeft == other.isLeft && categories == other.categories - case _ => false - } - - override private[tree] def toOld: OldSplit = { - val oldCats = if (isLeft) { - categories - } else { - setComplement(categories) - } - OldSplit(featureIndex, threshold = 0.0, OldFeatureType.Categorical, oldCats.toList) - } - - /** Get sorted categories which split to the left */ - def leftCategories: Array[Double] = { - val cats = if (isLeft) categories else setComplement(categories) - cats.toArray.sorted - } - - /** Get sorted categories which split to the right */ - def rightCategories: Array[Double] = { - val cats = if (isLeft) setComplement(categories) else categories - cats.toArray.sorted - } - - /** [0, numCategories) \ cats */ - private def setComplement(cats: Set[Double]): Set[Double] = { - Range(0, numCategories).map(_.toDouble).filter(cat => !cats.contains(cat)).toSet - } -} - -/** - * Split which tests a continuous feature. - * @param featureIndex Index of the feature to test - * @param threshold If the feature value is less than or equal to this threshold, then the - * split goes left. Otherwise, it goes right. 
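// Editor's sketch of the binned test implemented by ContinuousSplit.shouldGoLeft
// below: a binned value equal to splits.length means "greater than the last split
// point" and always routes right; otherwise the bin's upper-bound value is compared
// against this split's threshold. Plain Doubles stand in for the Split objects.
def shouldGoLeftBinned(bin: Int, binUpperBounds: Array[Double], threshold: Double): Boolean =
  if (bin == binUpperBounds.length) false  // beyond the last split point
  else binUpperBounds(bin) <= threshold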
- */ -class ContinuousSplit private[ml] (override val featureIndex: Int, val threshold: Double) - extends Split { - - override private[ml] def shouldGoLeft(features: Vector): Boolean = { - features(featureIndex) <= threshold - } - - override private[tree] def shouldGoLeft(binnedFeature: Int, splits: Array[Split]): Boolean = { - if (binnedFeature == splits.length) { - // > last split, so split right - false - } else { - val featureValueUpperBound = splits(binnedFeature).asInstanceOf[ContinuousSplit].threshold - featureValueUpperBound <= threshold - } - } - - override private[tree] def shouldGoLeft( - binnedFeature: Char, - splits: Array[SplitBase]): Boolean = { - - if (binnedFeature == splits.length) { - // > last split, so split right - false - } else { - val featureValueUpperBound = splits(binnedFeature).asInstanceOf[ContinuousSplit].threshold - featureValueUpperBound <= threshold - } - } - - override def equals(o: Any): Boolean = { - o match { - case other: ContinuousSplit => - featureIndex == other.featureIndex && threshold == other.threshold - case _ => - false - } - } - - override def hashCode(): Int = { - val state = Seq(featureIndex, threshold) - state.map(Objects.hashCode).foldLeft(0)((a, b) => 31 * a + b) - } - - override private[tree] def toOld: OldSplit = { - OldSplit(featureIndex, threshold, OldFeatureType.Continuous, List.empty[Double]) - } -} - -/** - * Split which tests a continuous feature. - * @param featureIndex Index of the feature to test - * @param binIndex If the binned feature value is less than or equal to this bin index, then the - * split goes left. Otherwise, it goes right. - */ -class ContinuousSplitLearning private[ml] (override val featureIndex: Int, val binIndex: Int) - extends SplitBase { - - override private[tree] def shouldGoLeft( - binnedFeature: Char, - splits: Array[SplitBase]): Boolean = { - - if (binnedFeature == splits.length) { - // > last split, so split right - false - } else { - binnedFeature <= binIndex - } - } - -} diff --git a/ml-core/src/main/scala/org/apache/spark/ml/tree/impl/BaggedPoint.scala b/ml-core/src/main/scala/org/apache/spark/ml/tree/impl/BaggedPoint.scala deleted file mode 100644 index c2f8822..0000000 --- a/ml-core/src/main/scala/org/apache/spark/ml/tree/impl/BaggedPoint.scala +++ /dev/null @@ -1,135 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.spark.ml.tree.impl - -import org.apache.commons.math3.distribution.PoissonDistribution - -import org.apache.spark.rdd.RDD -import org.apache.spark.util.Utils -import org.apache.spark.util.random.XORShiftRandom - -/** - * Internal representation of a datapoint which belongs to several subsamples of the same dataset, - * particularly for bagging (e.g., for random forests). - * - * This holds one instance, as well as an array of weights which represent the (weighted) - * number of times which this instance appears in each subsample. - * E.g., (datum, [1, 0, 4]) indicates that there are 3 subsamples of the dataset and that - * this datum has 1 copy, 0 copies, and 4 copies in the 3 subsamples, respectively. - * - * @param datum Data instance - * @param subsampleWeights Weight of this instance in each subsampled dataset. - * @param sampleId ID of sample - * - * TODO: This does not currently support (Double) weighted instances. Once MLlib has weighted - * dataset support, update. (Upstream Spark stores subsampleWeights as Double for that extension; - * this optimized version uses Int.) - */ -private[spark] class BaggedPoint[Datum]( - val datum: Datum, - val subsampleWeights: Array[Int], - var sampleId: Short = 0) - extends Serializable - -private[spark] object BaggedPoint { - - /** - * Convert an input dataset into its BaggedPoint representation, - * choosing subsample counts for each instance. - * Each subsample has the same number of instances as the original dataset, - * and is created by subsampling with or without replacement. - * @param input Input dataset. - * @param subsamplingRate Fraction of the training data used for learning each decision tree. - * @param numSubsamples Number of subsamples of this RDD to take. - * @param withReplacement Sampling with/without replacement. - * @param seed Random seed. - * @return BaggedPoint dataset representation. - */ - def convertToBaggedRDD[Datum] ( - input: RDD[Datum], - subsamplingRate: Double, - numSubsamples: Int, - withReplacement: Boolean, - seed: Long = Utils.random.nextLong()): RDD[BaggedPoint[Datum]] = { - if (withReplacement) { - convertToBaggedRDDSamplingWithReplacement(input, subsamplingRate, numSubsamples, seed) - } else { - if (numSubsamples == 1 && subsamplingRate == 1.0) { - convertToBaggedRDDWithoutSampling(input) - } else { - convertToBaggedRDDSamplingWithoutReplacement(input, subsamplingRate, numSubsamples, seed) - } - } - } - - private def convertToBaggedRDDSamplingWithoutReplacement[Datum] ( - input: RDD[Datum], - subsamplingRate: Double, - numSubsamples: Int, - seed: Long): RDD[BaggedPoint[Datum]] = { - input.mapPartitionsWithIndex { (partitionIndex, instances) => - // Use random seed = seed + partitionIndex + 1 to make generation reproducible. - val rng = new XORShiftRandom - rng.setSeed(seed + partitionIndex + 1) - instances.map { instance => - val subsampleWeights = new Array[Int](numSubsamples) - var subsampleIndex = 0 - while (subsampleIndex < numSubsamples) { - val x = rng.nextDouble() - subsampleWeights(subsampleIndex) = { - if (x < subsamplingRate) 1 else 0 - } - subsampleIndex += 1 - } - new BaggedPoint(instance, subsampleWeights) - } - } - } - - private def convertToBaggedRDDSamplingWithReplacement[Datum] ( - input: RDD[Datum], - subsample: Double, - numSubsamples: Int, - seed: Long): RDD[BaggedPoint[Datum]] = { - input.mapPartitionsWithIndex { (partitionIndex, instances) => - // Use random seed = seed + partitionIndex + 1 to make generation reproducible.
- val poisson = new PoissonDistribution(subsample) - poisson.reseedRandomGenerator(seed + partitionIndex + 1) - instances.map { instance => - val subsampleWeights = new Array[Int](numSubsamples) - var subsampleIndex = 0 - while (subsampleIndex < numSubsamples) { - subsampleWeights(subsampleIndex) = poisson.sample() - subsampleIndex += 1 - } - new BaggedPoint(instance, subsampleWeights) - } - } - } - - private def convertToBaggedRDDWithoutSampling[Datum] ( - input: RDD[Datum]): RDD[BaggedPoint[Datum]] = { - input.map(datum => new BaggedPoint(datum, Array(1))) - } - -} diff --git a/ml-core/src/main/scala/org/apache/spark/ml/tree/impl/DTFeatureStatsAggregator.scala b/ml-core/src/main/scala/org/apache/spark/ml/tree/impl/DTFeatureStatsAggregator.scala deleted file mode 100644 index 3d98a41..0000000 --- a/ml-core/src/main/scala/org/apache/spark/ml/tree/impl/DTFeatureStatsAggregator.scala +++ /dev/null @@ -1,111 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.ml.tree.impl - -import org.apache.spark.mllib.tree.impurity._ - - -/** - * DecisionTree statistics aggregator for a feature for a node. - * This class is abstract to support learning with and without feature subsampling. - */ -private[spark] class DTFeatureStatsAggregator( - val metadata: DecisionTreeMetadata, - val _featureIndex: Int) extends Serializable { - - /** - * [[ImpurityAggregator]] instance specifying the impurity type. - */ - - val impurityAggregator = new VarianceAggregator() - - val featureIndex: Int = _featureIndex - - /** - * Number of elements (Double values) used for the sufficient statistics of each bin. - */ - private val statsSize: Int = impurityAggregator.statsSize - - /** - * Number of bins for the feature. - */ - private val numBins: Int = { - metadata.numBins(featureIndex) - } - - /** - * Total number of elements stored in this aggregator - */ - private val allStatsSize: Int = numBins * statsSize - - /** - * Flat array of elements. - */ - private val allStats: Array[Double] = new Array[Double](allStatsSize) - - /** - * Array of parent node sufficient stats. - */ - private val parentStats: Array[Double] = new Array[Double](statsSize) - - /** - * Get an [[ImpurityCalculator]] for a given (node, feature, bin). - */ - def getImpurityCalculator(featureOffset: Int, binIndex: Int): ImpurityCalculator = { - impurityAggregator.getCalculator(allStats, binIndex * statsSize) - } - - /** - * Get an [[ImpurityCalculator]] for the parent node. 
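Both BaggedPoint samplers above reduce to drawing one small integer weight per (instance, subsample): a 0/1 Bernoulli draw when sampling without replacement, and a Poisson copy count when sampling with replacement, each seeded per partition for reproducibility. A runnable sketch of just the weight generation, substituting scala.util.Random for Spark's internal XORShiftRandom (commons-math3 is already a dependency of the deleted file):

import scala.util.Random

import org.apache.commons.math3.distribution.PoissonDistribution

object SubsampleWeightsSketch {
  def main(args: Array[String]): Unit = {
    val seed = 42L
    val partitionIndex = 0
    val numSubsamples = 3
    // Without replacement: one Bernoulli(rate) draw per subsample.
    val rng = new Random(seed + partitionIndex + 1)
    val bernoulliWeights = Array.fill(numSubsamples)(if (rng.nextDouble() < 0.8) 1 else 0)
    // With replacement: one Poisson(rate) copy count per subsample, as in bootstrapping.
    val poisson = new PoissonDistribution(0.8)
    poisson.reseedRandomGenerator(seed + partitionIndex + 1)
    val poissonWeights = Array.fill(numSubsamples)(poisson.sample())
    println(bernoulliWeights.mkString(",") + " | " + poissonWeights.mkString(","))
  }
}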
- */ - def getParentImpurityCalculator(): ImpurityCalculator = { - impurityAggregator.getCalculator(parentStats, 0) - } - - /** - * Update the stats for a given bin for ordered features, using the given label. - */ - def updateX(featureIndex: Int, binIndex: Int, label: Double): Unit = { - val i = binIndex * statsSize - impurityAggregator.updateX(allStats, i, label) - } - - /** - * Pre-compute feature offset for use with [[featureUpdate]]. - * For ordered features only. - */ - def getFeatureOffset(featureIndex: Int): Int = 0 - - /** - * For a given feature, merge the stats for two bins. - * - * @param featureOffset This is a pre-computed feature offset - * from [[getFeatureOffset]]. - * @param binIndex The other bin is merged into this bin. - * @param otherBinIndex This bin is not modified. - */ - def mergeForFeature(featureOffset: Int, binIndex: Int, otherBinIndex: Int): Unit = { - impurityAggregator.merge(allStats, binIndex * statsSize, otherBinIndex * statsSize) - } -} diff --git a/ml-core/src/main/scala/org/apache/spark/ml/tree/impl/DTStatsAggregator.scala b/ml-core/src/main/scala/org/apache/spark/ml/tree/impl/DTStatsAggregator.scala deleted file mode 100644 index e18154f..0000000 --- a/ml-core/src/main/scala/org/apache/spark/ml/tree/impl/DTStatsAggregator.scala +++ /dev/null @@ -1,187 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.ml.tree.impl - -import org.apache.spark.mllib.tree.impurity._ - - - -/** - * DecisionTree statistics aggregator for a node. - * This holds a flat array of statistics for a set of (features, bins) - * and helps with indexing. - * This class is abstract to support learning with and without feature subsampling. - */ -private[spark] class DTStatsAggregator( - val metadata: DecisionTreeMetadata, - featureSubset: Option[Array[Int]]) extends Serializable { - - /** - * [[ImpurityAggregator]] instance specifying the impurity type. - */ - val impurityAggregator: ImpurityAggregator = metadata.impurity match { - case Gini => new GiniAggregator(metadata.numClasses) - case Entropy => new EntropyAggregator(metadata.numClasses) - case Variance => new VarianceAggregator() - case _ => throw new IllegalArgumentException(s"Bad impurity parameter: ${metadata.impurity}") - } - - /** - * Number of elements (Double values) used for the sufficient statistics of each bin. - */ - private val statsSize: Int = impurityAggregator.statsSize - - /** - * Number of bins for each feature. This is indexed by the feature index.
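Both aggregator classes keep their sufficient statistics in one flat Double array. For the single-feature DTFeatureStatsAggregator above, the offset arithmetic collapses to binIndex * statsSize; a small sketch assuming a variance-style layout of (count, sum, sum of squares) per bin:

object FlatStatsSketch {
  // Hypothetical variance-style layout: (count, sum, sum of squares) per bin.
  val statsSize = 3

  def update(allStats: Array[Double], binIndex: Int, label: Double): Unit = {
    val i = binIndex * statsSize // single-feature offset arithmetic
    allStats(i) += 1.0
    allStats(i + 1) += label
    allStats(i + 2) += label * label
  }

  def main(args: Array[String]): Unit = {
    val numBins = 4
    val allStats = new Array[Double](numBins * statsSize)
    update(allStats, binIndex = 2, label = 1.5)
    println(allStats.mkString(",")) // only slots 6, 7, 8 are touched
  }
}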
- */ - private val numBins: Array[Int] = { - if (featureSubset.isDefined) { - featureSubset.get.map(metadata.numBins(_)) - } else { - metadata.numBins - } - } - - /** - * Offset for each feature for calculating indices into the [[allStats]] array. - */ - private val featureOffsets: Array[Int] = { - numBins.scanLeft(0)((total, nBins) => total + statsSize * nBins) - } - - /** - * Total number of elements stored in this aggregator - */ - private val allStatsSize: Int = featureOffsets.last - - /** - * Flat array of elements. - * Index for start of stats for a (feature, bin) is: - * index = featureOffsets(featureIndex) + binIndex * statsSize - */ - private val allStats: Array[Double] = new Array[Double](allStatsSize) - - /** - * Array of parent node sufficient stats. - * Note: parent stats need to be explicitly tracked in the [[DTStatsAggregator]] for unordered - * categorical features, because the parent [[Node]] object does not have [[ImpurityStats]] - * on the first iteration. - */ - private val parentStats: Array[Double] = new Array[Double](statsSize) - - /** - * Get an [[ImpurityCalculator]] for a given (node, feature, bin). - * - * @param featureOffset This is a pre-computed (node, feature) offset - * from [[getFeatureOffset]]. - */ - def getImpurityCalculator(featureOffset: Int, binIndex: Int): ImpurityCalculator = { - impurityAggregator.getCalculator(allStats, featureOffset + binIndex * statsSize) - } - - /** - * Get an [[ImpurityCalculator]] for the parent node. - */ - def getParentImpurityCalculator(): ImpurityCalculator = { - impurityAggregator.getCalculator(parentStats, 0) - } - - /** - * Update the stats for a given (feature, bin) for ordered features, using the given label. - */ - def update(featureIndex: Int, binIndex: Int, label: Double, instanceWeight: Int): Unit = { - val i = featureOffsets(featureIndex) + binIndex * statsSize - impurityAggregator.update(allStats, i, label, instanceWeight) - } - - /** - * Update the parent node stats using the given label. - */ - def updateParent(label: Double, instanceWeight: Int): Unit = { - impurityAggregator.update(parentStats, 0, label, instanceWeight) - } - - /** - * Faster version of [[update]]. - * Update the stats for a given (feature, bin), using the given label. - * - * @param featureOffset This is a pre-computed feature offset - * from [[getFeatureOffset]]. - */ - def featureUpdate( - featureOffset: Int, - binIndex: Int, - label: Double, - instanceWeight: Int): Unit = { - impurityAggregator.update(allStats, featureOffset + binIndex * statsSize, - label, instanceWeight) - } - - /** - * Pre-compute feature offset for use with [[featureUpdate]]. - * For ordered features only. - */ - def getFeatureOffset(featureIndex: Int): Int = featureOffsets(featureIndex) - - /** - * For a given feature, merge the stats for two bins. - * - * @param featureOffset This is a pre-computed feature offset - * from [[getFeatureOffset]]. - * @param binIndex The other bin is merged into this bin. - * @param otherBinIndex This bin is not modified. - */ - def mergeForFeature(featureOffset: Int, binIndex: Int, otherBinIndex: Int): Unit = { - impurityAggregator.merge(allStats, featureOffset + binIndex * statsSize, - featureOffset + otherBinIndex * statsSize) - } - - /** - * Merge this aggregator with another, and returns this aggregator. - * This method modifies this aggregator in-place. 
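For the multi-feature DTStatsAggregator, scanLeft over the per-feature bin counts produces each feature's starting offset, with the total array length appearing as the final element. A standalone illustration with hypothetical sizes:

object FeatureOffsetsSketch {
  def main(args: Array[String]): Unit = {
    val statsSize = 3
    val numBins = Array(4, 2, 5) // hypothetical bins per feature
    // One starting offset per feature; the final element is the total array length.
    val featureOffsets = numBins.scanLeft(0)((total, nBins) => total + statsSize * nBins)
    println(featureOffsets.mkString(",")) // 0,12,18,33
    // Stats for (feature f, bin b) start at featureOffsets(f) + b * statsSize.
    println(featureOffsets(1) + 1 * statsSize) // 15
  }
}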
- */ - def merge(other: DTStatsAggregator): DTStatsAggregator = { - require(allStatsSize == other.allStatsSize, - s"DTStatsAggregator.merge requires that both aggregators have the same length stats vectors." - + s" This aggregator is of length $allStatsSize, but the other is ${other.allStatsSize}.") - var i = 0 - // TODO: Test BLAS.axpy - while (i < allStatsSize) { - allStats(i) += other.allStats(i) - i += 1 - } - - require(statsSize == other.statsSize, - s"DTStatsAggregator.merge requires that both aggregators have the same length parent " + - s"stats vectors. This aggregator's parent stats are length $statsSize, " + - s"but the other is ${other.statsSize}.") - var j = 0 - while (j < statsSize) { - parentStats(j) += other.parentStats(j) - j += 1 - } - - this - } -} diff --git a/ml-core/src/main/scala/org/apache/spark/ml/tree/impl/GradientBoostedTreesCore.scala b/ml-core/src/main/scala/org/apache/spark/ml/tree/impl/GradientBoostedTreesCore.scala deleted file mode 100644 index 24e1930..0000000 --- a/ml-core/src/main/scala/org/apache/spark/ml/tree/impl/GradientBoostedTreesCore.scala +++ /dev/null @@ -1,256 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.ml.tree.impl - -import it.unimi.dsi.fastutil.objects.ObjectArrayList - -import org.apache.spark.internal.Logging -import org.apache.spark.ml.tree.{CategoricalSplit, ContinuousSplit, LearningNode, Split} -import org.apache.spark.mllib.tree.impurity.ImpurityCalculator -import org.apache.spark.mllib.tree.model.ImpurityStats - -object GradientBoostedTreesCore extends Logging{ - private[tree] class NodeIndexInfo( - val nodeIndexInGroup: Int, - val featureSubset: Option[Array[Int]], - val featureSubsetHashSetX: Option[scala.collection.mutable.HashSet[Int]] = None) - extends Serializable - - /** - * Calculate the impurity statistics for a given (feature, split) based upon left/right - * aggregates. 
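The calculateImpurityStats helper whose documentation begins above reduces to the classic weighted impurity decrease: the gain equals the parent impurity minus the child impurities weighted by their instance counts. A worked sketch with arbitrary numbers (helper name hypothetical):

object InfoGainSketch {
  def gain(parentImpurity: Double,
           leftImpurity: Double, leftCount: Long,
           rightImpurity: Double, rightCount: Long): Double = {
    val total = (leftCount + rightCount).toDouble
    // Weighted impurity decrease, the quantity maximized over candidate splits.
    parentImpurity -
      (leftCount / total) * leftImpurity -
      (rightCount / total) * rightImpurity
  }

  def main(args: Array[String]): Unit = {
    // Parent impurity 4.0 split into equal halves of impurity 1.0 and 3.0: gain 2.0.
    println(gain(4.0, 1.0, 50, 3.0, 50))
  }
}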
- * - * @param stats the recycled impurity statistics for all of this feature's splits, - only 'impurity' and 'impurityCalculator' remain valid between iterations - * @param leftImpurityCalculator left node aggregates for this (feature, split) - * @param rightImpurityCalculator right node aggregate for this (feature, split) - * @param metadata learning and dataset metadata for DecisionTree - * @return Impurity statistics for this (feature, split) - */ - private def calculateImpurityStats( - stats: ImpurityStats, - leftImpurityCalculator: ImpurityCalculator, - rightImpurityCalculator: ImpurityCalculator, - metadata: DecisionTreeMetadata): ImpurityStats = { - - val parentImpurityCalculator: ImpurityCalculator = if (stats == null) { - leftImpurityCalculator.copy.add(rightImpurityCalculator) - } else { - stats.impurityCalculator - } - - val impurity: Double = if (stats == null) { - parentImpurityCalculator.calculate() - } else { - stats.impurity - } - - val leftCount = leftImpurityCalculator.count - val rightCount = rightImpurityCalculator.count - - val totalCount = leftCount + rightCount - - // If left child or right child doesn't satisfy minimum instances per node, - // then this split is invalid, return invalid information gain stats. - if ((leftCount < metadata.minInstancesPerNode) || - (rightCount < metadata.minInstancesPerNode)) { - return ImpurityStats.getInvalidImpurityStats(parentImpurityCalculator) - } - - val leftImpurity = leftImpurityCalculator.calculate() // Note: This equals 0 if count = 0 - val rightImpurity = rightImpurityCalculator.calculate() - - val leftWeight = leftCount / totalCount.toDouble - val rightWeight = rightCount / totalCount.toDouble - - val gain = impurity - leftWeight * leftImpurity - rightWeight * rightImpurity - - // if information gain doesn't satisfy minimum information gain, - // then this split is invalid, return invalid information gain stats. - if (gain < metadata.minInfoGain) { - return ImpurityStats.getInvalidImpurityStats(parentImpurityCalculator) - } - - new ImpurityStats(gain, impurity, parentImpurityCalculator, - leftImpurityCalculator, rightImpurityCalculator) - } - - /** - * Find the best split for a node. - * - * @param binAggregates Bin statistics. - * @return tuple for best split: (Split, information gain, prediction at node) - */ - private[tree] def binsToBestSplitX( - binAggregates: DTFeatureStatsAggregator, - splits: ObjectArrayList[Split], - featureIndex: Int, - node: LearningNode): (Split, ImpurityStats) = { - - // Calculate InformationGain and ImpurityStats if current node is top node - val level = LearningNode.indexToLevel(node.id) - var gainAndImpurityStats: ImpurityStats = if (level == 0) { - null - } else { - node.stats - } - - if (binAggregates.metadata.numSplits(featureIndex) != 0) { - val featureIndexIdx = featureIndex - val numSplits = binAggregates.metadata.numSplits(featureIndex) - if (binAggregates.metadata.isContinuous(featureIndex)) { - // Cumulative sum (scanLeft) of bin statistics. - // Afterwards, binAggregates for a bin is the sum of aggregates for - // that bin + all preceding bins. - var splitIndex = 0 - while (splitIndex < numSplits) { - binAggregates.mergeForFeature(0, splitIndex + 1, splitIndex) - splitIndex += 1 - } - // Find best split.
- val (bestFeatureSplitIndex, bestFeatureGainStats) = - Range(0, numSplits).map { case splitIdx => - val leftChildStats = binAggregates.getImpurityCalculator(0, splitIdx) - val rightChildStats = - binAggregates.getImpurityCalculator(0, numSplits) - rightChildStats.subtract(leftChildStats) - gainAndImpurityStats = calculateImpurityStats(gainAndImpurityStats, - leftChildStats, rightChildStats, binAggregates.metadata) - (splitIdx, gainAndImpurityStats) - }.maxBy(_._2.gain) - (splits.get(bestFeatureSplitIndex), bestFeatureGainStats) - } else if (binAggregates.metadata.isUnordered(featureIndex)) { - // unreachable for GBDT - // Unordered categorical feature - // val leftChildOffset = binAggregates.getFeatureOffset(featureIndexIdx) - val (bestFeatureSplitIndex, bestFeatureGainStats) = - Range(0, numSplits).map { splitIndex => - val leftChildStats = binAggregates.getImpurityCalculator(0, splitIndex) - val rightChildStats = binAggregates.getImpurityCalculator(0, numSplits) - .subtract(leftChildStats) - gainAndImpurityStats = calculateImpurityStats(gainAndImpurityStats, - leftChildStats, rightChildStats, binAggregates.metadata) - (splitIndex, gainAndImpurityStats) - }.maxBy(_._2.gain) - (splits.get(bestFeatureSplitIndex), bestFeatureGainStats) - } else { - // Ordered categorical feature - val numCategories = binAggregates.metadata.numBins(featureIndex) - - /* Each bin is one category (feature value). - * The bins are ordered based on centroidForCategories, and this ordering determines which - * splits are considered. (With K categories, we consider K - 1 possible splits.) - * - * centroidForCategories is a list: (category, centroid) - */ - val centroidForCategories = Range(0, numCategories).map { case featureValue => - val categoryStats = - binAggregates.getImpurityCalculator(0, featureValue) - val centroid = if (categoryStats.count != 0) { - if (binAggregates.metadata.isMulticlass) { - // unreachable for GBDT - // multiclass classification - // For categorical variables in multiclass classification, - // the bins are ordered by the impurity of their corresponding labels. - categoryStats.calculate() - } else if (binAggregates.metadata.isClassification) { - // unreachable for GBDT - // binary classification - // For categorical variables in binary classification, - // the bins are ordered by the count of class 1. - categoryStats.stats(1) - } else { - // regression - // For categorical variables in regression and binary classification, - // the bins are ordered by the prediction. - categoryStats.predict - } - } else { - Double.MaxValue - } - (featureValue, centroid) - } - - logDebug(s"Centroids for categorical variable: ${centroidForCategories.mkString(",")}") - - // bins sorted by centroids - val categoriesSortedByCentroid = centroidForCategories.toList.sortBy(_._2) - - logDebug("Sorted centroids for categorical variable = " + - categoriesSortedByCentroid.mkString(",")) - - // Cumulative sum (scanLeft) of bin statistics. - // Afterwards, binAggregates for a bin is the sum of aggregates for - // that bin + all preceding bins. - var splitIndex = 0 - while (splitIndex < numSplits) { - val currentCategory = categoriesSortedByCentroid(splitIndex)._1 - val nextCategory = categoriesSortedByCentroid(splitIndex + 1)._1 - binAggregates.mergeForFeature(0, nextCategory, currentCategory) - splitIndex += 1 - } - // lastCategory = index of bin with total aggregates for this (node, feature) - val lastCategory = categoriesSortedByCentroid.last._1 - // Find best split. 
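Both the continuous and the ordered-categorical branches of binsToBestSplitX lean on the cumulative-merge trick shown above: after the in-place scan, bin i holds the statistics of bins 0 through i, so each candidate split's left child is a single lookup and its right child is the node total minus the left child. The idea on plain arrays, with hypothetical binary-classification counts:

object PrefixSumSplitSketch {
  def main(args: Array[String]): Unit = {
    // Hypothetical per-bin class counts for one feature, binary classification.
    val binCounts = Array(Array(8.0, 2.0), Array(5.0, 5.0), Array(2.0, 8.0), Array(1.0, 9.0))
    // After a cumulative merge, entry i holds the totals of bins 0..i.
    val prefix = binCounts.scanLeft(Array(0.0, 0.0)) { (acc, c) =>
      Array(acc(0) + c(0), acc(1) + c(1))
    }.tail
    val total = prefix.last
    for (split <- 0 until binCounts.length - 1) {
      val left = prefix(split) // one lookup per candidate split
      val right = Array(total(0) - left(0), total(1) - left(1)) // total minus left
      println(s"split $split: left=${left.mkString("/")} right=${right.mkString("/")}")
    }
  }
}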
- val (bestFeatureSplitIndex, bestFeatureGainStats) = - Range(0, numSplits).map { splitIndex => - val featureValue = categoriesSortedByCentroid(splitIndex)._1 - val leftChildStats = - binAggregates.getImpurityCalculator(0, featureValue) - val rightChildStats = - binAggregates.getImpurityCalculator(0, lastCategory) - rightChildStats.subtract(leftChildStats) - gainAndImpurityStats = calculateImpurityStats(gainAndImpurityStats, - leftChildStats, rightChildStats, binAggregates.metadata) - (splitIndex, gainAndImpurityStats) - }.maxBy(_._2.gain) - val categoriesForSplit = - categoriesSortedByCentroid.map(_._1.toDouble).slice(0, bestFeatureSplitIndex + 1) - val bestFeatureSplit = - new CategoricalSplit(featureIndex, categoriesForSplit.toArray, numCategories) - (bestFeatureSplit, bestFeatureGainStats) - } - } else { - // If no valid splits for features, then this split is invalid, - // return invalid information gain stats. Take any split and continue. - // Splits is empty, so arbitrarily choose to split on any threshold - // val parentImpurityCalculator = binAggregates.getParentImpurityCalculator() - // No split, no need to merge - val featureIndexIdx = featureIndex - val numSplits = binAggregates.metadata.numSplits(featureIndex) - val parentImpurityCalculator = binAggregates.getImpurityCalculator(0, numSplits) - if (binAggregates.metadata.isContinuous(featureIndex)) { - (new ContinuousSplit(featureIndex, 0), - ImpurityStats.getInvalidImpurityStats(parentImpurityCalculator)) - } else { - // Seems like unreachable for GBDT (as well as RF) - val numCategories = binAggregates.metadata.featureArity(featureIndex) - (new CategoricalSplit(featureIndex, Array(), numCategories), - ImpurityStats.getInvalidImpurityStats(parentImpurityCalculator)) - } - } - - // For each (feature, split), calculate the gain, and select the best (feature, split). - } -} diff --git a/ml-core/src/main/scala/org/apache/spark/ml/tree/impl/TreePointX.scala b/ml-core/src/main/scala/org/apache/spark/ml/tree/impl/TreePointX.scala index 7329562..0b8392a 100644 --- a/ml-core/src/main/scala/org/apache/spark/ml/tree/impl/TreePointX.scala +++ b/ml-core/src/main/scala/org/apache/spark/ml/tree/impl/TreePointX.scala @@ -1,9 +1,3 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with diff --git a/ml-core/src/main/scala/org/apache/spark/ml/tree/impl/TreePointY.scala b/ml-core/src/main/scala/org/apache/spark/ml/tree/impl/TreePointY.scala index 036cb97..4272edb 100644 --- a/ml-core/src/main/scala/org/apache/spark/ml/tree/impl/TreePointY.scala +++ b/ml-core/src/main/scala/org/apache/spark/ml/tree/impl/TreePointY.scala @@ -1,9 +1,3 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with diff --git a/ml-core/src/main/scala/org/apache/spark/mllib/clustering/LDAUtilsX.scala b/ml-core/src/main/scala/org/apache/spark/mllib/clustering/LDAUtilsX.scala index 4a30b9e..fc9420a 100644 --- a/ml-core/src/main/scala/org/apache/spark/mllib/clustering/LDAUtilsX.scala +++ b/ml-core/src/main/scala/org/apache/spark/mllib/clustering/LDAUtilsX.scala @@ -1,9 +1,3 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with diff --git a/ml-core/src/main/scala/org/apache/spark/mllib/clustering/OnlineLDAOptimizerXObj.scala b/ml-core/src/main/scala/org/apache/spark/mllib/clustering/OnlineLDAOptimizerXObj.scala index 50f3337..c2b1589 100644 --- a/ml-core/src/main/scala/org/apache/spark/mllib/clustering/OnlineLDAOptimizerXObj.scala +++ b/ml-core/src/main/scala/org/apache/spark/mllib/clustering/OnlineLDAOptimizerXObj.scala @@ -1,9 +1,3 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -25,7 +19,7 @@ package org.apache.spark.mllib.clustering import breeze.linalg.{sum, DenseMatrix => BDM, DenseVector => BDV} import breeze.numerics.{abs, exp} -import breeze.stats.distributions.Gamma +import breeze.stats.distributions.{Gamma, RandBasis} import org.apache.spark.mllib.linalg.{DenseVector, SparseVector, Vector} @@ -34,7 +28,7 @@ import org.apache.spark.mllib.linalg.{DenseVector, SparseVector, Vector} * Serializable companion object containing helper methods and shared code for * [[OnlineLDAOptimizer]] and [[LocalLDAModel]]. */ -private[clustering] object OnlineLDAOptimizerXObj { +private[spark] object OnlineLDAOptimizerXObj { /** * Uses variational inference to infer the topic distribution `gammad` given the term counts * for a document. `termCounts` must contain at least one non-zero entry, otherwise Breeze will @@ -47,25 +41,24 @@ private[clustering] object OnlineLDAOptimizerXObj { * @return Returns a tuple of `gammad` - estimate of gamma, the topic distribution, `sstatsd` - * statistics for updating lambda and `ids` - list of termCounts vector indices. 
*/ - private[clustering] def variationalTopicInference( - termCounts: Vector, + private[spark] def variationalTopicInference( + indices: List[Int], + values: Array[Double], expElogbeta: BDM[Double], alpha: breeze.linalg.Vector[Double], gammaShape: Double, - k: Int): (BDV[Double], BDM[Double], List[Int]) = { - val (ids: List[Int], cts: Array[Double]) = termCounts match { - case v: DenseVector => ((0 until v.size).toList, v.values) - case v: SparseVector => (v.indices.toList, v.values) - } + k: Int, + seed: Long): (BDV[Double], BDM[Double], List[Int]) = { // Initialize the variational distribution q(theta|gamma) for the mini-batch + val randBasis = new RandBasis(new org.apache.commons.math3.random.MersenneTwister(seed)) val gammad: BDV[Double] = - new Gamma(gammaShape, 1.0 / gammaShape).samplesVector(k) // K + new Gamma(gammaShape, 1.0 / gammaShape)(randBasis).samplesVector(k) // K val expElogthetad: BDV[Double] = exp(LDAUtilsX.dirichletExpectation(gammad)) // K - val expElogbetad = expElogbeta(ids, ::).toDenseMatrix // ids * K + val expElogbetad = expElogbeta(indices, ::).toDenseMatrix // ids * K val phiNorm: BDV[Double] = expElogbetad * expElogthetad +:+ 1e-100 // ids var meanGammaChange = 1D - val ctsVector = new BDV[Double](cts) // ids + val ctsVector = new BDV[Double](values) // ids // Iterate between gamma and phi until convergence while (meanGammaChange > 1e-3) { @@ -79,6 +72,20 @@ private[clustering] object OnlineLDAOptimizerXObj { } val sstatsd = expElogthetad.asDenseMatrix.t * (ctsVector /:/ phiNorm).asDenseMatrix - (gammad, sstatsd, ids) + (gammad, sstatsd, indices) + } + + private[clustering] def variationalTopicInference( + termCounts: Vector, + expElogbeta: BDM[Double], + alpha: breeze.linalg.Vector[Double], + gammaShape: Double, + k: Int, + seed: Long): (BDV[Double], BDM[Double], List[Int]) = { + val (ids: List[Int], cts: Array[Double]) = termCounts match { + case v: DenseVector => (List.range(0, v.size), v.values) + case v: SparseVector => (v.indices.toList, v.values) + } + variationalTopicInference(ids, cts, expElogbeta, alpha, gammaShape, k, seed) } } diff --git a/ml-core/src/main/scala/org/apache/spark/mllib/fpm/LocalPrefixSpan.scala b/ml-core/src/main/scala/org/apache/spark/mllib/fpm/LocalPrefixSpan.scala index 43e76b2..789c299 100644 --- a/ml-core/src/main/scala/org/apache/spark/mllib/fpm/LocalPrefixSpan.scala +++ b/ml-core/src/main/scala/org/apache/spark/mllib/fpm/LocalPrefixSpan.scala @@ -1,9 +1,3 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with diff --git a/ml-core/src/main/scala/org/apache/spark/mllib/fpm/PrefixSpanBase.scala b/ml-core/src/main/scala/org/apache/spark/mllib/fpm/PrefixSpanBase.scala index 573aace..313dfa5 100644 --- a/ml-core/src/main/scala/org/apache/spark/mllib/fpm/PrefixSpanBase.scala +++ b/ml-core/src/main/scala/org/apache/spark/mllib/fpm/PrefixSpanBase.scala @@ -1,9 +1,3 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
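The adapted variationalTopicInference above threads an explicit seed through the initialization: the Gamma draw now comes from a RandBasis built over a seeded MersenneTwister rather than Breeze's global RNG, so inference is deterministic for a fixed seed. A minimal standalone sketch of that pattern (values arbitrary):

import breeze.stats.distributions.{Gamma, RandBasis}
import org.apache.commons.math3.random.MersenneTwister

object SeededGammaSketch {
  def main(args: Array[String]): Unit = {
    val seed = 17L
    val gammaShape = 100.0
    // Same seed, same RandBasis stream, same initialization vector.
    val g1 = new Gamma(gammaShape, 1.0 / gammaShape)(
      new RandBasis(new MersenneTwister(seed))).samplesVector(5)
    val g2 = new Gamma(gammaShape, 1.0 / gammaShape)(
      new RandBasis(new MersenneTwister(seed))).samplesVector(5)
    println(g1 == g2) // true: the draw is reproducible
  }
}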
-* */ /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with diff --git a/ml-core/src/main/scala/org/apache/spark/mllib/tree/impurity/Entropy.scala b/ml-core/src/main/scala/org/apache/spark/mllib/tree/impurity/Entropy.scala deleted file mode 100644 index 7406c0d..0000000 --- a/ml-core/src/main/scala/org/apache/spark/mllib/tree/impurity/Entropy.scala +++ /dev/null @@ -1,171 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.mllib.tree.impurity - -import org.apache.spark.annotation.{DeveloperApi, Since} - -/** - * Class for calculating entropy during multiclass classification. - */ -@Since("1.0.0") -object Entropy extends Impurity { - - private[tree] def log2(x: Double) = scala.math.log(x) / scala.math.log(2) - - /** - * :: DeveloperApi :: - * information calculation for multiclass classification - * @param counts Array[Double] with counts for each label - * @param totalCount sum of counts for all labels - * @return information value, or 0 if totalCount = 0 - */ - @Since("1.1.0") - @DeveloperApi - override def calculate(counts: Array[Double], totalCount: Double): Double = { - if (totalCount == 0) { - return 0 - } - val numClasses = counts.length - var impurity = 0.0 - var classIndex = 0 - while (classIndex < numClasses) { - val classCount = counts(classIndex) - if (classCount != 0) { - val freq = classCount / totalCount - impurity -= freq * log2(freq) - } - classIndex += 1 - } - impurity - } - - /** - * :: DeveloperApi :: - * variance calculation - * @param count number of instances - * @param sum sum of labels - * @param sumSquares summation of squares of the labels - * @return information value, or 0 if count = 0 - */ - @Since("1.0.0") - @DeveloperApi - override def calculate(count: Double, sum: Double, sumSquares: Double): Double = - throw new UnsupportedOperationException("Entropy.calculate") - - /** - * Get this impurity instance. - * This is useful for passing impurity parameters to a Strategy in Java. - */ - @Since("1.1.0") - def instance: this.type = this - -} - -/** - * Class for updating views of a vector of sufficient statistics, - * in order to compute impurity from a sample. - * Note: Instances of this class do not hold the data; they operate on views of the data. - * @param numClasses Number of classes for label. 
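Entropy.calculate above is the standard Shannon entropy over label frequencies, with zero-count classes skipped so that 0 * log 0 never arises. The same computation in compact form, with two hand-checkable cases:

object EntropySketch {
  def log2(x: Double): Double = math.log(x) / math.log(2)

  def entropy(counts: Array[Double]): Double = {
    val total = counts.sum
    if (total == 0) 0.0
    else counts.filter(_ != 0).map { c =>
      val freq = c / total
      -freq * log2(freq) // zero-count classes are skipped above
    }.sum
  }

  def main(args: Array[String]): Unit = {
    println(entropy(Array(5.0, 5.0)))  // 1.0: evenly mixed binary node
    println(entropy(Array(10.0, 0.0))) // 0.0: pure node
  }
}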
- */ -private[spark] class EntropyAggregator(numClasses: Int) - extends ImpurityAggregator(numClasses) with Serializable { - - /** - * Update stats for one (node, feature, bin) with the given label. - * @param allStats Flat stats array, with stats for this (node, feature, bin) contiguous. - * @param offset Start index of stats for this (node, feature, bin). - */ - def update(allStats: Array[Double], offset: Int, label: Double, instanceWeight: Int): Unit = { - if (label >= statsSize) { - throw new IllegalArgumentException(s"EntropyAggregator given label $label" + - s" but requires label < numClasses (= $statsSize).") - } - if (label < 0) { - throw new IllegalArgumentException(s"EntropyAggregator given label $label" + - s" but requires label to be non-negative.") - } - allStats(offset + label.toInt) += instanceWeight - } - - /** - * Get an [[ImpurityCalculator]] for a (node, feature, bin). - * @param allStats Flat stats array, with stats for this (node, feature, bin) contiguous. - * @param offset Start index of stats for this (node, feature, bin). - */ - def getCalculator(allStats: Array[Double], offset: Int): EntropyCalculator = { - new EntropyCalculator(allStats.view(offset, offset + statsSize).toArray) - } -} - -/** - * Stores statistics for one (node, feature, bin) for calculating impurity. - * Unlike [[EntropyAggregator]], this class stores its own data and is for a specific - * (node, feature, bin). - * @param stats Array of sufficient statistics for a (node, feature, bin). - */ -private[spark] class EntropyCalculator(stats: Array[Double]) extends ImpurityCalculator(stats) { - - /** - * Make a deep copy of this [[ImpurityCalculator]]. - */ - def copy: EntropyCalculator = new EntropyCalculator(stats.clone()) - - /** - * Calculate the impurity from the stored sufficient statistics. - */ - def calculate(): Double = Entropy.calculate(stats, stats.sum) - - /** - * Number of data points accounted for in the sufficient statistics. - */ - def count: Long = stats.sum.toLong - - /** - * Prediction which should be made based on the sufficient statistics. - */ - def predict: Double = if (count == 0) { - 0 - } else { - indexOfLargestArrayElement(stats) - } - - /** - * Probability of the label given by [[predict]]. - */ - override def prob(label: Double): Double = { - val lbl = label.toInt - require(lbl < stats.length, - s"EntropyCalculator.prob given invalid label: $lbl (should be < ${stats.length})") - require(lbl >= 0, "Entropy does not support negative labels") - val cnt = count - if (cnt == 0) { - 0 - } else { - stats(lbl) / cnt - } - } - - override def toString: String = s"EntropyCalculator(stats = [${stats.mkString(", ")}])" - -} diff --git a/ml-core/src/main/scala/org/apache/spark/mllib/tree/impurity/Gini.scala b/ml-core/src/main/scala/org/apache/spark/mllib/tree/impurity/Gini.scala deleted file mode 100644 index f182519..0000000 --- a/ml-core/src/main/scala/org/apache/spark/mllib/tree/impurity/Gini.scala +++ /dev/null @@ -1,168 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.mllib.tree.impurity - -import org.apache.spark.annotation.{DeveloperApi, Since} - -/** - * Class for calculating the Gini impurity - * (http://en.wikipedia.org/wiki/Decision_tree_learning#Gini_impurity) - * during multiclass classification. - */ -@Since("1.0.0") -object Gini extends Impurity { - - /** - * :: DeveloperApi :: - * information calculation for multiclass classification - * @param counts Array[Double] with counts for each label - * @param totalCount sum of counts for all labels - * @return information value, or 0 if totalCount = 0 - */ - @Since("1.1.0") - @DeveloperApi - override def calculate(counts: Array[Double], totalCount: Double): Double = { - if (totalCount == 0) { - return 0 - } - val numClasses = counts.length - var impurity = 1.0 - var classIndex = 0 - while (classIndex < numClasses) { - val freq = counts(classIndex) / totalCount - impurity -= freq * freq - classIndex += 1 - } - impurity - } - - /** - * :: DeveloperApi :: - * variance calculation - * @param count number of instances - * @param sum sum of labels - * @param sumSquares summation of squares of the labels - * @return information value, or 0 if count = 0 - */ - @Since("1.0.0") - @DeveloperApi - override def calculate(count: Double, sum: Double, sumSquares: Double): Double = - throw new UnsupportedOperationException("Gini.calculate") - - /** - * Get this impurity instance. - * This is useful for passing impurity parameters to a Strategy in Java. - */ - @Since("1.1.0") - def instance: this.type = this - -} - -/** - * Class for updating views of a vector of sufficient statistics, - * in order to compute impurity from a sample. - * Note: Instances of this class do not hold the data; they operate on views of the data. - * @param numClasses Number of classes for label. - */ -private[spark] class GiniAggregator(numClasses: Int) - extends ImpurityAggregator(numClasses) with Serializable { - - /** - * Update stats for one (node, feature, bin) with the given label. - * @param allStats Flat stats array, with stats for this (node, feature, bin) contiguous. - * @param offset Start index of stats for this (node, feature, bin). - */ - def update(allStats: Array[Double], offset: Int, label: Double, instanceWeight: Int): Unit = { - if (label >= statsSize) { - throw new IllegalArgumentException(s"GiniAggregator given label $label" + - s" but requires label < numClasses (= $statsSize).") - } - if (label < 0) { - throw new IllegalArgumentException(s"GiniAggregator given label $label" + - s" but requires label to be non-negative.") - } - allStats(offset + label.toInt) += instanceWeight - } - - /** - * Get an [[ImpurityCalculator]] for a (node, feature, bin). - * @param allStats Flat stats array, with stats for this (node, feature, bin) contiguous. - * @param offset Start index of stats for this (node, feature, bin).
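Gini.calculate above is one minus the sum of squared class frequencies. The same computation in compact form, with the two boundary cases:

object GiniSketch {
  def gini(counts: Array[Double]): Double = {
    val total = counts.sum
    if (total == 0) 0.0
    else 1.0 - counts.map(c => (c / total) * (c / total)).sum
  }

  def main(args: Array[String]): Unit = {
    println(gini(Array(5.0, 5.0)))  // 0.5: maximally mixed binary node
    println(gini(Array(10.0, 0.0))) // 0.0: pure node
  }
}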
- */ - def getCalculator(allStats: Array[Double], offset: Int): GiniCalculator = { - new GiniCalculator(allStats.view(offset, offset + statsSize).toArray) - } -} - -/** - * Stores statistics for one (node, feature, bin) for calculating impurity. - * Unlike [[GiniAggregator]], this class stores its own data and is for a specific - * (node, feature, bin). - * @param stats Array of sufficient statistics for a (node, feature, bin). - */ -private[spark] class GiniCalculator(stats: Array[Double]) extends ImpurityCalculator(stats) { - - /** - * Make a deep copy of this [[ImpurityCalculator]]. - */ - def copy: GiniCalculator = new GiniCalculator(stats.clone()) - - /** - * Calculate the impurity from the stored sufficient statistics. - */ - def calculate(): Double = Gini.calculate(stats, stats.sum) - - /** - * Number of data points accounted for in the sufficient statistics. - */ - def count: Long = stats.sum.toLong - - /** - * Prediction which should be made based on the sufficient statistics. - */ - def predict: Double = if (count == 0) { - 0 - } else { - indexOfLargestArrayElement(stats) - } - - /** - * Probability of the label given by [[predict]]. - */ - override def prob(label: Double): Double = { - val lbl = label.toInt - require(lbl < stats.length, - s"GiniCalculator.prob given invalid label: $lbl (should be < ${stats.length})") - require(lbl >= 0, "GiniImpurity does not support negative labels") - val cnt = count - if (cnt == 0) { - 0 - } else { - stats(lbl) / cnt - } - } - - override def toString: String = s"GiniCalculator(stats = [${stats.mkString(", ")}])" - -} diff --git a/ml-core/src/main/scala/org/apache/spark/mllib/tree/impurity/Impurities.scala b/ml-core/src/main/scala/org/apache/spark/mllib/tree/impurity/Impurities.scala deleted file mode 100644 index f470091..0000000 --- a/ml-core/src/main/scala/org/apache/spark/mllib/tree/impurity/Impurities.scala +++ /dev/null @@ -1,38 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.mllib.tree.impurity - -/** - * Factory for Impurity instances.
- */ -private[mllib] object Impurities { - - def fromString(name: String): Impurity = name match { - case "gini" => Gini - case "entropy" => Entropy - case "variance" => Variance - case _ => throw new IllegalArgumentException(s"Did not recognize Impurity name: $name") - } - -} diff --git a/ml-core/src/main/scala/org/apache/spark/mllib/tree/impurity/Impurity.scala b/ml-core/src/main/scala/org/apache/spark/mllib/tree/impurity/Impurity.scala deleted file mode 100644 index 0492e1c..0000000 --- a/ml-core/src/main/scala/org/apache/spark/mllib/tree/impurity/Impurity.scala +++ /dev/null @@ -1,204 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.mllib.tree.impurity - -import java.util.Locale - -import org.apache.spark.annotation.{DeveloperApi, Since} - -/** - * Trait for calculating information gain. - * This trait is used for - * (a) setting the impurity parameter in [[org.apache.spark.mllib.tree.configuration.Strategy]] - * (b) calculating impurity values from sufficient statistics. - */ -@Since("1.0.0") -trait Impurity extends Serializable { - - /** - * :: DeveloperApi :: - * information calculation for multiclass classification - * @param counts Array[Double] with counts for each label - * @param totalCount sum of counts for all labels - * @return information value, or 0 if totalCount = 0 - */ - @Since("1.1.0") - @DeveloperApi - def calculate(counts: Array[Double], totalCount: Double): Double - - /** - * :: DeveloperApi :: - * information calculation for regression - * @param count number of instances - * @param sum sum of labels - * @param sumSquares summation of squares of the labels - * @return information value, or 0 if count = 0 - */ - @Since("1.0.0") - @DeveloperApi - def calculate(count: Double, sum: Double, sumSquares: Double): Double -} - -/** - * Interface for updating views of a vector of sufficient statistics, - * in order to compute impurity from a sample. - * Note: Instances of this class do not hold the data; they operate on views of the data. - * @param statsSize Length of the vector of sufficient statistics for one bin. - */ -private[spark] abstract class ImpurityAggregator(val statsSize: Int) extends Serializable { - - /** - * Merge the stats from one bin into another. - * @param allStats Flat stats array, with stats for this (node, feature, bin) contiguous. - * @param offset Start index of stats for (node, feature, bin) which is modified by the merge. 
- * @param otherOffset Start index of stats for (node, feature, other bin) which is not modified. - */ - def merge(allStats: Array[Double], offset: Int, otherOffset: Int): Unit = { - var i = 0 - while (i < statsSize) { - allStats(offset + i) += allStats(otherOffset + i) - i += 1 - } - } - - /** - * Update stats for one (node, feature, bin) with the given label. - * @param allStats Flat stats array, with stats for this (node, feature, bin) contiguous. - * @param offset Start index of stats for this (node, feature, bin). - */ - def update(allStats: Array[Double], offset: Int, label: Double, instanceWeight: Int): Unit - - /** - * Get an [[ImpurityCalculator]] for a (node, feature, bin). - * @param allStats Flat stats array, with stats for this (node, feature, bin) contiguous. - * @param offset Start index of stats for this (node, feature, bin). - */ - def getCalculator(allStats: Array[Double], offset: Int): ImpurityCalculator -} - -/** - * Stores statistics for one (node, feature, bin) for calculating impurity. - * Unlike [[ImpurityAggregator]], this class stores its own data and is for a specific - * (node, feature, bin). - * @param stats Array of sufficient statistics for a (node, feature, bin). - */ -private[spark] abstract class ImpurityCalculator(val stats: Array[Double]) extends Serializable { - - /** - * Make a deep copy of this [[ImpurityCalculator]]. - */ - def copy: ImpurityCalculator - - /** - * Calculate the impurity from the stored sufficient statistics. - */ - def calculate(): Double - - /** - * Add the stats from another calculator into this one, modifying and returning this calculator. - */ - def add(other: ImpurityCalculator): ImpurityCalculator = { - require(stats.length == other.stats.length, - s"Two ImpurityCalculator instances cannot be added with different counts sizes." + - s" Sizes are ${stats.length} and ${other.stats.length}.") - var i = 0 - val len = other.stats.length - while (i < len) { - stats(i) += other.stats(i) - i += 1 - } - this - } - - /** - * Subtract the stats from another calculator from this one, modifying and returning this - * calculator. - */ - def subtract(other: ImpurityCalculator): ImpurityCalculator = { - require(stats.length == other.stats.length, - s"Two ImpurityCalculator instances cannot be subtracted with different counts sizes." + - s" Sizes are ${stats.length} and ${other.stats.length}.") - var i = 0 - val len = other.stats.length - while (i < len) { - stats(i) -= other.stats(i) - i += 1 - } - this - } - - /** - * Number of data points accounted for in the sufficient statistics. - */ - def count: Long - - /** - * Prediction which should be made based on the sufficient statistics. - */ - def predict: Double - - /** - * Probability of the label given by [[predict]], or -1 if no probability is available. - */ - def prob(label: Double): Double = -1 - - /** - * Return the index of the largest array element. - * Fails if the array is empty. 
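The add and subtract operations above exist chiefly so that best-split search can keep a single copy of the parent's statistics and derive each right child as parent minus left, in place. The pattern in isolation, with hypothetical counts:

object ParentMinusLeftSketch {
  def main(args: Array[String]): Unit = {
    val parent = Array(10.0, 7.0) // class counts at the node, kept once
    val left = Array(4.0, 1.0)    // counts routed left by one candidate split
    val right = parent.clone()
    for (i <- right.indices) right(i) -= left(i) // subtract in place
    println(right.mkString(",")) // 6.0,6.0
  }
}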
- */ - protected def indexOfLargestArrayElement(array: Array[Double]): Int = { - val result = array.foldLeft((-1, Double.MinValue, 0)) { - case ((maxIndex, maxValue, currentIndex), currentValue) => - if (currentValue > maxValue) { - (currentIndex, currentValue, currentIndex + 1) - } else { - (maxIndex, maxValue, currentIndex + 1) - } - } - if (result._1 < 0) { - throw new RuntimeException("ImpurityCalculator internal error:" + - " indexOfLargestArrayElement failed") - } - result._1 - } - -} - -private[spark] object ImpurityCalculator { - - /** - * Create an [[ImpurityCalculator]] instance of the given impurity type and with - * the given stats. - */ - def getCalculator(impurity: String, stats: Array[Double]): ImpurityCalculator = { - impurity.toLowerCase(Locale.ROOT) match { - case "gini" => new GiniCalculator(stats) - case "entropy" => new EntropyCalculator(stats) - case "variance" => new VarianceCalculator(stats) - case _ => - throw new IllegalArgumentException( - s"ImpurityCalculator builder did not recognize impurity type: $impurity") - } - } -} diff --git a/ml-core/src/main/scala/org/apache/spark/mllib/tree/impurity/Variance.scala b/ml-core/src/main/scala/org/apache/spark/mllib/tree/impurity/Variance.scala deleted file mode 100644 index 732ceaa..0000000 --- a/ml-core/src/main/scala/org/apache/spark/mllib/tree/impurity/Variance.scala +++ /dev/null @@ -1,148 +0,0 @@ -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.spark.mllib.tree.impurity - -import org.apache.spark.annotation.{DeveloperApi, Since} - -/** - * Class for calculating variance during regression - */ -@Since("1.0.0") -object Variance extends Impurity { - - /** - * :: DeveloperApi :: - * information calculation for multiclass classification - * @param counts Array[Double] with counts for each label - * @param totalCount sum of counts for all labels - * @return information value, or 0 if totalCount = 0 - */ - @Since("1.1.0") - @DeveloperApi - override def calculate(counts: Array[Double], totalCount: Double): Double = - throw new UnsupportedOperationException("Variance.calculate") - - /** - * :: DeveloperApi :: - * variance calculation - * @param count number of instances - * @param sum sum of labels - * @param sumSquares summation of squares of the labels - * @return information value, or 0 if count = 0 - */ - @Since("1.0.0") - @DeveloperApi - override def calculate(count: Double, sum: Double, sumSquares: Double): Double = { - if (count == 0) { - return 0 - } - val squaredLoss = sumSquares - (sum * sum) / count - squaredLoss / count - } - - /** - * Get this impurity instance. - * This is useful for passing impurity parameters to a Strategy in Java. - */ - @Since("1.0.0") - def instance: this.type = this - -} - -/** - * Class for updating views of a vector of sufficient statistics, - * in order to compute impurity from a sample. - * Note: Instances of this class do not hold the data; they operate on views of the data. - */ -private[spark] class VarianceAggregator() - extends ImpurityAggregator(statsSize = 3) with Serializable { - - /** - * Update stats for one (node, feature, bin) with the given label. - * @param allStats Flat stats array, with stats for this (node, feature, bin) contiguous. - * @param offset Start index of stats for this (node, feature, bin). - */ - def update(allStats: Array[Double], offset: Int, label: Double, instanceWeight: Int): Unit = { - allStats(offset) += instanceWeight - allStats(offset + 1) += instanceWeight * label - allStats(offset + 2) += instanceWeight * label * label - } - - def updateX(allStats: Array[Double], offset: Int, label: Double): Unit = { - allStats(offset) += 1.0 - allStats(offset + 1) += label - allStats(offset + 2) += label * label - } - - /** - * Get an [[ImpurityCalculator]] for a (node, feature, bin). - * @param allStats Flat stats array, with stats for this (node, feature, bin) contiguous. - * @param offset Start index of stats for this (node, feature, bin). - */ - def getCalculator(allStats: Array[Double], offset: Int): VarianceCalculator = { - new VarianceCalculator(allStats.view(offset, offset + statsSize).toArray) - } -} - -/** - * Stores statistics for one (node, feature, bin) for calculating impurity. - * Unlike [[GiniAggregator]], this class stores its own data and is for a specific - * (node, feature, bin). - * @param stats Array of sufficient statistics for a (node, feature, bin). - */ -private[spark] class VarianceCalculator(stats: Array[Double]) extends ImpurityCalculator(stats) { - - require(stats.length == 3, - s"VarianceCalculator requires sufficient statistics array stats to be of length 3," + - s" but was given array of length ${stats.length}.") - - /** - * Make a deep copy of this [[ImpurityCalculator]]. - */ - def copy: VarianceCalculator = new VarianceCalculator(stats.clone()) - - /** - * Calculate the impurity from the stored sufficient statistics. 
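Variance.calculate above recovers the population variance from the three sufficient statistics: (sumSquares - sum^2 / count) / count, which is E[x^2] - E[x]^2. A one-function sketch with a hand-checkable case:

object VarianceSketch {
  // (sumSquares - sum^2 / count) / count, i.e. E[x^2] - E[x]^2.
  def variance(count: Double, sum: Double, sumSquares: Double): Double =
    if (count == 0) 0.0 else (sumSquares - (sum * sum) / count) / count

  def main(args: Array[String]): Unit = {
    // Labels 1, 2, 3: count = 3, sum = 6, sumSquares = 14, variance = 2/3.
    println(variance(3.0, 6.0, 14.0)) // 0.666...
  }
}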
- */ - def calculate(): Double = Variance.calculate(stats(0), stats(1), stats(2)) - - /** - * Number of data points accounted for in the sufficient statistics. - */ - def count: Long = stats(0).toLong - - /** - * Prediction which should be made based on the sufficient statistics. - */ - def predict: Double = if (count == 0) { - 0 - } else { - stats(1) / count - } - - override def toString: String = { - s"VarianceAggregator(cnt = ${stats(0)}, sum = ${stats(1)}, sum2 = ${stats(2)})" - } - -} diff --git a/ml-kernel-client-core/pom.xml b/ml-kernel-client-core/pom.xml index 561c79e..e0e7f74 100644 --- a/ml-kernel-client-core/pom.xml +++ b/ml-kernel-client-core/pom.xml @@ -6,7 +6,7 @@ 4.0.0 - boostkit-ml-kernel-client-core_2.11 + boostkit-ml-kernel-client-core_2.12 2.1.0 ${project.artifactId} Spark ml core diff --git a/ml-kernel-client/pom.xml b/ml-kernel-client/pom.xml index 2be4e38..7434007 100644 --- a/ml-kernel-client/pom.xml +++ b/ml-kernel-client/pom.xml @@ -6,7 +6,7 @@ 4.0.0 - boostkit-ml-kernel-client_2.11 + boostkit-ml-kernel-client_2.12 2.1.0 ${project.artifactId} Spark ml core @@ -14,7 +14,7 @@ org.apache.spark - boostkit-ml-core_2.11 + boostkit-ml-core_2.12 ${project.version} ${spark.version} diff --git a/ml-kernel-client/src/main/scala/breeze/linalg/blas/YTYUtils.scala b/ml-kernel-client/src/main/scala/breeze/linalg/blas/YTYUtils.scala index 8dd48df..8c43aa4 100644 --- a/ml-kernel-client/src/main/scala/breeze/linalg/blas/YTYUtils.scala +++ b/ml-kernel-client/src/main/scala/breeze/linalg/blas/YTYUtils.scala @@ -6,7 +6,10 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * */ /* - * Copyright (c). Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * This file is licensed to You under the Apache License, Version 2.0; + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 */ package breeze.linalg.blas diff --git a/ml-kernel-client/src/main/scala/breeze/optimize/ACC.scala b/ml-kernel-client/src/main/scala/breeze/optimize/ACC.scala deleted file mode 100644 index bd2f43a..0000000 --- a/ml-kernel-client/src/main/scala/breeze/optimize/ACC.scala +++ /dev/null @@ -1,47 +0,0 @@ -// scalastyle:off header.matches -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * This file to You under the Apache License, Version 2.0; - * you may not use this file except in compliance with - * the License.
You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 - */ - -package breeze.optimize - -import breeze.math.MutableInnerProductModule - -object ACC{ - - def update[T]( - step: T, - delta: T, - mStep: IndexedSeq[T], - g: IndexedSeq[T], - m: Int) (implicit space: MutableInnerProductModule[T, Double]): - (IndexedSeq[T], IndexedSeq[T]) = { - null - } - - def updateMomentum[T]( - m: T, - dir: T, - coeff: Double, - uCoeff: Double) (implicit space: MutableInnerProductModule[T, Double]): T = { - null.asInstanceOf[T] - } - - def getInverseOfHessian[T]( - g: T, - deltaA: IndexedSeq[T], - deltaB: IndexedSeq[T], - m: Int, - size: Int)(implicit space: MutableInnerProductModule[T, Double]): T = { - null.asInstanceOf[T] - } -} diff --git a/ml-kernel-client/src/main/scala/breeze/optimize/LBFGSL.scala b/ml-kernel-client/src/main/scala/breeze/optimize/LBFGSL.scala deleted file mode 100644 index ac14511..0000000 --- a/ml-kernel-client/src/main/scala/breeze/optimize/LBFGSL.scala +++ /dev/null @@ -1,84 +0,0 @@ -// scalastyle:off header.matches -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * This file to You under the Apache License, Version 2.0; - * you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 - */ - -package breeze.optimize - -import breeze.linalg.DenseVector -import breeze.util.SerializableLogging - -class LBFGSL( - var lowerBounds: DenseVector[Double], - var upperBounds: DenseVector[Double], - maxIterations: Int, - maxFCalls: Int, - m: Int, - fTolerance: Double, - xTolerance: Double) - extends FirstOrderMinimizer[DenseVector[Double], DiffFunction[DenseVector[Double]]](null) - with SerializableLogging { - - def this( - lowerBounds: DenseVector[Double], - upperBounds: DenseVector[Double], - maxIterations: Int, - m: Int, - fTolerance: Double) = { - this(lowerBounds, upperBounds, maxIterations, 0, m, fTolerance, 0.0) - } - - def this(maxIterations: Int, m: Int, fTolerance: Double) = { - this(null, null, maxIterations, 0, m, fTolerance, 0.0) - } - - override def iterations(f: DiffFunction[DenseVector[Double]], x0: DenseVector[Double]): - Iterator[FirstOrderMinimizer.State[DenseVector[Double], convergenceCheck.Info, History]] = { - null - } - - case class History() - - override def initialHistory( - f: DiffFunction[DenseVector[Double]], - init: DenseVector[Double]): History = { - null - } - - override def updateHistory( - newX: DenseVector[Double], - newGrad: DenseVector[Double], - newVal: Double, - f: DiffFunction[DenseVector[Double]], - oldState: State): History = { - null - } - - override def chooseDescentDirection(state: State, f: DiffFunction[DenseVector[Double]]): - DenseVector[Double] = { - null - } - - override def determineStepSize( - state: State, - f: DiffFunction[DenseVector[Double]], - direction: DenseVector[Double]): Double = { - 0.0 - } - - override def takeStep( - state: State, - dir: DenseVector[Double], - stepSize: Double): DenseVector[Double] = { - null - } -} diff --git a/ml-kernel-client/src/main/scala/breeze/optimize/OWLQNL.scala b/ml-kernel-client/src/main/scala/breeze/optimize/OWLQNL.scala deleted file mode 100644 index abb342d..0000000 --- a/ml-kernel-client/src/main/scala/breeze/optimize/OWLQNL.scala +++ /dev/null @@ -1,78 +0,0 @@ -// 
scalastyle:off header.matches -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * This file to You under the Apache License, Version 2.0; - * you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 - */ - -package breeze.optimize - -import breeze.linalg.DenseVector -import breeze.util.SerializableLogging - -class OWLQNL( - maxIterations: Int, - maxFCalls: Int, - m: Int, - fTolerance: Double, - xTolerance: Double, - var l1RegParam: DenseVector[Double]) - extends FirstOrderMinimizer[DenseVector[Double], DiffFunction[DenseVector[Double]]](null) - with SerializableLogging { - - def this( - maxIterations: Int, - m: Int, - fTolerance: Double, - l1RegParam: DenseVector[Double]) = { - this(maxIterations, 0, m, fTolerance, 0.0, l1RegParam) - } - - override def iterations(f: DiffFunction[DenseVector[Double]], x0: DenseVector[Double]): - Iterator[FirstOrderMinimizer.State[DenseVector[Double], convergenceCheck.Info, History]] = { - null - } - - case class History() - - override def initialHistory( - f: DiffFunction[DenseVector[Double]], - init: DenseVector[Double]): History = { - null - } - - override def updateHistory( - newX: DenseVector[Double], - newGrad: DenseVector[Double], - newVal: Double, - f: DiffFunction[DenseVector[Double]], - oldState: State): History = { - null - } - - override def chooseDescentDirection(state: State, f: DiffFunction[DenseVector[Double]]): - DenseVector[Double] = { - null - } - - override def determineStepSize( - state: State, - f: DiffFunction[DenseVector[Double]], - direction: DenseVector[Double]): Double = { - 0.0 - } - - override def takeStep( - state: State, - dir: DenseVector[Double], - stepSize: Double): DenseVector[Double] = { - null - } -} diff --git a/ml-kernel-client/src/main/scala/org/apache/spark/ml/recommendation/ALSUtils.scala b/ml-kernel-client/src/main/scala/org/apache/spark/ml/recommendation/ALSUtils.scala index dca6a58..bf33ee6 100644 --- a/ml-kernel-client/src/main/scala/org/apache/spark/ml/recommendation/ALSUtils.scala +++ b/ml-kernel-client/src/main/scala/org/apache/spark/ml/recommendation/ALSUtils.scala @@ -6,7 +6,10 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * */ /* - * Copyright (c). Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * This file is licensed to You under the Apache License, Version 2.0; + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 */ package org.apache.spark.ml.recommendation diff --git a/ml-kernel-client/src/main/scala/org/apache/spark/ml/tree/impl/GradientBoostedTreesUtil.scala b/ml-kernel-client/src/main/scala/org/apache/spark/ml/tree/impl/GradientBoostedTreesUtil.scala deleted file mode 100644 index 736cfaf..0000000 --- a/ml-kernel-client/src/main/scala/org/apache/spark/ml/tree/impl/GradientBoostedTreesUtil.scala +++ /dev/null @@ -1,77 +0,0 @@ -// scalastyle:off header.matches -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-* */ -/* - * This file to You under the Apache License, Version 2.0; - * you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 - */ - -package org.apache.spark.ml.tree.impl - -import it.unimi.dsi.fastutil.doubles.DoubleArrayList -import it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap -import it.unimi.dsi.fastutil.ints.IntArrayList -import it.unimi.dsi.fastutil.objects.ObjectArrayList - -import org.apache.spark.broadcast.Broadcast -import org.apache.spark.internal.Logging -import org.apache.spark.ml.feature.LabeledPoint -import org.apache.spark.ml.tree.LearningNode -import org.apache.spark.ml.tree.Split -import org.apache.spark.ml.tree.impl.GradientBoostedTreesCore.NodeIndexInfo -import org.apache.spark.mllib.tree.configuration.{Strategy => OldStrategy} -import org.apache.spark.mllib.tree.model.ImpurityStats -import org.apache.spark.rdd.RDD - -object GradientBoostedTreesUtil extends Logging { - - def dataProcessX( - input: RDD[LabeledPoint], - splits: Array[Array[Split]], - treeStrategy: OldStrategy, - metadata: DecisionTreeMetadata, - timer: TimeTracker, - seed: Long): (RDD[TreePoint], RDD[(Int, (IntArrayList, ObjectArrayList[Split]))], - Broadcast[DoubleArrayList], Broadcast[Int2ObjectOpenHashMap[IntArrayList]]) = { - null - } - - def nodeIdCacheXConstruction( - nodes: Array[LearningNode], - rawPartInfoBc: Broadcast[Int2ObjectOpenHashMap[IntArrayList]]) - : Int2ObjectOpenHashMap[Int2ObjectOpenHashMap[IntArrayList]] = { - null - } - - def chooseBestSplits( - input: RDD[(Int, (IntArrayList, ObjectArrayList[Split]))], - nodeIndexInfo: Map[Int, Map[Int, NodeIndexInfo]], - metadata: DecisionTreeMetadata, - nodeIdCacheBc: Broadcast[Int2ObjectOpenHashMap[Int2ObjectOpenHashMap[IntArrayList]]], - labelArrayBc: Broadcast[DoubleArrayList], - nodes: Array[LearningNode]): scala.collection.Map[Int, (Split, ImpurityStats)] = { - null - } - - - - - def updateNodeIdCache( - nodeIdCache: Int2ObjectOpenHashMap[Int2ObjectOpenHashMap[IntArrayList]], - nodeIdCacheBc: Broadcast[Int2ObjectOpenHashMap[Int2ObjectOpenHashMap[IntArrayList]]], - input: RDD[TreePoint], - nodesForGroup: Map[Int, Array[LearningNode]], - nodeIndexInfo: Map[Int, Map[Int, NodeIndexInfo]], - splits: Array[Array[Split]], - rawPartInfoBc: Broadcast[Int2ObjectOpenHashMap[IntArrayList]], - metadata: DecisionTreeMetadata, - timer: TimeTracker): Unit = { - } - -} diff --git a/ml-kernel-client/src/main/scala/org/apache/spark/mllib.clustering/KmeansUtil.scala b/ml-kernel-client/src/main/scala/org/apache/spark/mllib.clustering/KmeansUtil.scala deleted file mode 100644 index 83de295..0000000 --- a/ml-kernel-client/src/main/scala/org/apache/spark/mllib.clustering/KmeansUtil.scala +++ /dev/null @@ -1,38 +0,0 @@ -// scalastyle:off header.matches -/* -* Copyright (C) 2021. Huawei Technologies Co., Ltd. -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -* */ -/* - * This file to You under the Apache License, Version 2.0; - * you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 - */ - -package org.apache.spark.mllib.clustering - -object KmeansUtil { - - def generateDisMatrix( - centers: Array[VectorWithNorm], parLevel: Int): Array[Double] = { - val cl = centers.length - Array.fill(cl * cl)(0.0) - } - - def findClosest( - centers: TraversableOnce[VectorWithNorm], - point: VectorWithNorm, - s: Array[Double]): (Int, Double) = { - (-1, -1.0) - } - - def fastDistance( - v1: VectorWithNorm, - v2: VectorWithNorm): Double = { - -1.0 - } - -} diff --git a/ml-kernel-client/src/main/scala/org/apache/spark/mllib.clustering/LDAUtilsXOpt.scala b/ml-kernel-client/src/main/scala/org/apache/spark/mllib.clustering/LDAUtilsXOpt.scala index e5c042f..e06a3d6 100644 --- a/ml-kernel-client/src/main/scala/org/apache/spark/mllib.clustering/LDAUtilsXOpt.scala +++ b/ml-kernel-client/src/main/scala/org/apache/spark/mllib.clustering/LDAUtilsXOpt.scala @@ -50,7 +50,8 @@ object LDAUtilsXOpt { vocabSize: Int, logphatPartOptionBase: () => Option[BDV[Double]], alpha: BV[Double], - gammaShape: Double): RDD[(BDM[Double], Option[BDV[Double]], Long)] = { + gammaShape: Double, + seed: Long): RDD[(BDM[Double], Option[BDV[Double]], Long)] = { null } diff --git a/ml-xgboost/.clang-tidy b/ml-xgboost/.clang-tidy deleted file mode 100644 index 3be1d9e..0000000 --- a/ml-xgboost/.clang-tidy +++ /dev/null @@ -1,21 +0,0 @@ -Checks: 'modernize-*,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,-modernize-avoid-c-arrays,-modernize-use-trailing-return-type,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming' -CheckOptions: - - { key: readability-identifier-naming.ClassCase, value: CamelCase } - - { key: readability-identifier-naming.StructCase, value: CamelCase } - - { key: readability-identifier-naming.TypeAliasCase, value: CamelCase } - - { key: readability-identifier-naming.TypedefCase, value: CamelCase } - - { key: readability-identifier-naming.TypeTemplateParameterCase, value: CamelCase } - - { key: readability-identifier-naming.MemberCase, value: lower_case } - - { key: readability-identifier-naming.PrivateMemberSuffix, value: '_' } - - { key: readability-identifier-naming.ProtectedMemberSuffix, value: '_' } - - { key: readability-identifier-naming.EnumCase, value: CamelCase } - - { key: readability-identifier-naming.EnumConstant, value: CamelCase } - - { key: readability-identifier-naming.EnumConstantPrefix, value: k } - - { key: readability-identifier-naming.GlobalConstantCase, value: CamelCase } - - { key: readability-identifier-naming.GlobalConstantPrefix, value: k } - - { key: readability-identifier-naming.StaticConstantCase, value: CamelCase } - - { key: readability-identifier-naming.StaticConstantPrefix, value: k } - - { key: readability-identifier-naming.ConstexprVariableCase, value: CamelCase } - - { key: readability-identifier-naming.ConstexprVariablePrefix, value: k } - - { key: readability-identifier-naming.FunctionCase, value: CamelCase } - - { key: readability-identifier-naming.NamespaceCase, value: lower_case } diff --git a/ml-xgboost/.editorconfig b/ml-xgboost/.editorconfig deleted file mode 100644 index 97a7bc1..0000000 --- a/ml-xgboost/.editorconfig +++ /dev/null @@ -1,11 +0,0 @@ -root = true - -[*] -charset=utf-8 -indent_style = space -indent_size = 2 -insert_final_newline = true - -[*.py] -indent_style = space -indent_size = 4 diff --git a/ml-xgboost/.gitmodules b/ml-xgboost/.gitmodules deleted file mode 100644 index 35afd1d..0000000 --- 
a/ml-xgboost/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "cub"] - path = cub - url = https://github.com/NVlabs/cub diff --git a/ml-xgboost/.travis.yml b/ml-xgboost/.travis.yml deleted file mode 100644 index de23566..0000000 --- a/ml-xgboost/.travis.yml +++ /dev/null @@ -1,79 +0,0 @@ -# disable sudo for container build. -sudo: required - -# Enable testing on OS X -os: - - linux - - osx - -osx_image: xcode10.1 -dist: bionic - -# Use Build Matrix to do lint and build separately -env: - matrix: - # python package test - - TASK=python_test - # test installation of Python source distribution - - TASK=python_sdist_test - # java package test - - TASK=java_test - # cmake test - - TASK=cmake_test - - global: - - secure: "PR16i9F8QtNwn99C5NDp8nptAS+97xwDtXEJJfEiEVhxPaaRkOp0MPWhogCaK0Eclxk1TqkgWbdXFknwGycX620AzZWa/A1K3gAs+GrpzqhnPMuoBJ0Z9qxXTbSJvCyvMbYwVrjaxc/zWqdMU8waWz8A7iqKGKs/SqbQ3rO6v7c=" - - secure: "dAGAjBokqm/0nVoLMofQni/fWIBcYSmdq4XvCBX1ZAMDsWnuOfz/4XCY6h2lEI1rVHZQ+UdZkc9PioOHGPZh5BnvE49/xVVWr9c4/61lrDOlkD01ZjSAeoV0fAZq+93V/wPl4QV+MM+Sem9hNNzFSbN5VsQLAiWCSapWsLdKzqA=" - -matrix: - exclude: - - os: linux - env: TASK=python_test - - os: linux - env: TASK=java_test - - os: linux - env: TASK=cmake_test - -# dependent brew packages -addons: - homebrew: - packages: - - cmake - - libomp - - graphviz - - openssl - - libgit2 - - wget - - r - update: true - -before_install: - - source tests/travis/travis_setup_env.sh - - if [ "${TASK}" != "python_sdist_test" ]; then export PYTHONPATH=${PYTHONPATH}:${PWD}/python-package; fi - - echo "MAVEN_OPTS='-Xmx2g -XX:MaxPermSize=1024m -XX:ReservedCodeCacheSize=512m -Dorg.slf4j.simpleLogger.defaultLogLevel=error'" > ~/.mavenrc - -install: - - source tests/travis/setup.sh - -script: - - tests/travis/run_test.sh - -cache: - directories: - - ${HOME}/.cache/usr - - ${HOME}/.cache/pip - -before_cache: - - tests/travis/travis_before_cache.sh - -after_failure: - - tests/travis/travis_after_failure.sh - -after_success: - - tree build - - bash <(curl -s https://codecov.io/bash) -a '-o src/ src/*.c' - -notifications: - email: - on_success: change - on_failure: always diff --git a/ml-xgboost/CITATION b/ml-xgboost/CITATION deleted file mode 100644 index 1890625..0000000 --- a/ml-xgboost/CITATION +++ /dev/null @@ -1,18 +0,0 @@ -@inproceedings{Chen:2016:XST:2939672.2939785, - author = {Chen, Tianqi and Guestrin, Carlos}, - title = {{XGBoost}: A Scalable Tree Boosting System}, - booktitle = {Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining}, - series = {KDD '16}, - year = {2016}, - isbn = {978-1-4503-4232-2}, - location = {San Francisco, California, USA}, - pages = {785--794}, - numpages = {10}, - url = {http://doi.acm.org/10.1145/2939672.2939785}, - doi = {10.1145/2939672.2939785}, - acmid = {2939785}, - publisher = {ACM}, - address = {New York, NY, USA}, - keywords = {large-scale machine learning}, -} - diff --git a/ml-xgboost/CMakeLists.txt b/ml-xgboost/CMakeLists.txt deleted file mode 100644 index dbc7ae0..0000000 --- a/ml-xgboost/CMakeLists.txt +++ /dev/null @@ -1,310 +0,0 @@ -cmake_minimum_required(VERSION 3.13) -set(CMAKE_CXX_FLAGS "-fstack-protector-all -D_FORTIFY_SOURCE=2 -O2 -Wl,-z,relro,-z,now,-z,noexecstack -s ${CMAKE_CXX_FLAGS}") -set(CMAKE_SKIP_RPATH TRUE) -project(xgboost LANGUAGES CXX C VERSION 1.1.0) -include(cmake/Utils.cmake) -list(APPEND CMAKE_MODULE_PATH "${xgboost_SOURCE_DIR}/cmake/modules") -cmake_policy(SET CMP0022 NEW) -cmake_policy(SET CMP0079 NEW) -cmake_policy(SET CMP0063 NEW) - -if
((${CMAKE_VERSION} VERSION_GREATER 3.13) OR (${CMAKE_VERSION} VERSION_EQUAL 3.13)) - cmake_policy(SET CMP0077 NEW) -endif ((${CMAKE_VERSION} VERSION_GREATER 3.13) OR (${CMAKE_VERSION} VERSION_EQUAL 3.13)) - -message(STATUS "CMake version ${CMAKE_VERSION}") - -if (CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0) - message(FATAL_ERROR "GCC version must be at least 5.0!") -endif() - -include(${xgboost_SOURCE_DIR}/cmake/FindPrefetchIntrinsics.cmake) -find_prefetch_intrinsics() -include(${xgboost_SOURCE_DIR}/cmake/Version.cmake) -write_version() -set_default_configuration_release() - -#-- Options -option(BUILD_C_DOC "Build documentation for C APIs using Doxygen." OFF) -option(USE_OPENMP "Build with OpenMP support." ON) -option(BUILD_STATIC_LIB "Build static library" OFF) -## Bindings -option(JVM_BINDINGS "Build JVM bindings" ON) -option(R_LIB "Build shared library for R package" OFF) -## Dev -option(USE_DEBUG_OUTPUT "Dump internal training results like gradients and predictions to stdout. -Should only be used for debugging." OFF) -option(GOOGLE_TEST "Build google tests" OFF) -option(WITH_CODE_COVERAGE "with code coverage" OFF) -option(USE_DMLC_GTEST "Use google tests bundled with dmlc-core submodule" OFF) -option(USE_NVTX "Build with CUDA profiling annotations. Developers only." OFF) -set(NVTX_HEADER_DIR "" CACHE PATH "Path to the stand-alone nvtx header") -option(RABIT_MOCK "Build rabit with mock" OFF) -option(HIDE_CXX_SYMBOLS "Build shared library and hide all C++ symbols" OFF) -## CUDA -option(USE_CUDA "Build with GPU acceleration" OFF) -option(USE_NCCL "Build with NCCL to enable distributed GPU support." OFF) -option(BUILD_WITH_SHARED_NCCL "Build with shared NCCL library." OFF) -set(GPU_COMPUTE_VER "" CACHE STRING - "Semicolon separated list of compute versions to be built against, e.g. '35;61'") -## Copied From dmlc -option(USE_HDFS "Build with HDFS support" OFF) -option(USE_AZURE "Build with AZURE support" OFF) -option(USE_S3 "Build with S3 support" OFF) -## Sanitizers -option(USE_SANITIZER "Use sanitizer flags" OFF) -option(SANITIZER_PATH "Path to sanitizers.") -set(ENABLED_SANITIZERS "address" "leak" CACHE STRING - "Semicolon separated list of sanitizer names. E.g. 'address;leak'.
Supported sanitizers are -address, leak, undefined and thread.") -## Plugins -option(PLUGIN_LZ4 "Build lz4 plugin" OFF) -option(PLUGIN_DENSE_PARSER "Build dense parser plugin" OFF) -## Force AVX -option(USE_AVX "Use AVX" ON) - -#-- Checks for building XGBoost -if (USE_DEBUG_OUTPUT AND (NOT (CMAKE_BUILD_TYPE MATCHES Debug))) - message(SEND_ERROR "Do not enable `USE_DEBUG_OUTPUT' with release build.") -endif (USE_DEBUG_OUTPUT AND (NOT (CMAKE_BUILD_TYPE MATCHES Debug))) -if (USE_NCCL AND NOT (USE_CUDA)) - message(SEND_ERROR "`USE_NCCL` must be enabled with `USE_CUDA` flag.") -endif (USE_NCCL AND NOT (USE_CUDA)) -if (BUILD_WITH_SHARED_NCCL AND (NOT USE_NCCL)) - message(SEND_ERROR "Build XGBoost with -DUSE_NCCL=ON to enable BUILD_WITH_SHARED_NCCL.") -endif (BUILD_WITH_SHARED_NCCL AND (NOT USE_NCCL)) -if (JVM_BINDINGS AND R_LIB) - message(SEND_ERROR "`R_LIB' is not compatible with `JVM_BINDINGS' as they both have customized configurations.") -endif (JVM_BINDINGS AND R_LIB) -if (R_LIB AND GOOGLE_TEST) - message(WARNING "Some C++ unittests will fail with `R_LIB` enabled, - as R package redirects some functions to R runtime implementation.") -endif (R_LIB AND GOOGLE_TEST) - -include(CheckSymbolExists) -include(CheckIncludeFile) -include(CheckIncludeFileCXX) -if(USE_AVX) - if(MSVC) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX") - else() - EXECUTE_PROCESS(COMMAND uname -a OUTPUT_VARIABLE ARCH) - message(STATUS "Current architecture is ${ARCH}") - - if (${ARCH} MATCHES ".*aarch64.*") - message(STATUS "Turn on ARM intrinsics") - add_compile_options(-DUSE_ARM_INTRINSICS) - endif() - if (${ARCH} MATCHES ".*x86.*") - message(STATUS "Turn on Intel intrinsics") - add_compile_options(-mavx -DUSE_INTEL_INTRINSICS) - endif() - endif() -endif() - -if(WITH_CODE_COVERAGE) - message(STATUS "Enabled code coverage options") - add_compile_options("-ftest-coverage" "-fprofile-arcs") - # these are compile options, but some targets also need them in their link options - add_link_options("-ftest-coverage" "-fprofile-arcs") - add_link_options("-lgcov") -endif() - -#-- Sanitizer -if (USE_SANITIZER) - include(cmake/Sanitizer.cmake) - enable_sanitizers("${ENABLED_SANITIZERS}") -endif (USE_SANITIZER) - -if (USE_CUDA) - SET(USE_OPENMP ON CACHE BOOL "CUDA requires OpenMP" FORCE) - # `export CXX=' is ignored by CMake CUDA. - set(CMAKE_CUDA_HOST_COMPILER ${CMAKE_CXX_COMPILER}) - message(STATUS "Configured CUDA host compiler: ${CMAKE_CUDA_HOST_COMPILER}") - - enable_language(CUDA) - set(GEN_CODE "") - format_gencode_flags("${GPU_COMPUTE_VER}" GEN_CODE) - message(STATUS "CUDA GEN_CODE: ${GEN_CODE}") -endif (USE_CUDA) - -find_package(Threads REQUIRED) - -if (USE_OPENMP) - if (APPLE) - # Require CMake 3.16+ on Mac OSX, as previous versions of CMake had trouble locating - # OpenMP on Mac.
See https://github.com/dmlc/xgboost/pull/5146#issuecomment-568312706 - cmake_minimum_required(VERSION 3.16) - endif (APPLE) - find_package(OpenMP REQUIRED) -endif (USE_OPENMP) - -# dmlc-core -msvc_use_static_runtime() -add_subdirectory(${xgboost_SOURCE_DIR}/dmlc-core) -set_target_properties(dmlc PROPERTIES - CXX_STANDARD 14 - CXX_STANDARD_REQUIRED ON - POSITION_INDEPENDENT_CODE ON) -list(APPEND LINKED_LIBRARIES_PRIVATE dmlc) - -# rabit -set(RABIT_BUILD_DMLC OFF) -set(DMLC_ROOT ${xgboost_SOURCE_DIR}/dmlc-core) -set(RABIT_WITH_R_LIB ${R_LIB}) -add_subdirectory(rabit) - -if (RABIT_MOCK) - list(APPEND LINKED_LIBRARIES_PRIVATE rabit_mock_static) -else() - list(APPEND LINKED_LIBRARIES_PRIVATE rabit) -endif(RABIT_MOCK) -foreach(lib rabit rabit_base rabit_empty rabit_mock rabit_mock_static) - # Explicitly link dmlc to rabit, so that configured header (build_config.h) - # from dmlc is correctly applied to rabit. - if (TARGET ${lib}) - target_link_libraries(${lib} dmlc ${CMAKE_THREAD_LIBS_INIT}) - if (HIDE_CXX_SYMBOLS) # Hide all C++ symbols from Rabit - set_target_properties(${lib} PROPERTIES CXX_VISIBILITY_PRESET hidden) - endif (HIDE_CXX_SYMBOLS) - endif (TARGET ${lib}) -endforeach() - -# Exports some R specific definitions and objects -if (R_LIB) - add_subdirectory(${xgboost_SOURCE_DIR}/R-package) -endif (R_LIB) - -# core xgboost -list(APPEND LINKED_LIBRARIES_PRIVATE Threads::Threads ${CMAKE_THREAD_LIBS_INIT}) -add_subdirectory(${xgboost_SOURCE_DIR}/plugin) -add_subdirectory(${xgboost_SOURCE_DIR}/src) -target_link_libraries(objxgboost PUBLIC dmlc) -set(XGBOOST_OBJ_SOURCES "${XGBOOST_OBJ_SOURCES};$") - -#-- library -if (BUILD_STATIC_LIB) - add_library(xgboost STATIC ${XGBOOST_OBJ_SOURCES}) - string(TIMESTAMP time_stamp "%s") - add_custom_command(TARGET xgboost - POST_BUILD - COMMAND mkdir ${CMAKE_CURRENT_SOURCE_DIR}/lib/tmp_${time_stamp} - COMMAND mv ${CMAKE_CURRENT_SOURCE_DIR}/lib/libxgboost.a ${CMAKE_CURRENT_SOURCE_DIR}/lib/tmp_${time_stamp}/ - COMMAND cp ${CMAKE_CURRENT_SOURCE_DIR}/lib/libboostkit_xgboost_kernel.a ${CMAKE_CURRENT_SOURCE_DIR}/lib/tmp_${time_stamp}/ - VERBATIM - ) - add_custom_command(TARGET xgboost - POST_BUILD - COMMAND ar -x libxgboost.a - COMMAND ar -x libboostkit_xgboost_kernel.a - COMMAND sh -c "ar -qcs ${CMAKE_CURRENT_SOURCE_DIR}/lib/libxgboost.a ${CMAKE_CURRENT_SOURCE_DIR}/lib/tmp_${time_stamp}/*.o" - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/lib/tmp_${time_stamp}/ - VERBATIM - ) - add_custom_command(TARGET xgboost - POST_BUILD - COMMAND rm -rf ${CMAKE_CURRENT_SOURCE_DIR}/lib/tmp_${time_stamp} - VERBATIM - ) -else (BUILD_STATIC_LIB) - add_library(xgboost SHARED ${XGBOOST_OBJ_SOURCES}) - target_link_libraries(xgboost PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/lib/libboostkit_xgboost_kernel.so) -endif (BUILD_STATIC_LIB) - -#-- Hide all C++ symbols -if (HIDE_CXX_SYMBOLS) - set_target_properties(objxgboost PROPERTIES CXX_VISIBILITY_PRESET hidden) - set_target_properties(xgboost PROPERTIES CXX_VISIBILITY_PRESET hidden) -endif (HIDE_CXX_SYMBOLS) - -target_include_directories(xgboost - INTERFACE - $ - $) -target_link_libraries(xgboost PRIVATE ${LINKED_LIBRARIES_PRIVATE}) - -# This creates its own shared library `xgboost4j'. 
-if (JVM_BINDINGS) - # To ensure the open source code can be compiled independently - add_subdirectory(${PROJECT_SOURCE_DIR}/kernel_include/boostkit_xgboost_kernel_client) - add_subdirectory(${xgboost_SOURCE_DIR}/jvm-packages) -endif (JVM_BINDINGS) -#-- End shared library - -#-- CLI for xgboost -add_executable(runxgboost ${xgboost_SOURCE_DIR}/src/cli_main.cc ${XGBOOST_OBJ_SOURCES}) - -target_include_directories(runxgboost - PRIVATE - ${xgboost_SOURCE_DIR}/include - ${xgboost_SOURCE_DIR}/dmlc-core/include - ${xgboost_SOURCE_DIR}/rabit/include - ${xgboost_SOURCE_DIR}/kernel_include) -target_link_libraries(runxgboost PRIVATE ${LINKED_LIBRARIES_PRIVATE}) -target_link_libraries(runxgboost PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/lib/libboostkit_xgboost_kernel.so) -set_target_properties( - runxgboost PROPERTIES - OUTPUT_NAME xgboost - CXX_STANDARD 14 - CXX_STANDARD_REQUIRED ON) -#-- End CLI for xgboost - -set_output_directory(runxgboost ${xgboost_SOURCE_DIR}) -set_output_directory(xgboost ${xgboost_SOURCE_DIR}/lib) - # Ensure these two targets do not build simultaneously, as they produce outputs with conflicting names -add_dependencies(xgboost runxgboost) - -#-- Installing XGBoost -if (R_LIB) - set_target_properties(xgboost PROPERTIES PREFIX "") - if (APPLE) - set_target_properties(xgboost PROPERTIES SUFFIX ".so") - endif (APPLE) - setup_rpackage_install_target(xgboost ${CMAKE_CURRENT_BINARY_DIR}) - set(CMAKE_INSTALL_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/dummy_inst") -endif (R_LIB) -if (MINGW) - set_target_properties(xgboost PROPERTIES PREFIX "") -endif (MINGW) - -if (BUILD_C_DOC) - include(cmake/Doc.cmake) - run_doxygen() -endif (BUILD_C_DOC) - -include(GNUInstallDirs) -# Install all headers. Please note that currently the C++ headers do not form an "API". -install(DIRECTORY ${xgboost_SOURCE_DIR}/include/xgboost - DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) - -install(TARGETS xgboost runxgboost - EXPORT XGBoostTargets - ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} - LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} - RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} - INCLUDES DESTINATION ${LIBLEGACY_INCLUDE_DIRS}) -install(EXPORT XGBoostTargets - FILE XGBoostTargets.cmake - NAMESPACE xgboost:: - DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/xgboost) - -include(CMakePackageConfigHelpers) -configure_package_config_file( - ${CMAKE_CURRENT_LIST_DIR}/cmake/xgboost-config.cmake.in - ${CMAKE_CURRENT_BINARY_DIR}/cmake/xgboost-config.cmake - INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/xgboost) -write_basic_package_version_file( - ${CMAKE_BINARY_DIR}/cmake/xgboost-config-version.cmake - VERSION ${XGBOOST_VERSION} - COMPATIBILITY AnyNewerVersion) -install( - FILES - ${CMAKE_BINARY_DIR}/cmake/xgboost-config.cmake - ${CMAKE_BINARY_DIR}/cmake/xgboost-config-version.cmake - DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/xgboost) - - -# For MSVC: Call msvc_use_static_runtime() once again to completely -# replace /MD with /MT. See https://github.com/dmlc/xgboost/issues/4462 -# for issues caused by mixing of /MD and /MT flags -msvc_use_static_runtime() diff --git a/ml-xgboost/CONTRIBUTORS.md b/ml-xgboost/CONTRIBUTORS.md deleted file mode 100644 index e426f85..0000000 --- a/ml-xgboost/CONTRIBUTORS.md +++ /dev/null @@ -1,104 +0,0 @@ -Contributors of DMLC/XGBoost -============================ -XGBoost has been developed and used by an active community. Everyone is more than welcome to contribute; it is a great way to make the project better and more accessible to more users.
- -Project Management Committee(PMC) ---------- -The Project Management Committee(PMC) consists of a group of active committers that moderate the discussion, manage the project release, and propose new committer/PMC members. - -* [Tianqi Chen](https://github.com/tqchen), University of Washington - - Tianqi is a Ph.D. student working on large-scale machine learning. He is the creator of the project. -* [Michael Benesty](https://github.com/pommedeterresautee) - - Michael is a lawyer and data scientist in France. He is the creator of the XGBoost interactive analysis module in R. -* [Yuan Tang](https://github.com/terrytangyuan), Ant Financial - - Yuan is a software engineer at Ant Financial. He contributed mostly to the R and Python packages. -* [Nan Zhu](https://github.com/CodingCat), Uber - - Nan is a software engineer at Uber. He contributed mostly to the JVM packages. -* [Jiaming Yuan](https://github.com/trivialfis) - - Jiaming contributed to the GPU algorithms. He has also introduced new abstractions to improve the quality of the C++ codebase. -* [Hyunsu Cho](http://hyunsu-cho.io/), NVIDIA - - Hyunsu is the maintainer of the XGBoost Python package. He also manages the Jenkins continuous integration system (https://xgboost-ci.net/). He is the initial author of the CPU 'hist' updater. -* [Rory Mitchell](https://github.com/RAMitchell), University of Waikato - - Rory is a Ph.D. student at the University of Waikato. He is the original creator of the GPU training algorithms. He improved the CMake build system and continuous integration. -* [Hongliang Liu](https://github.com/phunterlau) - - -Committers ---------- -Committers are people who have made substantial contributions to the project and have been granted write access to the project. - -* [Tong He](https://github.com/hetong007), Amazon AI - - Tong is an applied scientist at Amazon AI. He is the maintainer of the XGBoost R package. -* [Vadim Khotilovich](https://github.com/khotilov) - - Vadim contributes many improvements in R and core packages. -* [Bing Xu](https://github.com/antinucleon) - - Bing is the original creator of the XGBoost Python package and currently the maintainer of [XGBoost.jl](https://github.com/antinucleon/XGBoost.jl). -* [Sergei Lebedev](https://github.com/superbobry), Criteo - - Sergei is a software engineer at Criteo. He contributed mostly to the JVM packages. -* [Scott Lundberg](http://scottlundberg.com/), University of Washington - - Scott is a Ph.D. student at the University of Washington. He is the creator of SHAP, a unified approach to explain the output of machine learning models such as decision tree ensembles. He also helps maintain the XGBoost Julia package. - - -Become a Committer ------------------- -XGBoost is an open-source project, and we are actively looking for new committers who are willing to help maintain and lead the project. -Committers come from contributors who: -* Have made substantial contributions to the project. -* Are willing to spend time on maintaining and leading the project. - -New committers will be proposed by current committers, with support from more than two current committers. - -List of Contributors -------------------- -* [Full List of Contributors](https://github.com/dmlc/xgboost/graphs/contributors) - - To contributors: please add your name to the list when you submit a patch to the project:) -* [Kailong Chen](https://github.com/kalenhaha) - - Kailong is an early contributor to XGBoost; he is the creator of the ranking objectives in XGBoost.
-* [Skipper Seabold](https://github.com/jseabold) - - Skipper is the major contributor to the scikit-learn module of XGBoost. -* [Zygmunt Zając](https://github.com/zygmuntz) - - Zygmunt is the master behind the early stopping feature frequently used by kagglers. -* [Ajinkya Kale](https://github.com/ajkl) -* [Boliang Chen](https://github.com/cblsjtu) -* [Yangqing Men](https://github.com/yanqingmen) - - Yangqing is the creator of the XGBoost Java package. -* [Engpeng Yao](https://github.com/yepyao) -* [Giulio](https://github.com/giuliohome) - - Giulio is the creator of the Windows project of XGBoost. -* [Jamie Hall](https://github.com/nerdcha) - - Jamie is the initial creator of the XGBoost scikit-learn module. -* [Yen-Ying Lee](https://github.com/white1033) -* [Masaaki Horikoshi](https://github.com/sinhrks) - - Masaaki is the initial creator of the XGBoost Python plotting module. -* [daiyl0320](https://github.com/daiyl0320) - - daiyl0320 contributed patches that made the XGBoost distributed version more robust and scale stably on TB-scale datasets. -* [Huayi Zhang](https://github.com/irachex) -* [Johan Manders](https://github.com/johanmanders) -* [yoori](https://github.com/yoori) -* [Mathias Müller](https://github.com/far0n) -* [Sam Thomson](https://github.com/sammthomson) -* [ganesh-krishnan](https://github.com/ganesh-krishnan) -* [Damien Carol](https://github.com/damiencarol) -* [Alex Bain](https://github.com/convexquad) -* [Baltazar Bieniek](https://github.com/bbieniek) -* [Adam Pocock](https://github.com/Craigacp) -* [Gideon Whitehead](https://github.com/gaw89) -* [Yi-Lin Juang](https://github.com/frankyjuang) -* [Andrew Hannigan](https://github.com/andrewhannigan) -* [Andy Adinets](https://github.com/canonizer) -* [Henry Gouk](https://github.com/henrygouk) -* [Pierre de Sahb](https://github.com/pdesahb) -* [liuliang01](https://github.com/liuliang01) - - liuliang01 added support for the qid column for the LibSVM input format. This makes ranking tasks easier in a distributed setting.
-* [Andrew Thia](https://github.com/BlueTea88) - - Andrew Thia implemented feature interaction constraints -* [Wei Tian](https://github.com/weitian) -* [Chen Qin](https://github.com/chenqin) -* [Sam Wilkinson](https://samwilkinson.io) -* [Matthew Jones](https://github.com/mt-jones) -* [Jiaxiang Li](https://github.com/JiaxiangBU) -* [Bryan Woods](https://github.com/bryan-woods) - - Bryan added support for cross-validation for the ranking objective -* [Haoda Fu](https://github.com/fuhaoda) -* [Evan Kepner](https://github.com/EvanKepner) - - Evan Kepner added support for os.PathLike file paths in Python diff --git a/ml-xgboost/Jenkinsfile b/ml-xgboost/Jenkinsfile deleted file mode 100644 index f2bd51c..0000000 --- a/ml-xgboost/Jenkinsfile +++ /dev/null @@ -1,435 +0,0 @@ -#!/usr/bin/groovy -// -*- mode: groovy -*- -// Jenkins pipeline -// See the documentation at https://jenkins.io/doc/book/pipeline/jenkinsfile/ - -// Command used to run commands inside a Docker container -dockerRun = 'tests/ci_build/ci_build.sh' - -import groovy.transform.Field - -@Field -def commit_id // necessary to pass a variable from one stage to another - -pipeline { - // Each stage specifies its own agent - agent none - - environment { - DOCKER_CACHE_ECR_ID = '492475357299' - DOCKER_CACHE_ECR_REGION = 'us-west-2' - } - - // Setup common job properties - options { - ansiColor('xterm') - timestamps() - timeout(time: 240, unit: 'MINUTES') - buildDiscarder(logRotator(numToKeepStr: '10')) - preserveStashes() - } - - // Build stages - stages { - stage('Jenkins Linux: Get sources') { - agent { label 'linux && cpu' } - steps { - script { - checkoutSrcs() - commit_id = "${GIT_COMMIT}" - } - stash name: 'srcs' - milestone ordinal: 1 - } - } - stage('Jenkins Linux: Formatting Check') { - agent none - steps { - script { - parallel ([ - 'clang-tidy': { ClangTidy() }, - 'lint': { Lint() }, - 'sphinx-doc': { SphinxDoc() }, - 'doxygen': { Doxygen() } - ]) - } - milestone ordinal: 2 - } - } - stage('Jenkins Linux: Build') { - agent none - steps { - script { - parallel ([ - 'build-cpu': { BuildCPU() }, - 'build-cpu-rabit-mock': { BuildCPUMock() }, - 'build-cpu-non-omp': { BuildCPUNonOmp() }, - 'build-gpu-cuda10.0': { BuildCUDA(cuda_version: '10.0') }, - 'build-gpu-cuda10.1': { BuildCUDA(cuda_version: '10.1') }, - 'build-jvm-packages': { BuildJVMPackages(spark_version: '2.4.3') }, - 'build-jvm-doc': { BuildJVMDoc() } - ]) - } - milestone ordinal: 3 - } - } - stage('Jenkins Linux: Test') { - agent none - steps { - script { - parallel ([ - 'test-python-cpu': { TestPythonCPU() }, - 'test-python-gpu-cuda9.0': { TestPythonGPU(cuda_version: '9.0') }, - 'test-python-gpu-cuda10.0': { TestPythonGPU(cuda_version: '10.0') }, - 'test-python-gpu-cuda10.1': { TestPythonGPU(cuda_version: '10.1') }, - 'test-python-mgpu-cuda10.1': { TestPythonGPU(cuda_version: '10.1', multi_gpu: true) }, - 'test-cpp-gpu': { TestCppGPU(cuda_version: '10.1') }, - 'test-cpp-mgpu': { TestCppGPU(cuda_version: '10.1', multi_gpu: true) }, - 'test-jvm-jdk8': { CrossTestJVMwithJDK(jdk_version: '8', spark_version: '2.4.3') }, - 'test-jvm-jdk11': { CrossTestJVMwithJDK(jdk_version: '11') }, - 'test-jvm-jdk12': { CrossTestJVMwithJDK(jdk_version: '12') }, - 'test-r-3.5.3': { TestR(use_r35: true) } - ]) - } - milestone ordinal: 4 - } - } - stage('Jenkins Linux: Deploy') { - agent none - steps { - script { - parallel ([ - 'deploy-jvm-packages': { DeployJVMPackages(spark_version: '2.4.3') } - ]) - } - milestone ordinal: 5 - } - } - } -} - -// check out source code from git -def checkoutSrcs() {
retry(5) { - try { - timeout(time: 2, unit: 'MINUTES') { - checkout scm - sh 'git submodule update --init' - } - } catch (exc) { - deleteDir() - error "Failed to fetch source codes" - } - } -} - -def ClangTidy() { - node('linux && cpu') { - unstash name: 'srcs' - echo "Running clang-tidy job..." - def container_type = "clang_tidy" - def docker_binary = "docker" - def dockerArgs = "--build-arg CUDA_VERSION=10.1" - sh """ - ${dockerRun} ${container_type} ${docker_binary} ${dockerArgs} python3 tests/ci_build/tidy.py - """ - deleteDir() - } -} - -def Lint() { - node('linux && cpu') { - unstash name: 'srcs' - echo "Running lint..." - def container_type = "cpu" - def docker_binary = "docker" - sh """ - ${dockerRun} ${container_type} ${docker_binary} make lint - """ - deleteDir() - } -} - -def SphinxDoc() { - node('linux && cpu') { - unstash name: 'srcs' - echo "Running sphinx-doc..." - def container_type = "cpu" - def docker_binary = "docker" - def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='-e SPHINX_GIT_BRANCH=${BRANCH_NAME}'" - sh """#!/bin/bash - ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} make -C doc html - """ - deleteDir() - } -} - -def Doxygen() { - node('linux && cpu') { - unstash name: 'srcs' - echo "Running doxygen..." - def container_type = "cpu" - def docker_binary = "docker" - sh """ - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/doxygen.sh ${BRANCH_NAME} - """ - echo 'Uploading doc...' - s3Upload file: "build/${BRANCH_NAME}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "doxygen/${BRANCH_NAME}.tar.bz2" - deleteDir() - } -} - -def BuildCPU() { - node('linux && cpu') { - unstash name: 'srcs' - echo "Build CPU" - def container_type = "cpu" - def docker_binary = "docker" - sh """ - ${dockerRun} ${container_type} ${docker_binary} rm -fv dmlc-core/include/dmlc/build_config_default.h - # This step is not necessary, but here we include it, to ensure that DMLC_CORE_USE_CMAKE flag is correctly propagated - # We want to make sure that we use the configured header build/dmlc/build_config.h instead of include/dmlc/build_config_default.h. - # See discussion at https://github.com/dmlc/xgboost/issues/5510 - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh - ${dockerRun} ${container_type} ${docker_binary} build/testxgboost - """ - // Sanitizer test - def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='-e ASAN_SYMBOLIZER_PATH=/usr/bin/llvm-symbolizer -e ASAN_OPTIONS=symbolize=1 -e UBSAN_OPTIONS=print_stacktrace=1:log_path=ubsan_error.log --cap-add SYS_PTRACE'" - sh """ - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;leak;undefined" \ - -DCMAKE_BUILD_TYPE=Debug -DSANITIZER_PATH=/usr/lib/x86_64-linux-gnu/ - ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} build/testxgboost - """ - - stash name: 'xgboost_cli', includes: 'xgboost' - deleteDir() - } -} - -def BuildCPUMock() { - node('linux && cpu') { - unstash name: 'srcs' - echo "Build CPU with rabit mock" - def container_type = "cpu" - def docker_binary = "docker" - sh """ - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_mock_cmake.sh - """ - echo 'Stashing rabit C++ test executable (xgboost)...' 
- stash name: 'xgboost_rabit_tests', includes: 'xgboost' - deleteDir() - } -} - -def BuildCPUNonOmp() { - node('linux && cpu') { - unstash name: 'srcs' - echo "Build CPU without OpenMP" - def container_type = "cpu" - def docker_binary = "docker" - sh """ - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh -DUSE_OPENMP=OFF - """ - echo "Running Non-OpenMP C++ test..." - sh """ - ${dockerRun} ${container_type} ${docker_binary} build/testxgboost - """ - deleteDir() - } -} - -def BuildCUDA(args) { - node('linux && cpu') { - unstash name: 'srcs' - echo "Build with CUDA ${args.cuda_version}" - def container_type = "gpu_build" - def docker_binary = "docker" - def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}" - sh """ - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh -DUSE_CUDA=ON -DUSE_NCCL=ON -DOPEN_MP:BOOL=ON -DHIDE_CXX_SYMBOLS=ON - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal" - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} python3 tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} manylinux2010_x86_64 - """ - // Stash wheel for CUDA 10.0 target - if (args.cuda_version == '10.0') { - echo 'Stashing Python wheel...' - stash name: 'xgboost_whl_cuda10', includes: 'python-package/dist/*.whl' - path = ("${BRANCH_NAME}" == 'master') ? '' : "${BRANCH_NAME}/" - s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl' - echo 'Stashing C++ test executable (testxgboost)...' - stash name: 'xgboost_cpp_tests', includes: 'build/testxgboost' - } - deleteDir() - } -} - -def BuildJVMPackages(args) { - node('linux && cpu') { - unstash name: 'srcs' - echo "Build Boostkit-XGBoost4J-Spark with Spark ${args.spark_version}" - def container_type = "jvm" - def docker_binary = "docker" - // Use only 4 CPU cores - def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='--cpuset-cpus 0-3'" - sh """ - ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_jvm_packages.sh ${args.spark_version} - """ - echo 'Stashing Boostkit-XGBoost4J JAR...' - stash name: 'boostkit-xgboost4j_jar', includes: "jvm-packages/boostkit-xgboost4j/target/*.jar,jvm-packages/boostkit-xgboost4j-spark/target/*.jar,jvm-packages/boostkit-xgboost4j-example/target/*.jar" - deleteDir() - } -} - -def BuildJVMDoc() { - node('linux && cpu') { - unstash name: 'srcs' - echo "Building JVM doc..." - def container_type = "jvm" - def docker_binary = "docker" - sh """ - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_jvm_doc.sh ${BRANCH_NAME} - """ - echo 'Uploading doc...' - s3Upload file: "jvm-packages/${BRANCH_NAME}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "${BRANCH_NAME}.tar.bz2" - deleteDir() - } -} - -def TestPythonCPU() { - node('linux && cpu') { - unstash name: 'xgboost_whl_cuda10' - unstash name: 'srcs' - unstash name: 'xgboost_cli' - echo "Test Python CPU" - def container_type = "cpu" - def docker_binary = "docker" - sh """ - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/test_python.sh cpu - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/test_python.sh cpu-py35 - """ - deleteDir() - } -} - -def TestPythonGPU(args) { - nodeReq = (args.multi_gpu) ? 
'linux && mgpu' : 'linux && gpu' - node(nodeReq) { - unstash name: 'xgboost_whl_cuda10' - unstash name: 'srcs' - echo "Test Python GPU: CUDA ${args.cuda_version}" - def container_type = "gpu" - def docker_binary = "nvidia-docker" - def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}" - if (args.multi_gpu) { - echo "Using multiple GPUs" - sh """ - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh mgpu - """ - if (args.cuda_version != '9.0') { - echo "Running tests with cuDF..." - sh """ - ${dockerRun} cudf ${docker_binary} ${docker_args} tests/ci_build/test_python.sh mgpu-cudf - """ - } - } else { - echo "Using a single GPU" - sh """ - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh gpu - """ - if (args.cuda_version != '9.0') { - echo "Running tests with cuDF..." - sh """ - ${dockerRun} cudf ${docker_binary} ${docker_args} tests/ci_build/test_python.sh cudf - """ - } - } - // For CUDA 10.0 target, run cuDF tests too - deleteDir() - } -} - -def TestCppRabit() { - node(nodeReq) { - unstash name: 'xgboost_rabit_tests' - unstash name: 'srcs' - echo "Test C++, rabit mock on" - def container_type = "cpu" - def docker_binary = "docker" - sh """ - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/runxgb.sh xgboost tests/ci_build/approx.conf.in - """ - deleteDir() - } -} - -def TestCppGPU(args) { - nodeReq = (args.multi_gpu) ? 'linux && mgpu' : 'linux && gpu' - node(nodeReq) { - unstash name: 'xgboost_cpp_tests' - unstash name: 'srcs' - echo "Test C++, CUDA ${args.cuda_version}" - def container_type = "gpu" - def docker_binary = "nvidia-docker" - def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}" - if (args.multi_gpu) { - echo "Using multiple GPUs" - sh "${dockerRun} ${container_type} ${docker_binary} ${docker_args} build/testxgboost --gtest_filter=*.MGPU_*" - } else { - echo "Using a single GPU" - sh "${dockerRun} ${container_type} ${docker_binary} ${docker_args} build/testxgboost --gtest_filter=-*.MGPU_*" - } - deleteDir() - } -} - -def CrossTestJVMwithJDK(args) { - node('linux && cpu') { - unstash name: 'boostkit-xgboost4j_jar' - unstash name: 'srcs' - if (args.spark_version != null) { - echo "Test Boostkit-XGBoost4J on a machine with JDK ${args.jdk_version}, Spark ${args.spark_version}" - } else { - echo "Test Boostkit-XGBoost4J on a machine with JDK ${args.jdk_version}" - } - def container_type = "jvm_cross" - def docker_binary = "docker" - def spark_arg = (args.spark_version != null) ? "--build-arg SPARK_VERSION=${args.spark_version}" : "" - def docker_args = "--build-arg JDK_VERSION=${args.jdk_version} ${spark_arg}" - // Run integration tests only when spark_version is given - def docker_extra_params = (args.spark_version != null) ? "CI_DOCKER_EXTRA_PARAMS_INIT='-e RUN_INTEGRATION_TEST=1'" : "" - sh """ - ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_jvm_cross.sh - """ - deleteDir() - } -} - -def TestR(args) { - node('linux && cpu') { - unstash name: 'srcs' - echo "Test R package" - def container_type = "rproject" - def docker_binary = "docker" - def use_r35_flag = (args.use_r35) ? 
"1" : "0" - def docker_args = "--build-arg USE_R35=${use_r35_flag}" - sh """ - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_test_rpkg.sh || tests/ci_build/print_r_stacktrace.sh - """ - deleteDir() - } -} - -def DeployJVMPackages(args) { - node('linux && cpu') { - unstash name: 'srcs' - if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) { - echo 'Deploying to xgboost-maven-repo S3 repo...' - def container_type = "jvm" - def docker_binary = "docker" - sh """ - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/deploy_jvm_packages.sh ${args.spark_version} - """ - } - deleteDir() - } -} diff --git a/ml-xgboost/Jenkinsfile-win64 b/ml-xgboost/Jenkinsfile-win64 deleted file mode 100644 index 15dc345..0000000 --- a/ml-xgboost/Jenkinsfile-win64 +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/groovy -// -*- mode: groovy -*- - -/* Jenkins pipeline for Windows AMD64 target */ - -import groovy.transform.Field - -@Field -def commit_id // necessary to pass a variable from one stage to another - -pipeline { - agent none - // Build stages - stages { - stage('Jenkins Win64: Get sources') { - agent { label 'win64 && build' } - steps { - script { - checkoutSrcs() - commit_id = "${GIT_COMMIT}" - } - stash name: 'srcs' - milestone ordinal: 1 - } - } - stage('Jenkins Win64: Build') { - agent none - steps { - script { - parallel ([ - 'build-win64-cuda9.0': { BuildWin64() } - ]) - } - milestone ordinal: 2 - } - } - stage('Jenkins Win64: Test') { - agent none - steps { - script { - parallel ([ - 'test-win64-cpu': { TestWin64CPU() }, - 'test-win64-gpu-cuda9.0': { TestWin64GPU(cuda_target: 'cuda9') }, - 'test-win64-gpu-cuda10.0': { TestWin64GPU(cuda_target: 'cuda10_0') }, - 'test-win64-gpu-cuda10.1': { TestWin64GPU(cuda_target: 'cuda10_1') } - ]) - } - milestone ordinal: 3 - } - } - } -} - -// check out source code from git -def checkoutSrcs() { - retry(5) { - try { - timeout(time: 2, unit: 'MINUTES') { - checkout scm - sh 'git submodule update --init' - } - } catch (exc) { - deleteDir() - error "Failed to fetch source codes" - } - } -} - -def BuildWin64() { - node('win64 && build') { - unstash name: 'srcs' - echo "Building XGBoost for Windows AMD64 target..." - bat "nvcc --version" - bat """ - mkdir build - cd build - cmake .. -G"Visual Studio 15 2017 Win64" -DUSE_CUDA=ON -DCMAKE_VERBOSE_MAKEFILE=ON -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON - """ - bat """ - cd build - "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\MSBuild\\15.0\\Bin\\MSBuild.exe" xgboost.sln /m /p:Configuration=Release /nodeReuse:false - """ - bat """ - cd python-package - conda activate && python setup.py bdist_wheel --universal && for /R %%i in (dist\\*.whl) DO python ../tests/ci_build/rename_whl.py "%%i" ${commit_id} win_amd64 - """ - echo "Insert vcomp140.dll (OpenMP runtime) into the wheel..." - bat """ - cd python-package\\dist - COPY /B ..\\..\\tests\\ci_build\\insert_vcomp140.py - conda activate && python insert_vcomp140.py *.whl - """ - echo 'Stashing Python wheel...' - stash name: 'xgboost_whl', includes: 'python-package/dist/*.whl' - path = ("${BRANCH_NAME}" == 'master') ? '' : "${BRANCH_NAME}/" - s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl' - echo 'Stashing C++ test executable (testxgboost)...' 
- stash name: 'xgboost_cpp_tests', includes: 'build/testxgboost.exe' - stash name: 'xgboost_cli', includes: 'xgboost.exe' - deleteDir() - } -} - -def TestWin64CPU() { - node('win64 && cpu') { - unstash name: 'srcs' - unstash name: 'xgboost_whl' - unstash name: 'xgboost_cli' - echo "Test Win64 CPU" - echo "Installing Python wheel..." - bat "conda activate && (python -m pip uninstall -y xgboost || cd .)" - bat """ - conda activate && for /R %%i in (python-package\\dist\\*.whl) DO python -m pip install "%%i" - """ - echo "Installing Python dependencies..." - bat """ - conda activate && conda upgrade scikit-learn pandas numpy - """ - echo "Running Python tests..." - bat "conda activate && python -m pytest -v -s --fulltrace tests\\python" - bat "conda activate && python -m pip uninstall -y xgboost" - deleteDir() - } -} - -def TestWin64GPU(args) { - node("win64 && gpu && ${args.cuda_target}") { - unstash name: 'srcs' - unstash name: 'xgboost_whl' - unstash name: 'xgboost_cpp_tests' - echo "Test Win64 GPU (${args.cuda_target})" - bat "nvcc --version" - echo "Running C++ tests..." - bat "build\\testxgboost.exe" - echo "Installing Python wheel..." - bat "conda activate && (python -m pip uninstall -y xgboost || cd .)" - bat """ - conda activate && for /R %%i in (python-package\\dist\\*.whl) DO python -m pip install "%%i" - """ - echo "Installing Python dependencies..." - bat """ - conda activate && conda upgrade scikit-learn pandas numpy - """ - echo "Running Python tests..." - bat """ - conda activate && python -m pytest -v -s --fulltrace -m "(not slow) and (not mgpu)" tests\\python-gpu - """ - bat "conda activate && python -m pip uninstall -y xgboost" - deleteDir() - } -} diff --git a/ml-xgboost/LICENSE b/ml-xgboost/LICENSE deleted file mode 100644 index 90c0ff9..0000000 --- a/ml-xgboost/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright (c) 2019 by Contributors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/ml-xgboost/Makefile b/ml-xgboost/Makefile deleted file mode 100644 index a49d307..0000000 --- a/ml-xgboost/Makefile +++ /dev/null @@ -1,147 +0,0 @@ -ifndef DMLC_CORE - DMLC_CORE = dmlc-core -endif - -ifndef RABIT - RABIT = rabit -endif - -ROOTDIR = $(CURDIR) - -# workarounds for some buggy old make & msys2 versions seen in windows -ifeq (NA, $(shell test ! 
-d "$(ROOTDIR)" && echo NA )) - $(warning Attempting to fix non-existing ROOTDIR [$(ROOTDIR)]) - ROOTDIR := $(shell pwd) - $(warning New ROOTDIR [$(ROOTDIR)] $(shell test -d "$(ROOTDIR)" && echo " is OK" )) -endif -MAKE_OK := $(shell "$(MAKE)" -v 2> /dev/null) -ifndef MAKE_OK - $(warning Attempting to recover non-functional MAKE [$(MAKE)]) - MAKE := $(shell which make 2> /dev/null) - MAKE_OK := $(shell "$(MAKE)" -v 2> /dev/null) -endif -$(warning MAKE [$(MAKE)] - $(if $(MAKE_OK),checked OK,PROBLEM)) - -include $(DMLC_CORE)/make/dmlc.mk - -# set compiler defaults for OSX versus *nix -# let people override either -OS := $(shell uname) -ifeq ($(OS), Darwin) -ifndef CC -export CC = $(if $(shell which clang), clang, gcc) -endif -ifndef CXX -export CXX = $(if $(shell which clang++), clang++, g++) -endif -else -# linux defaults -ifndef CC -export CC = gcc -endif -ifndef CXX -export CXX = g++ -endif -endif - -export CFLAGS= -DDMLC_LOG_CUSTOMIZE=1 -std=c++11 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS) -CFLAGS += -I$(DMLC_CORE)/include -I$(RABIT)/include -I$(GTEST_PATH)/include - -ifeq ($(TEST_COVER), 1) - CFLAGS += -g -O0 -fprofile-arcs -ftest-coverage -else - CFLAGS += -O3 -funroll-loops -endif - -ifndef LINT_LANG - LINT_LANG= "all" -endif - -# specify tensor path -.PHONY: clean all lint clean_all doxygen rcpplint pypack Rpack Rbuild Rcheck - -build/%.o: src/%.cc - @mkdir -p $(@D) - $(CXX) $(CFLAGS) -MM -MT build/$*.o $< >build/$*.d - $(CXX) -c $(CFLAGS) $< -o $@ - -# The should be equivalent to $(ALL_OBJ) except for build/cli_main.o -amalgamation/xgboost-all0.o: amalgamation/xgboost-all0.cc - $(CXX) -c $(CFLAGS) $< -o $@ - -rcpplint: - python3 dmlc-core/scripts/lint.py xgboost ${LINT_LANG} R-package/src - -lint: rcpplint - python3 dmlc-core/scripts/lint.py --exclude_path python-package/xgboost/dmlc-core \ - python-package/xgboost/include python-package/xgboost/lib \ - python-package/xgboost/make python-package/xgboost/rabit \ - python-package/xgboost/src --pylint-rc ${PWD}/python-package/.pylintrc xgboost \ - ${LINT_LANG} include src python-package - -ifeq ($(TEST_COVER), 1) -cover: check - @- $(foreach COV_OBJ, $(COVER_OBJ), \ - gcov -pbcul -o $(shell dirname $(COV_OBJ)) $(COV_OBJ) > gcov.log || cat gcov.log; \ - ) -endif - -clean: - $(RM) -rf build lib bin *~ */*~ */*/*~ */*/*/*~ */*.o */*/*.o */*/*/*.o #xgboost - $(RM) -rf build_tests *.gcov tests/cpp/xgboost_test - if [ -d "R-package/src" ]; then \ - cd R-package/src; \ - $(RM) -rf rabit src include dmlc-core amalgamation *.so *.dll; \ - cd $(ROOTDIR); \ - fi - -clean_all: clean - cd $(DMLC_CORE); "$(MAKE)" clean; cd $(ROOTDIR) - cd $(RABIT); "$(MAKE)" clean; cd $(ROOTDIR) - -# create pip source dist (sdist) pack for PyPI -pippack: clean_all - cd python-package; python setup.py sdist; mv dist/*.tar.gz ..; cd .. - -# Script to make a clean installable R package. 
-Rpack: clean_all - rm -rf xgboost xgboost*.tar.gz - cp -r R-package xgboost - rm -rf xgboost/src/*.o xgboost/src/*.so xgboost/src/*.dll - rm -rf xgboost/src/*/*.o - rm -rf xgboost/demo/*.model xgboost/demo/*.buffer xgboost/demo/*.txt - rm -rf xgboost/demo/runall.R - cp -r src xgboost/src/src - cp -r include xgboost/src/include - cp -r amalgamation xgboost/src/amalgamation - mkdir -p xgboost/src/rabit - cp -r rabit/include xgboost/src/rabit/include - cp -r rabit/src xgboost/src/rabit/src - rm -rf xgboost/src/rabit/src/*.o - mkdir -p xgboost/src/dmlc-core - cp -r dmlc-core/include xgboost/src/dmlc-core/include - cp -r dmlc-core/src xgboost/src/dmlc-core/src - cp ./LICENSE xgboost -# Modify PKGROOT in Makevars.in - cat R-package/src/Makevars.in|sed '2s/.*/PKGROOT=./' > xgboost/src/Makevars.in -# Configure Makevars.win (Windows-specific Makevars, likely using MinGW) - cp xgboost/src/Makevars.in xgboost/src/Makevars.win - cat xgboost/src/Makevars.in| sed '3s/.*/ENABLE_STD_THREAD=0/' > xgboost/src/Makevars.win - sed -i -e 's/@OPENMP_CXXFLAGS@/$$\(SHLIB_OPENMP_CXXFLAGS\)/g' xgboost/src/Makevars.win - sed -i -e 's/-pthread/$$\(SHLIB_PTHREAD_FLAGS\)/g' xgboost/src/Makevars.win - sed -i -e 's/@ENDIAN_FLAG@/-DDMLC_CMAKE_LITTLE_ENDIAN=1/g' xgboost/src/Makevars.win - sed -i -e 's/@BACKTRACE_LIB@//g' xgboost/src/Makevars.win - sed -i -e 's/@OPENMP_LIB@//g' xgboost/src/Makevars.win - rm -f xgboost/src/Makevars.win-e # OSX sed create this extra file; remove it - bash R-package/remove_warning_suppression_pragma.sh - rm xgboost/remove_warning_suppression_pragma.sh - -Rbuild: Rpack - R CMD build --no-build-vignettes xgboost - rm -rf xgboost - -Rcheck: Rbuild - R CMD check xgboost*.tar.gz - --include build/*.d --include build/*/*.d diff --git a/ml-xgboost/NEWS.md b/ml-xgboost/NEWS.md deleted file mode 100644 index 777fa6c..0000000 --- a/ml-xgboost/NEWS.md +++ /dev/null @@ -1,1100 +0,0 @@ -XGBoost Change Log -================== - -This file records the changes in xgboost library in reverse chronological order. - -## v1.0.0 (2020.02.19) -This release marks a major milestone for the XGBoost project. - -### Apache-style governance, contribution policy, and semantic versioning (#4646, #4659) -* Starting with 1.0.0 release, the XGBoost Project is adopting Apache-style governance. The full community guideline is [available in the doc website](https://xgboost.readthedocs.io/en/release_1.0.0/contrib/community.html). Note that we now have Project Management Committee (PMC) who would steward the project on the long-term basis. The PMC is also entrusted to run and fund the project's continuous integration (CI) infrastructure (https://xgboost-ci.net). -* We also adopt the [semantic versioning](https://semver.org/). See [our release versioning policy](https://xgboost.readthedocs.io/en/release_1.0.0/contrib/release.html). - -### Better performance scaling for multi-core CPUs (#4502, #4529, #4716, #4851, #5008, #5107, #5138, #5156) -* Poor performance scaling of the `hist` algorithm for multi-core CPUs has been under investigation (#3810). Previous effort #4529 was replaced with a series of pull requests (#5107, #5138, #5156) aimed at achieving the same performance benefits while keeping the C++ codebase legible. The latest performance benchmark results show [up to 5x speedup on Intel CPUs with many cores](https://github.com/dmlc/xgboost/pull/5156#issuecomment-580024413). Note: #5244, which concludes the effort, will become part of the upcoming release 1.1.0. 
-
-### Improved installation experience on Mac OSX (#4672, #5074, #5080, #5146, #5240)
-* It used to be quite complicated to install XGBoost on Mac OSX. XGBoost uses OpenMP to distribute work among multiple CPU cores, and Mac's default C++ compiler (Apple Clang) does not come with OpenMP. The existing work-around (using another C++ compiler) was complex and prone to fail with cryptic diagnoses (#4933, #4949, #4969).
-* Now it only takes two commands to install XGBoost: `brew install libomp` followed by `pip install xgboost`. The installed XGBoost will use all CPU cores.
-* Even better, XGBoost is now available from Homebrew: `brew install xgboost`. See Homebrew/homebrew-core#50467.
-* Previously, if you installed the XGBoost R package using the command `install.packages('xgboost')`, it could only use a single CPU core and you would experience slow training performance. With the 1.0.0 release, the R package will use all CPU cores out of the box.
-
-### Distributed XGBoost now available on Kubernetes (#4621, #4939)
-* Check out the [tutorial for setting up distributed XGBoost on a Kubernetes cluster](https://xgboost.readthedocs.io/en/release_1.0.0/tutorials/kubernetes.html).
-
-### Ruby binding for XGBoost (#4856)
-
-### New Native Dask interface for multi-GPU and multi-node scaling (#4473, #4507, #4617, #4819, #4907, #4914, #4941, #4942, #4951, #4973, #5048, #5077, #5144, #5270)
-* XGBoost now integrates seamlessly with [Dask](https://dask.org/), a lightweight distributed framework for data processing. Together with the first-class support for cuDF data frames (see below), it is now easier than ever to create an end-to-end data pipeline running on one or more NVIDIA GPUs.
-* Multi-GPU training with Dask is now up to 20% faster than the previous release (#4914, #4951).
-
-### First-class support for cuDF data frames and cuPy arrays (#4737, #4745, #4794, #4850, #4891, #4902, #4918, #4927, #4928, #5053, #5189, #5194, #5206, #5219, #5225)
-* [cuDF](https://github.com/rapidsai/cudf) is a data frame library for loading and processing tabular data on NVIDIA GPUs. It provides a Pandas-like API.
-* [cuPy](https://github.com/cupy/cupy) implements a NumPy-compatible multi-dimensional array on NVIDIA GPUs.
-* Now users can keep the data in GPU memory throughout the end-to-end data pipeline, obviating the need for copying data between the main memory and GPU memory.
-* XGBoost can accept any data structure that exposes the `__array_interface__` signature, opening the way to support other columnar formats that are compatible with Apache Arrow.
-
-### [Feature interaction constraint](https://xgboost.readthedocs.io/en/release_1.0.0/tutorials/feature_interaction_constraint.html) is now available with `approx` and `gpu_hist` algorithms (#4534, #4587, #4596, #5034).
-
-### Learning to rank is now GPU accelerated (#4873, #5004, #5129)
-* Supported ranking objectives: NDCG, Map, Pairwise.
-* [Up to 2x improved training performance on GPUs](https://devblogs.nvidia.com/learning-to-rank-with-xgboost-and-gpu/).
-
-### Enable `gamma` parameter for GPU training (#4874, #4953)
-* The `gamma` parameter specifies the minimum loss reduction required to add a new split in a tree. A larger value for `gamma` has the effect of pre-pruning the tree, by making it harder to add splits.
-
-### External memory for GPU training (#4486, #4526, #4747, #4833, #4879, #5014)
-* It is now possible to use NVIDIA GPUs even when the size of the training data exceeds the available GPU memory. Note that the external memory support for GPU is still experimental.
#5093 will further improve performance and will become part of the upcoming release 1.1.0. -* RFC for enabling external memory with GPU algorithms: #4357 - -### Improve Scikit-Learn interface (#4558, #4842, #4929, #5049, #5151, #5130, #5227) -* Many users of XGBoost enjoy the convenience and breadth of Scikit-Learn ecosystem. In this release, we revise the Scikit-Learn API of XGBoost (`XGBRegressor`, `XGBClassifier`, and `XGBRanker`) to achieve feature parity with the traditional XGBoost interface (`xgboost.train()`). -* Insert check to validate data shapes. -* Produce an error message if `eval_set` is not a tuple. An error message is better than silently crashing. -* Allow using `numpy.RandomState` object. -* Add `n_jobs` as an alias of `nthread`. -* Roadmap: #5152 - -### XGBoost4J-Spark: Redesigning checkpointing mechanism -* RFC is available at #4786 -* Clean up checkpoint file after a successful training job (#4754): The current implementation in XGBoost4J-Spark does not clean up the checkpoint file after a successful training job. If the user runs another job with the same checkpointing directory, she will get a wrong model because the second job will re-use the checkpoint file left over from the first job. To prevent this scenario, we propose to always clean up the checkpoint file after every successful training job. -* Avoid Multiple Jobs for Checkpointing (#5082): The current method for checkpoint is to collect the booster produced at the last iteration of each checkpoint internal to Driver and persist it in HDFS. The major issue with this approach is that it needs to re-perform the data preparation for training if the user did not choose to cache the training dataset. To avoid re-performing data prep, we build external-memory checkpointing in the XGBoost4J layer as well. -* Enable deterministic repartitioning when checkpoint is enabled (#4807): Distributed algorithm for gradient boosting assumes a fixed partition of the training data between multiple iterations. In previous versions, there was no guarantee that data partition would stay the same, especially when a worker goes down and some data had to recovered from previous checkpoint. In this release, we make data partition deterministic by using the data hash value of each data row in computing the partition. - -### XGBoost4J-Spark: handle errors thrown by the native code (#4560) -* All core logic of XGBoost is written in C++, so XGBoost4J-Spark internally uses the C++ code via Java Native Interface (JNI). #4560 adds a proper error handling for any errors or exceptions arising from the C++ code, so that the XGBoost Spark application can be torn down in an orderly fashion. - -### XGBoost4J-Spark: Refine method to count the number of alive cores (#4858) -* The `SparkParallelismTracker` class ensures that sufficient number of executor cores are alive. To that end, it is important to query the number of alive cores reliably. - -### XGBoost4J: Add `BigDenseMatrix` to store more than `Integer.MAX_VALUE` elements (#4383) - -### Robust model serialization with JSON (#4632, #4708, #4739, #4868, #4936, #4945, #4974, #5086, #5087, #5089, #5091, #5094, #5110, #5111, #5112, #5120, #5137, #5218, #5222, #5236, #5245, #5248, #5281) -* In this release, we introduce an experimental support of using [JSON](https://www.json.org/json-en.html) for serializing (saving/loading) XGBoost models and related hyperparameters for training. 
We would like to eventually replace the old binary format with JSON, since it is an open format and parsers are available in many programming languages and platforms. See [the documentation for model I/O using JSON](https://xgboost.readthedocs.io/en/release_1.0.0/tutorials/saving_model.html). #3980 explains why JSON was chosen over other alternatives.
-* To maximize interoperability and compatibility of the serialized models, we now split serialization into two parts (#4855):
-  1. Model, e.g. decision trees and strictly related metadata like `num_features`.
-  2. Internal configuration, consisting of training parameters and other configurable parameters. For example, `max_delta_step`, `tree_method`, `objective`, `predictor`, `gpu_id`.
-
-  Previously, users often ran into issues where the model file produced by one machine could not load or run on another machine. For example, models trained using a machine with an NVIDIA GPU could not run on another machine without a GPU (#5291, #5234). The reason is that the old binary format saved some internal configuration that was not universally applicable to all machines, e.g. `predictor='gpu_predictor'`.
-
-  Now, the model saving function (`Booster.save_model()` in Python) will save only the model, without internal configuration. This guarantees that your model file can be used anywhere. Internal configuration will be serialized in limited circumstances such as:
-  * Multiple nodes in a distributed system exchange model details over the network.
-  * Model checkpointing, to recover from possible crashes.
-
-  This work proved to be useful for parameter validation as well (see below).
-* Starting with the 1.0.0 release, we will use semantic versioning to indicate whether the model produced by one version of XGBoost is compatible with another version of XGBoost. Any change in the major version indicates a breaking change in the serialization format.
-* We now provide a robust method to save and load scikit-learn related attributes (#5245). Previously, we used Python pickle to save Python attributes related to `XGBClassifier`, `XGBRegressor`, and `XGBRanker` objects. These attributes are necessary to properly interact with scikit-learn. See #4639 for more details. The use of pickling hampered interoperability, as a pickle from one machine may not necessarily work on another machine. Starting with this release, we use an alternative method to serialize the scikit-learn related attributes. The use of Python pickle is now discouraged (#5236, #5281).
-
-### Parameter validation: detection of unused or incorrect parameters (#4553, #4577, #4738, #4801, #4961, #5101, #5157, #5167, #5256)
-* Mis-spelled training parameters are a common user mistake. In previous versions of XGBoost, mis-spelled parameters were silently ignored. Starting with the 1.0.0 release, XGBoost will produce a warning message if there are any unused training parameters. Currently, parameter validation is available to R users and Python XGBoost API users. We are working to extend its support to scikit-learn users.
-* Configuration steps now have well-defined semantics (#4542, #4738), so we know exactly where and how the internal configurable parameters are changed.
-* The user can now use the `save_config()` function to inspect all (used) training parameters. This is helpful for debugging model performance; a short illustrative sketch follows the list of breaking changes below.
-
-### Allow individual workers to recover from faults (#4808, #4966)
-* Status quo: if a worker fails, all workers are shut down and restarted, and learning resumes from the last checkpoint.
This involves requesting resources from the scheduler (e.g. Spark) and shuffling all the data again from scratch. Both of these operations can be quite costly and block training for extended periods of time, especially if the training data is big and the number of worker nodes is in the hundreds.
-* The proposed solution is to recover the single node that failed, instead of shutting down all workers. The rest of the cluster waits until the single failed worker is bootstrapped and catches up with the rest.
-* See the roadmap at #4753. Note that this is work in progress. In particular, the feature is not yet available from XGBoost4J-Spark.
-
-### Accurate prediction for DART models
-* Use DART tree weights when computing SHAPs (#5050)
-* Don't drop trees during DART prediction by default (#5115)
-* Fix DART prediction in R (#5204)
-
-### Make external memory more robust
-* Fix issues with training with external memory on CPU (#4487)
-* Fix crash with approx tree method on CPU (#4510)
-* Fix external memory race in `exact` (#4980). Note: `dmlc::ThreadedIter` is not actually thread-safe. We would like to re-design it in the long term.
-
-### Major refactoring of the `DMatrix` class (#4686, #4744, #4748, #5044, #5092, #5108, #5188, #5198)
-* Goal 1: improve performance and reduce memory consumption. Right now, if the user trains a model with a NumPy array as training data, the array gets copied 2-3 times before training begins. We'd like to reduce duplication of the data matrix.
-* Goal 2: Expose a common interface to external data, unify the way DMatrix objects are constructed and simplify the process of adding new external data sources. This work is essential for ingesting cuPy arrays.
-* Goal 3: Handle missing values consistently.
-* RFC: #4354, Roadmap: #5143
-* This work is also relevant to external memory support on GPUs.
-
-### Breaking: XGBoost Python package now requires Python 3.5 or newer (#5021, #5274)
-* Python 3.4 reached its end-of-life on March 16, 2019, so we now require Python 3.5 or newer.
-
-### Breaking: GPU algorithm now requires CUDA 9.0 and higher (#4527, #4580)
-
-### Breaking: `n_gpus` parameter removed; multi-GPU training now requires a distributed framework (#4579, #4749, #4773, #4810, #4867, #4908)
-* #4531 proposed removing support for single-process multi-GPU training. Contributors would focus on multi-GPU support through distributed frameworks such as Dask and Spark, where the framework would be expected to assign a worker process for each GPU independently. By delegating GPU management and data movement to the distributed framework, we can greatly simplify the core XGBoost codebase, make multi-GPU training more robust, and reduce the burden of future development.
-
-### Breaking: Some deprecated features have been removed
-* ``gpu_exact`` training method (#4527, #4742, #4777). Use ``gpu_hist`` instead.
-* ``learning_rates`` parameter in Python (#5155). Use the callback API instead.
-* ``num_roots`` (#5059, #5165), since the current training code always uses a single root node.
-* GPU-specific objectives (#4690), such as `gpu:reg:linear`. Use objectives without the `gpu:` prefix; the GPU will be used automatically if your machine has one.
-
-### Breaking: the C API function `XGBoosterPredict()` now asks for an extra parameter `training`.
-
-### Breaking: We now use CMake exclusively to build XGBoost. `Makefile` is being sunset.
-* Exception: the R package uses Autotools, as the CRAN ecosystem did not yet adopt CMake widely.
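To make the JSON serialization and `save_config()` introspection described above concrete, here is a minimal, illustrative Python sketch; the file name `model.json` and the toy random data are assumptions of this example, not part of the release notes:

```python
import numpy as np
import xgboost as xgb

# Toy data, for illustration only.
X = np.random.rand(100, 5)
y = np.random.randint(2, size=100)
dtrain = xgb.DMatrix(X, label=y)
bst = xgb.train({"objective": "binary:logistic"}, dtrain, num_boost_round=5)

# Save only the model (trees plus strictly related metadata) as JSON;
# the ".json" extension selects the open serialization format.
bst.save_model("model.json")

# Internal configuration (training parameters etc.) is kept separate
# and can be inspected as a JSON string.
print(bst.save_config())
```

Loading works symmetrically, e.g. `xgb.Booster(model_file="model.json")`.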
-
-### Performance improvements
-* Smarter choice of histogram construction for distributed `gpu_hist` (#4519)
-* Optimizations for quantization on device (#4572)
-* Introduce caching memory allocator to avoid latency associated with GPU memory allocation (#4554, #4615)
-* Optimize the initialization stage of the CPU `hist` algorithm for sparse datasets (#4625)
-* Prevent unnecessary data copies from GPU memory to the host (#4795)
-* Improve operation efficiency for single prediction (#5016)
-* Group builder modified for incremental building, to speed up building large `DMatrix` (#5098)
-
-### Bug-fixes
-* Eliminate `FutureWarning: Series.base is deprecated` (#4337)
-* Ensure pandas DataFrame column names are treated as strings in type error message (#4481)
-* [jvm-packages] Add back `reg:linear` for Scala, as it is only deprecated and not meant to be removed yet (#4490)
-* Fix library loading for Cygwin users (#4499)
-* Fix prediction from loaded pickle (#4516)
-* Enforce exclusion between `pred_interactions=True` and `pred_contribs=True` (#4522)
-* Do not return dangling reference to local `std::string` (#4543)
-* Set the appropriate device before freeing device memory (#4566)
-* Mark `SparsePageDmatrix` destructor default. (#4568)
-* Choose the appropriate tree method only when the tree method is 'auto' (#4571)
-* Fix `benchmark_tree.py` (#4593)
-* [jvm-packages] Fix silly bug in feature scoring (#4604)
-* Fix GPU predictor when the test data matrix has a different number of features than the training data matrix used to train the model (#4613)
-* Fix external memory for get column batches. (#4622)
-* [R] Use built-in label when xgb.DMatrix is given to xgb.cv() (#4631)
-* Fix early stopping in the Python package (#4638)
-* Fix AUC error in distributed mode caused by imbalanced dataset (#4645, #4798)
-* [jvm-packages] Expose `setMissing` method in `XGBoostClassificationModel` / `XGBoostRegressionModel` (#4643)
-* Remove initializing stringstream reference. (#4788)
-* [R] `xgb.get.handle` now checks all classes listed in `object` (#4800)
-* Do not use `gpu_predictor` unless data comes from GPU (#4836)
-* Fix data loading (#4862)
-* Workaround `isnan` across different environments. (#4883)
-* [jvm-packages] Handle Long-type parameter (#4885)
-* Don't `set_params` at the end of `set_state` (#4947). Ensure that the model does not change after pickling and unpickling multiple times.
-* C++ exceptions should not crash OpenMP loops (#4960)
-* Fix `usegpu` flag in DART. (#4984)
-* Run training with empty `DMatrix` (#4990, #5159)
-* Ensure that no two processes can use the same GPU (#4990)
-* Fix repeated split and 0 cover nodes (#5010)
-* Reset histogram hit counter between multiple data batches (#5035)
-* Fix `feature_name` created from Int64Index dataframe.
(#5081)
-* Don't use 0 for "fresh leaf" (#5084)
-* Throw error when user attempts to use multi-GPU training and XGBoost has not been compiled with NCCL (#5170)
-* Fix metric name loading (#5122)
-* Quick fix for memory leak in CPU `hist` algorithm (#5153)
-* Fix wrapping GPU ID and prevent data copying (#5160)
-* Fix signature of Span constructor (#5166)
-* Lazy initialization of device vector, so that XGBoost compiled with CUDA can run on a machine without any GPU (#5173)
-* Model loading should not change system locale (#5314)
-* Distributed training jobs would sometimes hang; revert Rabit to fix this regression (dmlc/rabit#132, #5237)
-
-### API changes
-* Add support for cross-validation using query ID (#4474)
-* Enable feature importance property for DART model (#4525)
-* Add `rmsle` metric and `reg:squaredlogerror` objective (#4541)
-* All objective and evaluation metrics are now exposed to JVM packages (#4560)
-* `dump_model()` and `get_dump()` now support exporting in GraphViz language (#4602)
-* Support metrics `ndcg-` and `map-` (#4635)
-* [jvm-packages] Allow chaining prediction (transform) in XGBoost4J-Spark (#4667)
-* [jvm-packages] Add option to bypass missing value check in the Spark layer (#4805). Only use this option if you know what you are doing.
-* [jvm-packages] Add public group getter (#4838)
-* `XGDMatrixSetGroup` C API is now deprecated (#4864). Use `XGDMatrixSetUIntInfo` instead.
-* [R] Added new `train_folds` parameter to `xgb.cv()` (#5114)
-* Ingest meta information from Pandas DataFrame, such as data weights (#5216)
-
-### Maintenance: Refactor code for legibility and maintainability
-* De-duplicate GPU parameters (#4454)
-* Simplify INI-style config reader using C++11 STL (#4478, #4521)
-* Refactor histogram building code for `gpu_hist` (#4528)
-* Overload device memory allocator, to enable instrumentation for compiling memory usage statistics (#4532)
-* Refactor out row partitioning logic from `gpu_hist` (#4554)
-* Remove an unused variable (#4588)
-* Implement tree model dump with code generator, to de-duplicate code for generating dumps in 3 different formats (#4602)
-* Remove `RowSet` class which is no longer being used (#4697)
-* Remove some unused functions as reported by cppcheck (#4743)
-* Mimic CUDA assert output in Span check (#4762)
-* [jvm-packages] Refactor `XGBoost.scala` to put all params processing in one place (#4815)
-* Add some comments for GPU row partitioner (#4832)
-* Span: use `size_t` for index_type, add `front` and `back`. (#4935)
-* Remove dead code in `exact` algorithm (#5034, #5105)
-* Unify integer types used for row and column indices (#5034)
-* Extract feature interaction constraint from `SplitEvaluator` class. (#5034)
-* [Breaking] De-duplicate parameters and docstrings in the constructors of Scikit-Learn models (#5130)
-* Remove benchmark code from GPU tests (#5141)
-* Clean up Python 2 compatibility code. (#5161)
-* Extensible binary serialization format for `DMatrix::MetaInfo` (#5187). This will be useful for implementing censored labels for survival analysis applications.
-* Cleanup clang-tidy warnings. (#5247)
-
-### Maintenance: testing, continuous integration, build system
-* Use `yaml.safe_load` instead of `yaml.load`.
(#4537)
-* Ensure GCC is at least 5.x (#4538)
-* Remove all mention of `reg:linear` from tests (#4544)
-* [jvm-packages] Upgrade to Scala 2.12 (#4574)
-* [jvm-packages] Update kryo dependency to 2.22 (#4575)
-* [CI] Specify account ID when logging into ECR Docker registry (#4584)
-* Use Sphinx 2.1+ to compile documentation (#4609)
-* Make Pandas optional for running Python unit tests (#4620)
-* Fix Spark tests on machines with many cores (#4634)
-* [jvm-packages] Update local dev build process (#4640)
-* Add optional dependencies to setup.py (#4655)
-* [jvm-packages] Fix maven warnings (#4664)
-* Remove extraneous files from the R package, to comply with CRAN policy (#4699)
-* Remove VC-2013 support, since it is not C++11 compliant (#4701)
-* [CI] Fix broken installation of Pandas (#4704, #4722)
-* [jvm-packages] Clean up temporary files after running tests (#4706)
-* Specify version macro in CMake. (#4730)
-* Include dmlc-tracker into XGBoost Python package (#4731)
-* [CI] Use long key ID for Ubuntu repository fingerprints. (#4783)
-* Remove plugin, cuda related code in automake & autoconf files (#4789)
-* Skip related tests when scikit-learn is not installed. (#4791)
-* Ignore vscode and clion files (#4866)
-* Use bundled Google Test by default (#4900)
-* [CI] Raise timeout threshold in Jenkins (#4938)
-* Copy CMake parameter from dmlc-core. (#4948)
-* Set correct file permission. (#4964)
-* [CI] Update lint configuration to support latest pylint convention (#4971)
-* [CI] Upload nightly builds to S3 (#4976, #4979)
-* Add asan.so.5 to cmake script. (#4999)
-* [CI] Fix Travis tests. (#5062)
-* [CI] Locate vcomp140.dll from System32 directory (#5078)
-* Implement training observer to dump internal states of objects (#5088). This will be useful for debugging.
-* Fix visual studio output library directories (#5119)
-* [jvm-packages] Comply with scala style convention + fix broken unit test (#5134)
-* [CI] Repair download URL for Maven 3.6.1 (#5139)
-* Don't use modernize-use-trailing-return-type in clang-tidy. (#5169)
-* Explicitly use UTF-8 codepage when using MSVC (#5197)
-* Add CMake option to run Undefined Behavior Sanitizer (UBSan) (#5211)
-* Make some GPU tests deterministic (#5229)
-* [R] Robust endian detection in CRAN xgboost build (#5232)
-* Support FreeBSD (#5233)
-* Make `pip install xgboost*.tar.gz` work by fixing build-python.sh (#5241)
-* Fix compilation error due to 64-bit integer narrowing to `size_t` (#5250)
-* Remove use of `std::cout` from R package, to comply with CRAN policy (#5261)
-* Update DMLC-Core submodule (#4674, #4688, #4726, #4924)
-* Update Rabit submodule (#4560, #4667, #4718, #4808, #4966, #5237)
-
-### Usability Improvements, Documentation
-* Add Random Forest API to Python API doc (#4500)
-* Fix Python demo and doc. (#4545)
-* Remove doc about not supporting CUDA 10.1 (#4578)
-* Address some sphinx warnings and errors, add doc for building doc.
(#4589)
-* Add instruction to run formatting checks locally (#4591)
-* Fix docstring for `XGBModel.predict()` (#4592)
-* Doc and demo for customized metric and objective (#4598, #4608)
-* Add to documentation how to run tests locally (#4610)
-* Empty evaluation list in early stopping should produce meaningful error message (#4633)
-* Fixed year to 2019 in conf.py, helpers.h and LICENSE (#4661)
-* Minor updates to links and grammar (#4673)
-* Remove `silent` in doc (#4689)
-* Remove old Python troubleshooting doc (#4729)
-* Add `os.PathLike` support for file paths to DMatrix and Booster Python classes (#4757)
-* Update XGBoost4J-Spark doc (#4804)
-* Regular formatting for evaluation metrics (#4803)
-* [jvm-packages] Refine documentation for handling missing values in XGBoost4J-Spark (#4805)
-* Monitor for distributed environment (#4829). This is useful for identifying performance bottlenecks.
-* Add check for length of weights and produce a good error message (#4872)
-* Fix DMatrix doc (#4884)
-* Export C++ headers in CMake installation (#4897)
-* Update license year in README.md to 2019 (#4940)
-* Fix incorrectly displayed Note in the doc (#4943)
-* Follow PEP 257 Docstring Conventions (#4959)
-* Document minimum version required for Google Test (#5001)
-* Add better error message for invalid feature names (#5024)
-* Some guidelines on device memory usage (#5038)
-* [doc] Some notes for external memory. (#5065)
-* Update document for `tree_method` (#5106)
-* Update demo for ranking. (#5154)
-* Add new lines for Spark XGBoost missing values section (#5180)
-* Fix simple typo: utilty -> utility (#5182)
-* Update R doc by roxygen2 (#5201)
-* [R] Direct user to use `set.seed()` instead of setting `seed` parameter (#5125)
-* Add Optuna badge to `README.md` (#5208)
-* Fix compilation error in `c-api-demo.c` (#5215)
-
-### Acknowledgement
-**Contributors**: Nan Zhu (@CodingCat), Crissman Loomis (@Crissman), Cyprien Ricque (@Cyprien-Ricque), Evan Kepner (@EvanKepner), K.O.
(@Hi-king), KaiJin Ji (@KerryJi), Peter Badida (@KeyWeeUsr), Kodi Arfer (@Kodiologist), Rory Mitchell (@RAMitchell), Egor Smirnov (@SmirnovEgorRu), Jacob Kim (@TheJacobKim), Vibhu Jawa (@VibhuJawa), Marcos (@astrowonk), Andy Adinets (@canonizer), Chen Qin (@chenqin), Christopher Cowden (@cowden), @cpfarrell, @david-cortes, Liangcai Li (@firestarman), @fuhaoda, Philip Hyunsu Cho (@hcho3), @here-nagini, Tong He (@hetong007), Michal Kurka (@michalkurka), Honza Sterba (@honzasterba), @iblumin, @koertkuipers, mattn (@mattn), Mingjie Tang (@merlintang), OrdoAbChao (@mglowacki100), Matthew Jones (@mt-jones), mitama (@nigimitama), Nathan Moore (@nmoorenz), Daniel Stahl (@phillyfan1138), Michaël Benesty (@pommedeterresautee), Rong Ou (@rongou), Sebastian (@sfahnens), Xu Xiao (@sperlingxx), @sriramch, Sean Owen (@srowen), Stephanie Yang (@stpyang), Yuan Tang (@terrytangyuan), Mathew Wicks (@thesuperzapper), Tim Gates (@timgates42), TinkleG (@tinkle1129), Oleksandr Pryimak (@trams), Jiaming Yuan (@trivialfis), Matvey Turkov (@turk0v), Bobby Wang (@wbo4958), yage (@yage99), @yellowdolphin - -**Reviewers**: Nan Zhu (@CodingCat), Crissman Loomis (@Crissman), Cyprien Ricque (@Cyprien-Ricque), Evan Kepner (@EvanKepner), John Zedlewski (@JohnZed), KOLANICH (@KOLANICH), KaiJin Ji (@KerryJi), Kodi Arfer (@Kodiologist), Rory Mitchell (@RAMitchell), Egor Smirnov (@SmirnovEgorRu), Nikita Titov (@StrikerRUS), Jacob Kim (@TheJacobKim), Vibhu Jawa (@VibhuJawa), Andrew Kane (@ankane), Arno Candel (@arnocandel), Marcos (@astrowonk), Bryan Woods (@bryan-woods), Andy Adinets (@canonizer), Chen Qin (@chenqin), Thomas Franke (@coding-komek), Peter (@codingforfun), @cpfarrell, Joshua Patterson (@datametrician), @fuhaoda, Philip Hyunsu Cho (@hcho3), Tong He (@hetong007), Honza Sterba (@honzasterba), @iblumin, @jakirkham, Vadim Khotilovich (@khotilov), Keith Kraus (@kkraus14), @koertkuipers, @melonki, Mingjie Tang (@merlintang), OrdoAbChao (@mglowacki100), Daniel Mahler (@mhlr), Matthew Rocklin (@mrocklin), Matthew Jones (@mt-jones), Michaël Benesty (@pommedeterresautee), PSEUDOTENSOR / Jonathan McKinney (@pseudotensor), Rong Ou (@rongou), Vladimir (@sh1ng), Scott Lundberg (@slundberg), Xu Xiao (@sperlingxx), @sriramch, Pasha Stetsenko (@st-pasha), Stephanie Yang (@stpyang), Yuan Tang (@terrytangyuan), Mathew Wicks (@thesuperzapper), Theodore Vasiloudis (@thvasilo), TinkleG (@tinkle1129), Oleksandr Pryimak (@trams), Jiaming Yuan (@trivialfis), Bobby Wang (@wbo4958), yage (@yage99), @yellowdolphin, Yin Lou (@yinlou) - -## v0.90 (2019.05.18) - -### XGBoost Python package drops Python 2.x (#4379, #4381) -Python 2.x is reaching its end-of-life at the end of this year. [Many scientific Python packages are now moving to drop Python 2.x](https://python3statement.org/). - -### XGBoost4J-Spark now requires Spark 2.4.x (#4377) -* Spark 2.3 is reaching its end-of-life soon. See discussion at #4389. -* **Consistent handling of missing values** (#4309, #4349, #4411): Many users had reported issue with inconsistent predictions between XGBoost4J-Spark and the Python XGBoost package. The issue was caused by Spark mis-handling non-zero missing values (NaN, -1, 999 etc). We now alert the user whenever Spark doesn't handle missing values correctly (#4309, #4349). See [the tutorial for dealing with missing values in XGBoost4J-Spark](https://xgboost.readthedocs.io/en/release_0.90/jvm/xgboost4j_spark_tutorial.html#dealing-with-missing-values). This fix also depends on the availability of Spark 2.4.x. 
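For comparison with the Spark-side fix, the Python package makes the missing-value sentinel explicit when constructing a `DMatrix`. A minimal sketch, where the sentinel `-999.0` and the toy data are illustrative assumptions, not values from the release notes:

```python
import numpy as np
import xgboost as xgb

# -999.0 encodes "missing" in this toy dataset, mirroring the non-zero
# sentinels (NaN, -1, 999, ...) discussed for XGBoost4J-Spark above.
X = np.array([[1.0, -999.0],
              [0.5, 2.0],
              [-999.0, 3.0]])
y = np.array([0, 1, 1])

# Declaring the sentinel tells XGBoost to treat those cells as missing
# instead of as ordinary feature values.
dtrain = xgb.DMatrix(X, label=y, missing=-999.0)
bst = xgb.train({"objective": "binary:logistic"}, dtrain, num_boost_round=2)
```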
- -### Roadmap: better performance scaling for multi-core CPUs (#4310) -* Poor performance scaling of the `hist` algorithm for multi-core CPUs has been under investigation (#3810). #4310 optimizes quantile sketches and other pre-processing tasks. Special thanks to @SmirnovEgorRu. - -### Roadmap: Harden distributed training (#4250) -* Make distributed training in XGBoost more robust by hardening [Rabit](https://github.com/dmlc/rabit), which implements [the AllReduce primitive](https://en.wikipedia.org/wiki/Reduce_%28parallel_pattern%29). In particular, improve test coverage on mechanisms for fault tolerance and recovery. Special thanks to @chenqin. - -### New feature: Multi-class metric functions for GPUs (#4368) -* Metrics for multi-class classification have been ported to GPU: `merror`, `mlogloss`. Special thanks to @trivialfis. -* With supported metrics, XGBoost will select the correct devices based on your system and `n_gpus` parameter. - -### New feature: Scikit-learn-like random forest API (#4148, #4255, #4258) -* XGBoost Python package now offers `XGBRFClassifier` and `XGBRFRegressor` API to train random forests. See [the tutorial](https://xgboost.readthedocs.io/en/release_0.90/tutorials/rf.html). Special thanks to @canonizer - -### New feature: use external memory in GPU predictor (#4284, #4396, #4438, #4457) -* It is now possible to make predictions on GPU when the input is read from external memory. This is useful when you want to make predictions with big dataset that does not fit into the GPU memory. Special thanks to @rongou, @canonizer, @sriramch. - - ```python - dtest = xgboost.DMatrix('test_data.libsvm#dtest.cache') - bst.set_param('predictor', 'gpu_predictor') - bst.predict(dtest) - ``` - -* Coming soon: GPU training (`gpu_hist`) with external memory - -### New feature: XGBoost can now handle comments in LIBSVM files (#4430) -* Special thanks to @trivialfis and @hcho3 - -### New feature: Embed XGBoost in your C/C++ applications using CMake (#4323, #4333, #4453) -* It is now easier than ever to embed XGBoost in your C/C++ applications. In your CMakeLists.txt, add `xgboost::xgboost` as a linked library: - - ```cmake - find_package(xgboost REQUIRED) - add_executable(api-demo c-api-demo.c) - target_link_libraries(api-demo xgboost::xgboost) - ``` - - [XGBoost C API documentation is available.](https://xgboost.readthedocs.io/en/release_0.90/dev) Special thanks to @trivialfis - -### Performance improvements -* Use feature interaction constraints to narrow split search space (#4341, #4428) -* Additional optimizations for `gpu_hist` (#4248, #4283) -* Reduce OpenMP thread launches in `gpu_hist` (#4343) -* Additional optimizations for multi-node multi-GPU random forests. 
(#4238)
-* Allocate unique prediction buffer for each input matrix, to avoid re-sizing GPU array (#4275)
-* Remove various synchronisations from CUDA API calls (#4205)
-* XGBoost4J-Spark
-  - Allow the user to control whether to cache partitioned training data, to potentially reduce execution time (#4268)
-
-### Bug-fixes
-* Fix node reuse in `hist` (#4404)
-* Fix GPU histogram allocation (#4347)
-* Fix matrix attributes not sliced (#4311)
-* Revise AUC and AUCPR metrics to work with weighted ranking tasks (#4216, #4436)
-* Fix timer invocation for InitDataOnce() in `gpu_hist` (#4206)
-* Fix R-devel errors (#4251)
-* Make gradient update in GPU linear updater thread-safe (#4259)
-* Prevent out-of-range access in column matrix (#4231)
-* Don't store DMatrix handle in Python object until it's initialized, to improve exception safety (#4317)
-* XGBoost4J-Spark
-  - Fix non-deterministic order within a zipped partition on prediction (#4388)
-  - Remove race condition on tracker shutdown (#4224)
-  - Allow setting the parameter `maxLeaves`. (#4226)
-  - Allow partial evaluation of dataframe before prediction (#4407)
-  - Automatically set `maximize_evaluation_metrics` if not explicitly given (#4446)
-
-### API changes
-* Deprecate `reg:linear` in favor of `reg:squarederror`. (#4267, #4427)
-* Add attribute getter and setter to the Booster object in XGBoost4J (#4336)
-
-### Maintenance: Refactor C++ code for legibility and maintainability
-* Fix clang-tidy warnings. (#4149)
-* Remove deprecated C APIs. (#4266)
-* Use Monitor class to time functions in `hist`. (#4273)
-* Retire DVec class in favour of C++20 style span for device memory. (#4293)
-* Improve HostDeviceVector exception safety (#4301)
-
-### Maintenance: testing, continuous integration, build system
-* **Major refactor of CMakeLists.txt** (#4323, #4333, #4453): adopt modern CMake and export XGBoost as a target
-* **Major improvement in Jenkins CI pipeline** (#4234)
-  - Migrate all Linux tests to Jenkins (#4401)
-  - Builds and tests are now de-coupled, to test an artifact against multiple versions of CUDA, JDK, and other dependencies (#4401)
-  - Add Windows GPU to Jenkins CI pipeline (#4463, #4469)
-* Support CUDA 10.1 (#4223, #4232, #4265, #4468)
-* Python wheels are now built with CUDA 9.0, so that JIT is not required on Volta architecture (#4459)
-* Integrate with NVTX CUDA profiler (#4205)
-* Add a test for cpu predictor using external memory (#4308)
-* Refactor tests to get rid of duplication (#4358)
-* Remove test dependency on `craigcitro/r-travis`, since it's deprecated (#4353)
-* Add files from local R build to `.gitignore` (#4346)
-* Make XGBoost4J compatible with Java 9+ by revising NativeLibLoader (#4351)
-* Jenkins build for CUDA 10.0 (#4281)
-* Remove remaining `silent` and `debug_verbose` in Python tests (#4299)
-* Use all cores to build XGBoost4J lib on linux (#4304)
-* Upgrade Jenkins Linux build environment to GCC 5.3.1, CMake 3.6.0 (#4306)
-* Make CMakeLists.txt compatible with CMake 3.3 (#4420)
-* Add OpenMP option in CMakeLists.txt (#4339)
-* Get rid of a few trivial compiler warnings (#4312)
-* Add external Docker build cache, to speed up builds on Jenkins CI (#4331, #4334, #4458)
-* Fix Windows tests (#4403)
-* Fix a broken python test (#4395)
-* Use a fixed seed to split data in XGBoost4J-Spark tests, for reproducibility (#4417)
-* Add additional Python tests to test training under constraints (#4426)
-* Enable building with shared NCCL.
(#4447)
-
-### Usability Improvements, Documentation
-* Document limitation of one-split-at-a-time Greedy tree learning heuristic (#4233)
-* Update build doc: PyPI wheel now supports multi-GPU (#4219)
-* Fix docs for `num_parallel_tree` (#4221)
-* Fix document about `colsample_by*` parameter (#4340)
-* Make the train and test inputs use the same column names. (#4329)
-* Update R contribute link. (#4236)
-* Fix Travis R tests (#4277)
-* Log version number in crash log in XGBoost4J-Spark (#4271, #4303)
-* Allow suppression of Rabit output in Booster::train in XGBoost4J (#4262)
-* Add tutorial on handling missing values in XGBoost4J-Spark (#4425)
-* Fix typos (#4345, #4393, #4432, #4435)
-* Added language classifier in setup.py (#4327)
-* Added Travis CI badge (#4344)
-* Add BentoML to use case section (#4400)
-* Remove subtly sexist remark (#4418)
-* Add R vignette about parsing JSON dumps (#4439)
-
-### Acknowledgement
-**Contributors**: Nan Zhu (@CodingCat), Adam Pocock (@Craigacp), Daniel Hen (@Daniel8hen), Jiaxiang Li (@JiaxiangBU), Rory Mitchell (@RAMitchell), Egor Smirnov (@SmirnovEgorRu), Andy Adinets (@canonizer), Jonas (@elcombato), Harry Braviner (@harrybraviner), Philip Hyunsu Cho (@hcho3), Tong He (@hetong007), James Lamb (@jameslamb), Jean-Francois Zinque (@jeffzi), Yang Yang (@jokerkeny), Mayank Suman (@mayanksuman), jess (@monkeywithacupcake), Hajime Morrita (@omo), Ravi Kalia (@project-delphi), @ras44, Rong Ou (@rongou), Shaochen Shi (@shishaochen), Xu Xiao (@sperlingxx), @sriramch, Jiaming Yuan (@trivialfis), Christopher Suchanek (@wsuchy), Bozhao (@yubozhao)
-
-**Reviewers**: Nan Zhu (@CodingCat), Adam Pocock (@Craigacp), Daniel Hen (@Daniel8hen), Jiaxiang Li (@JiaxiangBU), Laurae (@Laurae2), Rory Mitchell (@RAMitchell), Egor Smirnov (@SmirnovEgorRu), @alois-bissuel, Andy Adinets (@canonizer), Chen Qin (@chenqin), Harry Braviner (@harrybraviner), Philip Hyunsu Cho (@hcho3), Tong He (@hetong007), @jakirkham, James Lamb (@jameslamb), Julien Schueller (@jschueller), Mayank Suman (@mayanksuman), Hajime Morrita (@omo), Rong Ou (@rongou), Sara Robinson (@sararob), Shaochen Shi (@shishaochen), Xu Xiao (@sperlingxx), @sriramch, Sean Owen (@srowen), Sergei Lebedev (@superbobry), Yuan (Terry) Tang (@terrytangyuan), Theodore Vasiloudis (@thvasilo), Matthew Tovbin (@tovbinm), Jiaming Yuan (@trivialfis), Xin Yin (@xydrolase)
-
-## v0.82 (2019.03.03)
-This release is packed with many new features and bug fixes.
-
-### Roadmap: better performance scaling for multi-core CPUs (#3957)
-* Poor performance scaling of the `hist` algorithm for multi-core CPUs has been under investigation (#3810). #3957 marks an important step toward better performance scaling, by using software pre-fetching and replacing STL vectors with C-style arrays. Special thanks to @Laurae2 and @SmirnovEgorRu.
-* See #3810 for latest progress on this roadmap.
-
-### New feature: Distributed Fast Histogram Algorithm (`hist`) (#4011, #4102, #4140, #4128)
-* It is now possible to run the `hist` algorithm in a distributed setting. Special thanks to @CodingCat. The benefits include:
-  1. Faster local computation via feature binning
-  2. Support for monotonic constraints and feature interaction constraints
-  3. Simpler codebase than `approx`, allowing for future improvement
-* Depth-wise tree growing is now performed in a separate code path, so that cross-node synchronization is performed only once per level.
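As a rough illustration of the `hist` feature set above (single-machine here for brevity; the synthetic data and constraint values are made up for this sketch, not taken from the release notes), monotonic constraints can be combined with the histogram algorithm like so:

```python
import numpy as np
import xgboost as xgb

# Synthetic regression data: increasing in feature 0, decreasing in feature 1.
rng = np.random.RandomState(0)
X = rng.rand(200, 2)
y = X[:, 0] - X[:, 1] + rng.normal(scale=0.1, size=200)
dtrain = xgb.DMatrix(X, label=y)

params = {
    "tree_method": "hist",              # feature binning for faster local computation
    "monotone_constraints": "(1,-1)",   # enforce increasing/decreasing effects
    "objective": "reg:squarederror",
}
bst = xgb.train(params, dtrain, num_boost_round=20)
```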
- -### New feature: Multi-Node, Multi-GPU training (#4095) -* Distributed training is now able to utilize clusters equipped with NVIDIA GPUs. In particular, the rabit AllReduce layer will communicate GPU device information. Special thanks to @mt-jones, @RAMitchell, @rongou, @trivialfis, @canonizer, and @jeffdk. -* Resource management systems will be able to assign a rank for each GPU in the cluster. -* In Dask, users will be able to construct a collection of XGBoost processes over an inhomogeneous device cluster (i.e. workers with different number and/or kinds of GPUs). - -### New feature: Multiple validation datasets in XGBoost4J-Spark (#3904, #3910) -* You can now track the performance of the model during training with multiple evaluation datasets. By specifying `eval_sets` or call `setEvalSets` over a `XGBoostClassifier` or `XGBoostRegressor`, you can pass in multiple evaluation datasets typed as a `Map` from `String` to `DataFrame`. Special thanks to @CodingCat. -* See the usage of multiple validation datasets [here](https://github.com/dmlc/xgboost/blob/0c1d5f1120c0a159f2567b267f0ec4ffadee00d0/jvm-packages/xgboost4j-example/src/main/scala/ml/dmlc/xgboost4j/scala/example/spark/SparkTraining.scala#L66-L78) - -### New feature: Additional metric functions for GPUs (#3952) -* Element-wise metrics have been ported to GPU: `rmse`, `mae`, `logloss`, `poisson-nloglik`, `gamma-deviance`, `gamma-nloglik`, `error`, `tweedie-nloglik`. Special thanks to @trivialfis and @RAMitchell. -* With supported metrics, XGBoost will select the correct devices based on your system and `n_gpus` parameter. - -### New feature: Column sampling at individual nodes (splits) (#3971) -* Columns (features) can now be sampled at individual tree nodes, in addition to per-tree and per-level sampling. To enable per-node sampling, set `colsample_bynode` parameter, which represents the fraction of columns sampled at each node. This parameter is set to 1.0 by default (i.e. no sampling per node). Special thanks to @canonizer. -* The `colsample_bynode` parameter works cumulatively with other `colsample_by*` parameters: for example, `{'colsample_bynode':0.5, 'colsample_bytree':0.5}` with 100 columns will give 25 features to choose from at each split. - -### Major API change: consistent logging level via `verbosity` (#3982, #4002, #4138) -* XGBoost now allows fine-grained control over logging. You can set `verbosity` to 0 (silent), 1 (warning), 2 (info), and 3 (debug). This is useful for controlling the amount of logging outputs. Special thanks to @trivialfis. -* Parameters `silent` and `debug_verbose` are now deprecated. -* Note: Sometimes XGBoost tries to change configurations based on heuristics, which is displayed as warning message. If there's unexpected behaviour, please try to increase value of verbosity. - -### Major bug fix: external memory (#4040, #4193) -* Clarify object ownership in multi-threaded prefetcher, to avoid memory error. -* Correctly merge two column batches (which uses [CSC layout](https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_column_(CSC_or_CCS))). -* Add unit tests for external memory. -* Special thanks to @trivialfis and @hcho3. - -### Major bug fix: early stopping fixed in XGBoost4J and XGBoost4J-Spark (#3928, #4176) -* Early stopping in XGBoost4J and XGBoost4J-Spark is now consistent with its counterpart in the Python package. Training stops if the current iteration is `earlyStoppingSteps` away from the best iteration. 
If there are multiple evaluation sets, only the last one is used to determine early stopping. -* See the updated documentation [here](https://xgboost.readthedocs.io/en/release_0.82/jvm/xgboost4j_spark_tutorial.html#early-stopping) -* Special thanks to @CodingCat, @yanboliang, and @mingyang. - -### Major bug fix: infrequent features should not crash distributed training (#4045) -* For infrequently occurring features, some partitions may not get any instance. This scenario used to crash distributed training due to malformed ranges. The problem has now been fixed. -* In practice, one-hot-encoded categorical variables tend to produce rare features, particularly when the cardinality is high. -* Special thanks to @CodingCat. - -### Performance improvements -* Faster, more space-efficient radix sorting in `gpu_hist` (#3895) -* Subtraction trick in histogram calculation in `gpu_hist` (#3945) -* More performant re-partition in XGBoost4J-Spark (#4049) - -### Bug-fixes -* Fix semantics of `gpu_id` when running multiple XGBoost processes on a multi-GPU machine (#3851) -* Fix page storage path for external memory on Windows (#3869) -* Fix configuration setup so that DART utilizes GPU (#4024) -* Eliminate NAN values from SHAP prediction (#3943) -* Prevent empty quantile sketches in `hist` (#4155) -* Enable running objectives with 0 GPUs (#3878) -* Parameters are no longer dependent on system locale (#3891, #3907) -* Use consistent data type in the GPU coordinate descent code (#3917) -* Remove undefined behavior in the CLI config parser on the ARM platform (#3976) -* Initialize counters in GPU AllReduce (#3987) -* Prevent deadlocks in GPU AllReduce (#4113) -* Load correct values from sliced NumPy arrays (#4147, #4165) -* Fix incorrect GPU device selection (#4161) -* Make feature binning logic in `hist` aware of query groups when running a ranking task (#4115). For ranking tasks, weights are attached to query groups, not to individual instances.
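To make that per-group weighting concrete, here is a hedged Python sketch. `set_group` and `set_weight` are real `DMatrix` methods; the data, group sizes, and weight values are invented, and the key point is that for ranking there is one weight per query group rather than per row.

```python
import numpy as np
import xgboost as xgb

X = np.random.rand(8, 3)
y = np.random.randint(3, size=8)  # graded relevance labels

dtrain = xgb.DMatrix(X, label=y)
dtrain.set_group([3, 5])       # two query groups: rows 0-2 and rows 3-7
dtrain.set_weight([1.0, 2.0])  # one weight per group, not per instance

params = {'objective': 'rank:pairwise'}
bst = xgb.train(params, dtrain, num_boost_round=10)
```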
-* Generate correct C++ exception type for `LOG(FATAL)` macro (#4159) -* Python package - - Python package should run on systems without the `PATH` environment variable (#3845) - - Fix `coef_` and `intercept_` signature to be compatible with `sklearn.RFECV` (#3873) - - Use UTF-8 encoding in Python package README, to support non-English locales (#3867) - - Add AUC-PR to list of metrics to maximize for early stopping (#3936) - - Allow loading pickles without `self.booster` attribute, for backward compatibility (#3938, #3944) - - White-list DART for feature importances (#4073) - - Update usage of [h2oai/datatable](https://github.com/h2oai/datatable) (#4123) -* XGBoost4J-Spark - - Address scalability issue in prediction (#4033) - - Enforce the use of per-group weights for ranking task (#4118) - - Fix vector size of `rawPredictionCol` in `XGBoostClassificationModel` (#3932) - - More robust error handling in Spark tracker (#4046, #4108) - - Fix return type of `setEvalSets` (#4105) - - Return correct value of `getMaxLeaves` (#4114) - -### API changes -* Add experimental parameter `single_precision_histogram` to use single-precision histograms for the `gpu_hist` algorithm (#3965) -* Python package - - Add option to select the type of feature importances in the scikit-learn interface (#3876); see the sketch below - - Add `trees_to_df()` method to dump decision trees as a Pandas data frame (#4153) - - Add options to control node shapes in the GraphViz plotting function (#3859) - - Add `xgb_model` option to `XGBClassifier`, to load a previously saved model (#4092) - - Passing lists into `DMatrix` is now deprecated (#3970) -* XGBoost4J - - Support multiple feature importance features (#3801) - -### Maintenance: Refactor C++ code for legibility and maintainability -* Refactor `hist` algorithm code and add unit tests (#3836) -* Minor refactoring of split evaluator in `gpu_hist` (#3889) -* Removed unused leaf vector field in the tree model (#3989) -* Simplify the tree representation by combining `TreeModel` and `RegTree` classes (#3995) -* Simplify and harden tree expansion code (#4008, #4015) -* De-duplicate parameter classes in the linear model algorithms (#4013) -* Robust handling of ranges with C++20 span in `gpu_exact` and `gpu_coord_descent` (#4020, #4029) -* Simplify tree training code (#3825). Also use Span class for robust handling of ranges.
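As promised, a hedged sketch of the Python-side API additions above. `importance_type` is the scikit-learn option from #3876; the tree dump method is called `trees_to_df()` in #4153 but is exposed as `trees_to_dataframe()` in later releases, which is the spelling used here. Data comes from scikit-learn's bundled breast cancer set.

```python
from sklearn.datasets import load_breast_cancer
from xgboost import XGBClassifier

X, y = load_breast_cancer(return_X_y=True)

# Choose which importance measure feature_importances_ reports (#3876).
clf = XGBClassifier(n_estimators=20, importance_type='gain')
clf.fit(X, y)
print(clf.feature_importances_)

# Dump the fitted trees as a Pandas data frame (#4153); named
# trees_to_dataframe() in later releases of the Python package.
print(clf.get_booster().trees_to_dataframe().head())
```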
- -### Maintenance: testing, continuous integration, build system -* Disallow `std::regex` since it's not supported by GCC 4.8.x (#3870) -* Add multi-GPU tests for coordinate descent algorithm for linear models (#3893, #3974) -* Enforce naming style in Python lint (#3896) -* Refactor Python tests (#3897, #3901): Use pytest exclusively, display full trace upon failure -* Address `DeprecationWarning` when using Python collections (#3909) -* Use correct group for maven site plugin (#3937) -* Jenkins CI is now using on-demand EC2 instances exclusively, due to unreliability of Spot instances (#3948) -* Better GPU performance logging (#3945) -* Fix GPU tests on machines with only 1 GPU (#4053) -* Eliminate CRAN check warnings and notes (#3988) -* Add unit tests for tree serialization (#3989) -* Add unit tests for tree fitting functions in `hist` (#4155) -* Add a unit test for `gpu_exact` algorithm (#4020) -* Correct JVM CMake GPU flag (#4071) -* Fix failing Travis CI on Mac (#4086) -* Speed up Jenkins by not compiling CMake (#4099) -* Analyze C++ and CUDA code using clang-tidy, as part of Jenkins CI pipeline (#4034) -* Fix broken R test: Install Homebrew GCC (#4142) -* Check for empty datasets in GPU unit tests (#4151) -* Fix Windows compilation (#4139) -* Comply with latest convention of cpplint (#4157) -* Fix a unit test in `gpu_hist` (#4158) -* Speed up data generation in Python tests (#4164) - -### Usability Improvements -* Add link to [InfoWorld 2019 Technology of the Year Award](https://www.infoworld.com/article/3336072/application-development/infoworlds-2019-technology-of-the-year-award-winners.html) (#4116) -* Remove outdated AWS YARN tutorial (#3885) -* Document current limitation in number of features (#3886) -* Remove unnecessary warning when `gblinear` is selected (#3888) -* Document limitation of CSV parser: header not supported (#3934) -* Log training parameters in XGBoost4J-Spark (#4091) -* Clarify early stopping behavior in the scikit-learn interface (#3967) -* Clarify behavior of `max_depth` parameter (#4078) -* Revise Python docstrings for ranking task (#4121). In particular, weights must be per-group in learning-to-rank setting. 
-* Document parameter `num_parallel_tree` (#4022) -* Add Jenkins status badge (#4090) -* Warn users against using internal functions of the `Booster` object (#4066) -* Reformat `benchmark_tree.py` to comply with Python style convention (#4126) -* Clarify a comment in `objectiveTrait` (#4174) -* Fix typos and broken links in documentation (#3890, #3872, #3902, #3919, #3975, #4027, #4156, #4167) - -### Acknowledgement -**Contributors** (in no particular order): Jiaming Yuan (@trivialfis), Hyunsu Cho (@hcho3), Nan Zhu (@CodingCat), Rory Mitchell (@RAMitchell), Yanbo Liang (@yanboliang), Andy Adinets (@canonizer), Tong He (@hetong007), Yuan Tang (@terrytangyuan) - -**First-time Contributors** (in no particular order): Jelle Zijlstra (@JelleZijlstra), Jiacheng Xu (@jiachengxu), @ajing, Kashif Rasul (@kashif), @theycallhimavi, Joey Gao (@pjgao), Prabakaran Kumaresshan (@nixphix), Huafeng Wang (@huafengw), @lyxthe, Sam Wilkinson (@scwilkinson), Tatsuhito Kato (@stabacov), Shayak Banerjee (@shayakbanerjee), Kodi Arfer (@Kodiologist), @KyleLi1985, Egor Smirnov (@SmirnovEgorRu), @tmitanitky, Pasha Stetsenko (@st-pasha), Kenichi Nagahara (@keni-chi), Abhai Kollara Dilip (@abhaikollara), Patrick Ford (@pford221), @hshujuan, Matthew Jones (@mt-jones), Thejaswi Rao (@teju85), Adam November (@anovember) - -**First-time Reviewers** (in no particular order): Mingyang Hu (@mingyang), Theodore Vasiloudis (@thvasilo), Jakub Troszok (@troszok), Rong Ou (@rongou), @Denisevi4, Matthew Jones (@mt-jones), Jeff Kaplan (@jeffdk) - -## v0.81 (2018.11.04) -### New feature: feature interaction constraints -* Users are now able to control which features (independent variables) are allowed to interact by specifying feature interaction constraints (#3466). -* A [tutorial](https://xgboost.readthedocs.io/en/release_0.81/tutorials/feature_interaction_constraint.html) is available, as well as [R](https://github.com/dmlc/xgboost/blob/9254c58e4dfff6a59dc0829a2ceb02e45ed17cd0/R-package/demo/interaction_constraints.R) and [Python](https://github.com/dmlc/xgboost/blob/9254c58e4dfff6a59dc0829a2ceb02e45ed17cd0/tests/python/test_interaction_constraints.py) examples. - -### New feature: learning to rank using the scikit-learn interface -* The learning to rank task is now available for the scikit-learn interface of the Python package (#3560, #3848). It is now possible to integrate the XGBoost ranking model into the scikit-learn learning pipeline. -* An example of using the `XGBRanker` class can be found at [demo/rank/rank_sklearn.py](https://github.com/dmlc/xgboost/blob/24a268a2e3cb17302db3d72da8f04016b7d352d9/demo/rank/rank_sklearn.py). - -### New feature: R interface for SHAP interactions -* SHAP (SHapley Additive exPlanations) is a unified approach to explain the output of any machine learning model. Previously, this feature was only available from the Python package; now it is available from the R package as well (#3636). - -### New feature: GPU predictor now uses multiple GPUs for prediction -* The GPU predictor is now able to utilize multiple GPUs at once to accelerate prediction (#3738) - -### New feature: Scale distributed XGBoost to large-scale clusters -* Fix OS file descriptor limit assertion error on large clusters (#3835, dmlc/rabit#73) by replacing the `select()`-based AllReduce/Broadcast with a `poll()`-based implementation. -* Mitigate the tracker "thundering herd" issue on large clusters. Add exponential backoff retry when workers connect to the tracker.
-* With this change, we were able to scale to 1.5k executors on a 12 billion row dataset after some tweaks here and there. - -### New feature: Additional objective functions for GPUs -* New objective functions ported to GPU: `hinge`, `multi:softmax`, `multi:softprob`, `count:poisson`, `reg:gamma`, `reg:tweedie`. -* With supported objectives, XGBoost will select the correct devices based on your system and the `n_gpus` parameter. - -### Major bug fix: learning to rank with XGBoost4J-Spark -* Previously, `repartitionForData` would shuffle data and lose the ordering necessary for the ranking task. -* To fix this issue, data points within each RDD partition are now explicitly grouped by their group (query session) IDs (#3654). Empty RDD partitions are also handled carefully (#3750). - -### Major bug fix: early stopping fixed in XGBoost4J-Spark -* The earlier implementation of early stopping had incorrect semantics and didn't let users specify the direction of optimization (maximize / minimize) -* A parameter `maximize_evaluation_metrics` is defined to tell whether a metric should be maximized or minimized as part of the early stopping criterion (#3808). Early stopping now has correct semantics. - -### API changes -* Column sampling by level (`colsample_bylevel`) is now functional for the `hist` algorithm (#3635, #3862) -* The GPU tag `gpu:` for regression objectives is now deprecated. XGBoost will select the correct devices automatically (#3643) -* Add `disable_default_eval_metric` parameter to disable the default metric (#3606) -* Experimental AVX support for gradient computation is removed (#3752) -* XGBoost4J-Spark - - Add `rank:ndcg` and `rank:map` to supported objectives (#3697) -* Python package - - Add `callbacks` argument to the `fit()` function of the scikit-learn API (#3682) - - Add `XGBRanker` to the scikit-learn interface (#3560, #3848) - - Add `validate_features` argument to the `predict()` function of the scikit-learn API (#3653) - - Allow scikit-learn grid search over parameters specified as keyword arguments (#3791) - - Add `coef_` and `intercept_` as properties of the scikit-learn wrapper (#3855). Some scikit-learn functions expect these properties. - -### Performance improvements -* Address very high GPU memory usage for large data (#3635) -* Fix performance regression within `EvaluateSplits()` of the `gpu_hist` algorithm (#3680) - -### Bug-fixes -* Fix a problem in GPU quantile sketch with tiny instance weights.
(#3628) -* Fix copy constructor for `HostDeviceVectorImpl` to prevent dangling pointers (#3657) -* Fix a bug in partitioned file loading (#3673) -* Fix an uninitialized pointer in `gpu_hist` (#3703) -* Reshare data among GPUs when the number of GPUs is changed (#3721) -* Add back `max_delta_step` to split evaluation (#3668) -* Do not round up integer thresholds for integer features in JSON dump (#3717) -* Use `dmlc::TemporaryDirectory` to handle temporaries in a cross-platform way (#3783) -* Fix accuracy problem with `gpu_hist` when `min_child_weight` and `lambda` are set to 0 (#3793) -* Make sure that the `tree_method` parameter is recognized and not silently ignored (#3849) -* XGBoost4J-Spark - - Make sure `thresholds` are considered when executing the `predict()` method (#3577) - - Avoid losing precision when computing probabilities by converting to `Double` early (#3576) - - `getTreeLimit()` should return `Int` (#3602) - - Fix checkpoint serialization on HDFS (#3614) - - Throw `ControlThrowable` instead of `InterruptedException` so that it is properly re-thrown (#3632) - - Remove extraneous output to stdout (#3665) - - Allow specification of task type for custom objectives and evaluations (#3646) - - Fix distributed updater check (#3739) - - Fix issue where the Spark job execution thread could not return before we execute `first()` (#3758) -* Python package - - Fix accessing `DMatrix.handle` before it is set (#3599) - - `XGBClassifier.predict()` should return margin scores when `output_margin` is set to true (#3651) - - Early stopping callback should maximize metrics of the form `NDCG@n-` (#3685) - - Preserve feature names when slicing `DMatrix` (#3766) -* R package - - Replace `nround` with `nrounds` to match the actual parameter (#3592) - - Amend `xgb.createFolds` to handle classes of a single element (#3630) - - Fix buggy random generator and make `colsample_bytree` functional (#3781) - -### Maintenance: testing, continuous integration, build system -* Add sanitizers tests to Travis CI (#3557) -* Add NumPy, Matplotlib, Graphviz as requirements for doc build (#3669) -* Comply with CRAN submission policy (#3660, #3728) -* Remove copy-paste error in JVM test suite (#3692) -* Disable flaky tests in `R-package/tests/testthat/test_update.R` (#3723) -* Make Python tests compatible with scikit-learn 0.20 release (#3731) -* Separate out restricted and unrestricted tasks, so that pull requests don't build downloadable artifacts (#3736) -* Add multi-GPU unit test environment (#3741) -* Allow plug-ins to be built by CMake (#3752) -* Test wheel compatibility on CPU containers for pull requests (#3762) -* Fix broken doc build due to Matplotlib 3.0 release (#3764) -* Produce `xgboost.so` for XGBoost-R on Mac OSX, so that `make install` works (#3767) -* Retry Jenkins CI tests up to 3 times to improve reliability (#3769, #3775, #3776, #3777) -* Add basic unit tests for `gpu_hist` algorithm (#3785) -* Fix Python environment for distributed unit tests (#3806) -* Test wheels on CUDA 10.0 container for compatibility (#3838) -* Fix JVM doc build (#3853) - -### Maintenance: Refactor C++ code for legibility and maintainability -* Merge generic device helper functions into `GPUSet` class (#3626) -* Re-factor column sampling logic into `ColumnSampler` class (#3635, #3637) -* Replace `std::vector` with `HostDeviceVector` in `MetaInfo` and `SparsePage` (#3446) -* Simplify `DMatrix` class (#3395) -* De-duplicate CPU/GPU code using `Transform` class (#3643, #3751) -* Remove the obsolete `QuantileHistMaker` class (#3761) -* Remove the
obsolete `NoConstraint` class (#3792) - -### Other Features -* C++20-compliant Span class for safe pointer indexing (#3548, #3588) -* Add helper functions to manipulate multiple GPU devices (#3693) -* XGBoost4J-Spark - - Allow specifying the host IP from the `xgboost-tracker.properties` file (#3833). This comes in handy when the `hosts` file doesn't correctly define localhost. - -### Usability Improvements -* Add reference to GitHub repository in `pom.xml` of JVM packages (#3589) -* Add R demo of multi-class classification (#3695) -* Document JSON dump functionality (#3600, #3603) -* Document CUDA requirement and lack of external memory for GPU algorithms (#3624) -* Document LambdaMART objectives, both pairwise and listwise (#3672) -* Document `aucpr` evaluation metric (#3687) -* Document gblinear parameters: `feature_selector` and `top_k` (#3780) -* Add instructions for using MinGW-built XGBoost with Python. (#3774) -* Removed nonexistent parameter `use_buffer` from documentation (#3610) -* Update Python API doc to include all classes and members (#3619, #3682) -* Fix typos and broken links in documentation (#3618, #3640, #3676, #3713, #3759, #3784, #3843, #3852) -* Binary classification demo should produce LIBSVM with 0-based indexing (#3652) -* Process data once for Python and CLI examples of learning to rank (#3666) -* Include full text of Apache 2.0 license in the repository (#3698) -* Save predictor parameters in model file (#3856) -* JVM packages - - Let users specify feature names when calling `getModelDump` and `getFeatureScore` (#3733) - - Warn the user about the lack of over-the-wire encryption (#3667) - - Fix errors in examples (#3719) - - Document choice of trackers (#3831) - - Document that vanilla Apache Spark is required (#3854) -* Python package - - Document that a custom objective can't contain a colon (:) (#3601) - - Show a better error message for failed library loading (#3690) - - Document that feature importance is unavailable for non-tree learners (#3765) - - Document behavior of `get_fscore()` for zero-importance features (#3763) - - Recommend pickling as the way to save `XGBClassifier` / `XGBRegressor` / `XGBRanker` (#3829) -* R package - - Enlarge variable importance plot to make it more visible (#3820) - -### BREAKING CHANGES -* External memory page files have changed, breaking backwards compatibility for temporary storage used during external memory training. This only affects external memory users upgrading their xgboost version - we recommend clearing all `*.page` files before resuming training. Model serialization is unaffected. - -### Known issues -* Quantile sketcher fails to produce any quantile for some edge cases (#2943) -* The `hist` algorithm leaks memory when used with the learning rate decay callback (#3579) -* Using a custom evaluation function together with early stopping causes an assertion failure in XGBoost4J-Spark (#3595) -* Early stopping doesn't work with the `gblinear` learner (#3789) -* Label and weight vectors are not reshared when the number of GPUs changes (#3794). To get around this issue, delete the `DMatrix` object and re-load, as sketched below.
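A hedged sketch of that workaround in Python: rebuild the `DMatrix` from the original arrays after changing the device count, so that labels and weights are redistributed along with the data. The data and values are invented, and `n_gpus` is the era-specific parameter (removed in later releases).

```python
import numpy as np
import xgboost as xgb

X = np.random.rand(100, 4)
y = np.random.rand(100)
w = np.ones(100)

params = {'objective': 'reg:linear', 'tree_method': 'gpu_hist', 'n_gpus': 1}
dtrain = xgb.DMatrix(X, label=y, weight=w)
# ... train with one GPU, then decide to use two ...

del dtrain                                  # drop the stale DMatrix (#3794 workaround)
dtrain = xgb.DMatrix(X, label=y, weight=w)  # re-load so labels/weights are resharded
params['n_gpus'] = 2                        # era-specific parameter, later removed
```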
-* The `DMatrix` Python objects are initialized with incorrect values when given array slices (#3841) -* The `gpu_id` parameter is broken and not yet properly supported (#3850) - -### Acknowledgement -**Contributors** (in no particular order): Hyunsu Cho (@hcho3), Jiaming Yuan (@trivialfis), Nan Zhu (@CodingCat), Rory Mitchell (@RAMitchell), Andy Adinets (@canonizer), Vadim Khotilovich (@khotilov), Sergei Lebedev (@superbobry) - -**First-time Contributors** (in no particular order): Matthew Tovbin (@tovbinm), Jakob Richter (@jakob-r), Grace Lam (@grace-lam), Grant W Schneider (@grantschneider), Andrew Thia (@BlueTea88), Sergei Chipiga (@schipiga), Joseph Bradley (@jkbradley), Chen Qin (@chenqin), Jerry Lin (@linjer), Dmitriy Rybalko (@rdtft), Michael Mui (@mmui), Takahiro Kojima (@515hikaru), Bruce Zhao (@BruceZhaoR), Wei Tian (@weitian), Saumya Bhatnagar (@Sam1301), Juzer Shakir (@JuzerShakir), Zhao Hang (@cleghom), Jonathan Friedman (@jontonsoup), Bruno Tremblay (@meztez), Boris Filippov (@frenzykryger), @Shiki-H, @mrgutkun, @gorogm, @htgeis, @jakehoare, @zengxy, @KOLANICH - -**First-time Reviewers** (in no particular order): Nikita Titov (@StrikerRUS), Xiangrui Meng (@mengxr), Nirmal Borah (@Nirmal-Neel) - - -## v0.80 (2018.08.13) -* **JVM packages received a major upgrade**: To consolidate the APIs and improve the user experience, we refactored the design of XGBoost4J-Spark in a significant manner. (#3387) - - Consolidated APIs: It is now much easier to integrate XGBoost models into a Spark ML pipeline. Users can control behaviors like outputting leaf prediction results by setting corresponding column names. Training is now more consistent with other Estimators in Spark MLlib: there is now one single method `fit()` to train decision trees. - - Better user experience: we refactored the parameter-related modules in XGBoost4J-Spark to provide both camel-case (Spark ML style) and underscore (XGBoost style) parameters. - - A brand-new tutorial is [available](https://xgboost.readthedocs.io/en/release_0.80/jvm/xgboost4j_spark_tutorial.html) for XGBoost4J-Spark. - - The latest API documentation is now hosted at https://xgboost.readthedocs.io/. -* XGBoost documentation now keeps track of multiple versions: - - Latest master: https://xgboost.readthedocs.io/en/latest - - 0.80 stable: https://xgboost.readthedocs.io/en/release_0.80 - - 0.72 stable: https://xgboost.readthedocs.io/en/release_0.72 -* Support for per-group weights in the ranking objective (#3379) -* Fix inaccurate decimal parsing (#3546) -* New functionality - - Query ID column support in LIBSVM data files (#2749). This is convenient for performing the ranking task in a distributed setting. - - Hinge loss for binary classification (`binary:hinge`) (#3477) - - Ability to specify the delimiter and instance weight column for CSV files (#3546) - - Ability to use 1-based indexing instead of 0-based (#3546) -* GPU support - - Quantile sketch, binning, and index compression are now performed on the GPU, eliminating PCIe transfer for the 'gpu_hist' algorithm (#3319, #3393) - - Upgrade to NCCL2 for multi-GPU training (#3404). - - Use shared memory atomics for faster training (#3384).
- - Dynamically allocate GPU memory, to prevent large allocations for deep trees (#3519) - - Fix memory copy bug for large files (#3472) -* Python package - - Importing data from Python datatable (#3272) - - Pre-built binary wheels available for 64-bit Linux and Windows (#3424, #3443) - - Add new importance measures 'total_gain', 'total_cover' (#3498) - - Sklearn API now supports saving and loading models (#3192) - - Arbitrary cross validation fold indices (#3353) - - The `predict()` function in the Sklearn API uses `best_ntree_limit` if available, to make early stopping easier to use (#3445) - - Informational messages are now directed to Python's `print()` rather than standard output (#3438). This way, messages appear inside Jupyter notebooks. -* R package - - Oracle Solaris support, per CRAN policy (#3372) -* JVM packages - - Single-instance prediction (#3464) - - Pre-built JARs are now available from Maven Central (#3401) - - Add NULL pointer check (#3021) - - Consider `spark.task.cpus` when controlling parallelism (#3530) - - Handle missing values in prediction (#3529) - - Eliminate outputs of `System.out` (#3572) -* Refactored C++ DMatrix class for simplicity and de-duplication (#3301) -* Refactored C++ histogram facilities (#3564) -* Refactored the constraints / regularization mechanism for split finding (#3335, #3429). Users may specify an elastic net (L2 + L1 regularization) on leaf weights as well as monotonic constraints on test nodes. The refactor will be useful for a future addition of feature interaction constraints. -* Statically link `libstdc++` for MinGW32 (#3430) -* Enable loading from `group`, `base_margin` and `weight` (see [here](http://xgboost.readthedocs.io/en/latest/tutorials/input_format.html#auxiliary-files-for-additional-information)) for Python, R, and JVM packages (#3431) -* Fix model saving for `count:poisson` so that `max_delta_step` doesn't get truncated (#3515) -* Fix loading of sparse CSC matrix (#3553) -* Fix incorrect handling of the `base_score` parameter for Tweedie regression (#3295) - -## v0.72.1 (2018.07.08) -This version is only applicable to the Python package. The content is identical to that of v0.72. - -## v0.72 (2018.06.01) -* Starting with this release, we plan to make a new release every two months. See #3252 for more details. -* Fix a pathological behavior (near-zero second-order gradients) in the multiclass objective (#3304) -* Tree dumps now use high precision in storing floating-point values (#3298) -* Submodules `rabit` and `dmlc-core` have been brought up to date, bringing bug fixes (#3330, #3221). -* GPU support - - Continuous integration tests for GPU code (#3294, #3309) - - GPU accelerated coordinate descent algorithm (#3178) - - Abstract 1D vector class now works with multiple GPUs (#3287) - - Generate PTX code for the most recent architecture (#3316) - - Fix a memory bug on NVIDIA K80 cards (#3293) - - Address performance instability for single-GPU, multi-core machines (#3324) -* Python package - - FreeBSD support (#3247) - - Validation of feature names in `Booster.predict()` is now optional (#3323) -* Updated Sklearn API - - Validation sets now support instance weights (#2354) - - `XGBClassifier.predict_proba()` should not support the `output_margin` option. (#3343) See BREAKING CHANGES below.
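To illustrate the behavioural line being drawn here (formalized in the BREAKING CHANGES entry below), a small hedged Python sketch: class probabilities come from `predict_proba()`, while raw margin scores come from `predict()` with `output_margin=True`. The data is invented.

```python
import numpy as np
from xgboost import XGBClassifier

X = np.random.rand(100, 4)
y = np.random.randint(2, size=100)

clf = XGBClassifier(n_estimators=10).fit(X, y)

proba = clf.predict_proba(X)                 # class probabilities only
margin = clf.predict(X, output_margin=True)  # raw, untransformed margin scores
```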
-* R package: - - Better handling of NULL in `print.xgb.Booster()` (#3338) - - Comply with CRAN policy by removing compiler warning suppression (#3329) - - Updated CRAN submission -* JVM packages - - JVM packages will now use the same versioning scheme as other packages (#3253) - - Update Spark to 2.3 (#3254) - - Add scripts to cross-build and deploy artifacts (#3276, #3307) - - Fix a compilation error for Scala 2.10 (#3332) -* BREAKING CHANGES - - `XGBClassifier.predict_proba()` no longer accepts the parameter `output_margin`. The parameter makes no sense for `predict_proba()` because the method is meant to predict class probabilities, not raw margin scores. - -## v0.71 (2018.04.11) -* This is a minor release, mainly motivated by issues concerning `pip install`, e.g. #2426, #3189, #3118, and #3194. With this release, users of Linux and MacOS will be able to run `pip install` for the most part. -* Refactored the linear booster class (`gblinear`), so as to support multiple coordinate descent updaters (#3103, #3134). See BREAKING CHANGES below. -* Fix slow training for multiclass classification with a high number of classes (#3109) -* Fix a corner case in the approximate quantile sketch (#3167). Applicable to the 'hist' and 'gpu_hist' algorithms -* Fix memory leak in DMatrix (#3182) -* New functionality - - Better linear booster class (#3103, #3134) - - Pairwise SHAP interaction effects (#3043) - - Cox loss (#3043) - - AUC-PR metric for the ranking task (#3172) - - Monotonic constraints for the 'hist' algorithm (#3085) -* GPU support - - Create an abstract 1D vector class that moves data seamlessly between the main and GPU memory (#2935, #3116, #3068). This eliminates unnecessary PCIe data transfer during training time. - - Fix minor bugs (#3051, #3217) - - Fix compatibility error for CUDA 9.1 (#3218) -* Python package: - - Correctly handle parameter `verbose_eval=0` (#3115) -* R package: - - Eliminate segmentation fault on 32-bit Windows platform (#2994) -* JVM packages - - Fix a memory bug involving double-freeing Booster objects (#3005, #3011) - - Handle empty partitions in predict (#3014) - - Update docs and unify terminology (#3024) - - Delete cache files after the job finishes (#3022) - - Compatibility fixes for latest Spark versions (#3062, #3093) -* BREAKING CHANGES: Updated linear modelling algorithms. In particular, L1/L2 regularisation penalties are now normalised to the number of training examples. This makes the implementation consistent with sklearn/glmnet. L2 regularisation has also been removed from the intercept. To produce linear models with the old regularisation behaviour, the alpha/lambda regularisation parameters can be manually scaled by dividing them by the number of training examples. - -## v0.7 (2017.12.30) -* **This version represents a major change from the last release (v0.6), which was released one and a half years ago.** -* Updated Sklearn API - - Add compatibility layer for scikit-learn v0.18: `sklearn.cross_validation` is now deprecated - - Updated to allow use of all XGBoost parameters via `**kwargs`.
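A tiny hedged sketch of the `**kwargs` pass-through just mentioned: any native XGBoost parameter can be handed to the scikit-learn wrapper as a keyword argument. The parameter values here are arbitrary illustrations.

```python
from xgboost import XGBClassifier

# Native booster parameters passed straight through as keyword arguments.
native_params = {'max_depth': 4, 'subsample': 0.8, 'colsample_bytree': 0.9}
clf = XGBClassifier(n_estimators=50, **native_params)
```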
- - Updated `nthread` to `n_jobs` and `seed` to `random_state` (as per Sklearn convention); `nthread` and `seed` are now marked as deprecated - - Updated to allow choice of Booster (`gbtree`, `gblinear`, or `dart`) - - `XGBRegressor` now supports instance weights (specify the `sample_weight` parameter) - - Pass the `n_jobs` parameter to the `DMatrix` constructor - - Add `xgb_model` parameter to the `fit` method, to allow continuation of training -* Refactored gbm to allow a more friendly cache strategy - - Specialized some prediction routines -* Robust `DMatrix` construction from a sparse matrix -* Faster construction of `DMatrix` from 2D NumPy matrices: elide copies, use of multiple threads -* Automatically remove NaN from input data when it is sparse. - - This can solve some user-reported problems of `istart != hist.size` -* Fix the single-instance prediction function to obtain correct predictions -* Minor fixes - - Thread-local variable is upgraded so it is automatically freed at thread exit. - - Fix saving and loading `count::poisson` models - - Fix CalcDCG to use base-2 logarithm - - Messages are now written to stderr instead of stdout - - Keep built-in evaluations while using customized evaluation functions - - Use `bst_float` consistently to minimize type conversion - - Copy the base margin when slicing `DMatrix` - - Evaluation metrics are now saved to the model file - - Use `int32_t` explicitly when serializing the version - - In distributed training, synchronize the number of features after loading a data matrix. -* Migrate to C++11 - - The current master version now requires a C++11-enabled compiler (g++ 4.8 or higher) -* Predictor interface was factored out (in a manner similar to the updater interface). -* Makefile support for Solaris and ARM -* Test code coverage using Codecov -* Add CPP tests -* Add `Dockerfile` and `Jenkinsfile` to support continuous integration for GPU code -* New functionality - - Ability to adjust tree model's statistics to a new dataset without changing tree structures. - - Ability to extract feature contributions from individual predictions, as described [here](http://blog.datadive.net/interpreting-random-forests/) and [here](https://arxiv.org/abs/1706.06060). - - Faster, histogram-based tree algorithm (`tree_method='hist'`). - - GPU/CUDA accelerated tree algorithms (`tree_method='gpu_hist'` or `'gpu_exact'`), including the GPU-based predictor. - - Monotonic constraints: when other features are fixed, force the prediction to be monotonically increasing with respect to a certain specified feature.
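The monotonic constraints just described can be sketched in Python as follows; `monotone_constraints` takes one flag per feature (+1 increasing, -1 decreasing, 0 unconstrained). The data and values are invented, and the modern objective spelling `reg:squarederror` is used (this era still called it `reg:linear`).

```python
import numpy as np
import xgboost as xgb

X = np.random.rand(300, 3)
y = 2.0 * X[:, 0] - 1.5 * X[:, 1] + 0.1 * np.random.rand(300)
dtrain = xgb.DMatrix(X, label=y)

params = {
    'objective': 'reg:squarederror',
    # Force predictions monotonically increasing in feature 0,
    # decreasing in feature 1, unconstrained in feature 2.
    'monotone_constraints': '(1,-1,0)',
}
bst = xgb.train(params, dtrain, num_boost_round=30)
```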
- - Faster gradient calculation using AVX SIMD - - Ability to export models in JSON format - - Support for Tweedie regression - - Additional dropout options for DART: binomial+1, epsilon - - Ability to update an existing model in-place: this is useful for many applications, such as determining feature importance -* Python package: - - New parameters: - - `learning_rates` in `cv()` - - `shuffle` in `mknfold()` - - `max_features` and `show_values` in `plot_importance()` - - `sample_weight` in `XGBRegressor.fit()` - - Support binary wheel builds - - Fix `MultiIndex` detection to support Pandas 0.21.0 and higher - - Support metrics and evaluation sets whose names contain `-` - - Support feature maps when plotting trees - - Compatibility fix for Python 2.6 - - Call the `print_evaluation` callback at the last iteration - - Use appropriate integer types when calling native code, to prevent truncation and memory errors - - Fix shared library loading on Mac OS X -* R package: - - New parameters: - - `silent` in `xgb.DMatrix()` - - `use_int_id` in `xgb.model.dt.tree()` - - `predcontrib` in `predict()` - - `monotone_constraints` in `xgb.train()` - - Default value of the `save_period` parameter in `xgboost()` changed to NULL (consistent with `xgb.train()`). - - It's possible to custom-build the R package with GPU acceleration support. - - Enable JVM build for Mac OS X and Windows - - Integration with AppVeyor CI - - Improved safety for garbage collection - - Store numeric attributes with higher precision - - Easier installation for devel version - - Improved `xgb.plot.tree()` - - Various minor fixes to improve user experience and robustness - - Register native code to pass CRAN check - - Updated CRAN submission -* JVM packages - - Add Spark pipeline persistence API - - Fix data persistence: loss evaluation on test data had wrongly used caches for training data.
- - Clean external cache after training - - Implement early stopping - - Enable training of multiple models by distinguishing stage IDs - - Better Spark integration: support RDD / dataframe / dataset, integrate with the Spark ML package - - XGBoost4j now supports the ranking task - - Support training with missing data - - Refactor JVM package to separate regression and classification models, to be consistent with other machine learning libraries - - Support XGBoost4j compilation on Windows - - Parameter tuning tool - - Publish source code for XGBoost4j to the local Maven repo - - Scala implementation of the Rabit tracker (drop-in replacement for the Java implementation) - - Better exception handling for the Rabit tracker - - Persist `num_class`, the number of classes (for the classification task) - - `XGBoostModel` now holds `BoosterParams` - - libxgboost4j is now part of the CMake build - - Release `DMatrix` when no longer needed, to conserve memory - - Expose `baseMargin`, to allow initialization of boosting with predictions from an external model - - Support instance weights - - Use `SparkParallelismTracker` to prevent jobs from hanging forever - - Expose train-time evaluation metrics via `XGBoostModel.summary` - - Option to specify `host-ip` explicitly in the Rabit tracker -* Documentation - - Better math notation for gradient boosting - - Updated build instructions for Mac OS X - - Template for GitHub issues - - Add `CITATION` file for citing XGBoost in scientific writing - - Fix dropdown menu in xgboost.readthedocs.io - - Document the `updater_seq` parameter - - Style fixes for Python documentation - - Links to additional examples and tutorials - - Clarify installation requirements -* Changes that break backward compatibility - - [#1519](https://github.com/dmlc/xgboost/pull/1519) XGBoost-spark no longer contains APIs for DMatrix; use the public booster interface instead. - - [#2476](https://github.com/dmlc/xgboost/pull/2476) `XGBoostModel.predict()` now has a different signature - - -## v0.6 (2016.07.29) -* Version 0.5 is skipped due to major improvements in the core -* Major refactor of the core library. - - Goal: more flexible and modular code as a portable library. - - Switch to the C++11 standard. - - Random number generator defaults to ```std::mt19937```. - - Share the data loading pipeline and logging module from dmlc-core. - - Enable the registry pattern to allow optional plugins of objectives, metrics, tree constructors, and data loaders. - - Future plugin modules can be put into xgboost/plugin and registered back to the library. - - Remove most of the raw pointers in favor of smart pointers, for RAII safety. -* Add an official `tree_method` parameter option for the approximate algorithm. - - Change the default behavior to prefer the faster algorithm. - - The user will get a message when the approximate algorithm is chosen. -* Change the library name to libxgboost.so -* Backward compatibility - - The binary buffer file is not backward compatible with previous versions. - - The model file is backward compatible on 64-bit platforms. -* The model file is compatible between 64/32-bit platforms (not yet tested). -* The external memory version and other advanced features will be exposed to the R library as well, on Linux. - - Previously, some of the features were blocked due to C++11 and threading limits. - - The Windows version is still blocked because Rtools does not support ```std::thread```. -* rabit and dmlc-core are maintained through git submodules - - Anyone can open a PR to update these dependencies now.
-* Improvements - - The Rabit and xgboost libs are not thread-safe, so they now use thread-local PRNGs - - This could fix some of the problems previously seen when running xgboost on multiple threads. -* JVM Package - - Enable xgboost4j for Java and Scala - - XGBoost distributed now runs on Flink and Spark. -* Support model attributes listing for metadata. - - https://github.com/dmlc/xgboost/pull/1198 - - https://github.com/dmlc/xgboost/pull/1166 -* Support callback API - - https://github.com/dmlc/xgboost/issues/892 - - https://github.com/dmlc/xgboost/pull/1211 - - https://github.com/dmlc/xgboost/pull/1264 -* Support the new booster DART (dropout in tree boosting) - - https://github.com/dmlc/xgboost/pull/1220 -* Add CMake build system - - https://github.com/dmlc/xgboost/pull/1314 - -## v0.47 (2016.01.14) - -* Changes in R library - - fixed a possible problem with Poisson regression. - - switched from 0 to NA for missing values. - - exposed access to additional model parameters. -* Changes in Python library - - throws an exception instead of crashing the terminal when a parameter error happens. - - has importance plot and tree plot functions. - - accepts different learning rates for each boosting round. - - allows model training continuation from a previously saved model. - - allows early stopping in CV. - - allows feval to return a list of tuples. - - allows eval_metric to handle additional formats. - - improved compatibility in the sklearn module. - - additional parameters added for the sklearn wrapper. - - added pip installation functionality. - - supports more Pandas DataFrame dtypes. - - added the best_ntree_limit attribute, in addition to best_score and best_iteration. -* Java API is ready for use -* Added more test cases and continuous integration to make each build more robust. - -## v0.4 (2015.05.11) - -* Distributed version of xgboost that runs on YARN, scales to billions of examples -* Direct save/load of data and models from/to S3 and HDFS -* Feature importance visualization in the R module, by Michael Benesty -* Predict leaf index -* Poisson regression for count data -* Early stopping option in training -* Native save/load support in R and Python - - xgboost models can now be saved using save/load in R - - the xgboost Python model is now picklable -* The sklearn wrapper is supported in the Python module -* Experimental external memory version - - -## v0.3 (2014.09.07) - -* Faster tree construction module - - Allows subsampling columns during tree construction via ```bst:col_samplebytree=ratio``` -* Support for boosting from initial predictions -* Experimental version of LambdaRank -* The linear booster is now parallelized, using parallel coordinate descent.
-* Add [Code Guide](src/README.md) for customizing objective function and evaluation -* Add R module - - -## v0.2x (2014.05.20) - -* Python module -* Weighted samples instances -* Initial version of pairwise rank - - -## v0.1 (2014.03.26) - -* Initial release diff --git a/ml-xgboost/R-package/.Rbuildignore b/ml-xgboost/R-package/.Rbuildignore deleted file mode 100644 index b37d627..0000000 --- a/ml-xgboost/R-package/.Rbuildignore +++ /dev/null @@ -1,6 +0,0 @@ -\.o$ -\.so$ -\.dll$ -^.*\.Rproj$ -^\.Rproj\.user$ -README.md diff --git a/ml-xgboost/R-package/CMakeLists.txt b/ml-xgboost/R-package/CMakeLists.txt deleted file mode 100644 index 96776a0..0000000 --- a/ml-xgboost/R-package/CMakeLists.txt +++ /dev/null @@ -1,38 +0,0 @@ -find_package(LibR REQUIRED) -message(STATUS "LIBR_CORE_LIBRARY " ${LIBR_CORE_LIBRARY}) - -file(GLOB_RECURSE R_SOURCES - ${CMAKE_CURRENT_LIST_DIR}/src/*.cc - ${CMAKE_CURRENT_LIST_DIR}/src/*.c) -# Use object library to expose symbols -add_library(xgboost-r OBJECT ${R_SOURCES}) - -set(R_DEFINITIONS - -DXGBOOST_STRICT_R_MODE=1 - -DXGBOOST_CUSTOMIZE_GLOBAL_PRNG=1 - -DDMLC_LOG_BEFORE_THROW=0 - -DDMLC_DISABLE_STDIN=1 - -DDMLC_LOG_CUSTOMIZE=1 - -DRABIT_CUSTOMIZE_MSG_ - -DRABIT_STRICT_CXX98_) -target_compile_definitions(xgboost-r - PRIVATE ${R_DEFINITIONS}) -target_include_directories(xgboost-r - PRIVATE - ${LIBR_INCLUDE_DIRS} - ${PROJECT_SOURCE_DIR}/include - ${PROJECT_SOURCE_DIR}/dmlc-core/include - ${PROJECT_SOURCE_DIR}/rabit/include) -set_target_properties( - xgboost-r PROPERTIES - CXX_STANDARD 14 - CXX_STANDARD_REQUIRED ON - POSITION_INDEPENDENT_CODE ON) - -set(XGBOOST_DEFINITIONS "${XGBOOST_DEFINITIONS};${R_DEFINITIONS}" PARENT_SCOPE) -set(XGBOOST_OBJ_SOURCES $ PARENT_SCOPE) -set(LINKED_LIBRARIES_PRIVATE ${LINKED_LIBRARIES_PRIVATE} ${LIBR_CORE_LIBRARY} PARENT_SCOPE) - -if (USE_OPENMP) - target_link_libraries(xgboost-r PRIVATE OpenMP::OpenMP_CXX) -endif () diff --git a/ml-xgboost/R-package/DESCRIPTION b/ml-xgboost/R-package/DESCRIPTION deleted file mode 100644 index 794abdb..0000000 --- a/ml-xgboost/R-package/DESCRIPTION +++ /dev/null @@ -1,67 +0,0 @@ -Package: xgboost -Type: Package -Title: Extreme Gradient Boosting -Version: 1.1.0.1 -Date: 2020-02-21 -Authors@R: c( - person("Tianqi", "Chen", role = c("aut"), - email = "tianqi.tchen@gmail.com"), - person("Tong", "He", role = c("aut", "cre"), - email = "hetong007@gmail.com"), - person("Michael", "Benesty", role = c("aut"), - email = "michael@benesty.fr"), - person("Vadim", "Khotilovich", role = c("aut"), - email = "khotilovich@gmail.com"), - person("Yuan", "Tang", role = c("aut"), - email = "terrytangyuan@gmail.com", - comment = c(ORCID = "0000-0001-5243-233X")), - person("Hyunsu", "Cho", role = c("aut"), - email = "chohyu01@cs.washington.edu"), - person("Kailong", "Chen", role = c("aut")), - person("Rory", "Mitchell", role = c("aut")), - person("Ignacio", "Cano", role = c("aut")), - person("Tianyi", "Zhou", role = c("aut")), - person("Mu", "Li", role = c("aut")), - person("Junyuan", "Xie", role = c("aut")), - person("Min", "Lin", role = c("aut")), - person("Yifeng", "Geng", role = c("aut")), - person("Yutian", "Li", role = c("aut")), - person("XGBoost contributors", role = c("cph"), - comment = "base XGBoost implementation") - ) -Description: Extreme Gradient Boosting, which is an efficient implementation - of the gradient boosting framework from Chen & Guestrin (2016) . - This package is its R interface. The package includes efficient linear - model solver and tree learning algorithms. 
The package can automatically - do parallel computation on a single machine which could be more than 10 - times faster than existing gradient boosting packages. It supports - various objective functions, including regression, classification and ranking. - The package is made to be extensible, so that users are also allowed to define - their own objectives easily. -License: Apache License (== 2.0) | file LICENSE -URL: https://github.com/dmlc/xgboost -BugReports: https://github.com/dmlc/xgboost/issues -NeedsCompilation: yes -VignetteBuilder: knitr -Suggests: - knitr, - rmarkdown, - ggplot2 (>= 1.0.1), - DiagrammeR (>= 0.9.0), - Ckmeans.1d.dp (>= 3.3.1), - vcd (>= 1.3), - testthat, - lintr, - igraph (>= 1.0.1), - jsonlite, - float -Depends: - R (>= 3.3.0) -Imports: - Matrix (>= 1.1-0), - methods, - data.table (>= 1.9.6), - magrittr (>= 1.5), - stringi (>= 0.5.2) -RoxygenNote: 7.1.0 -SystemRequirements: GNU make, C++11 diff --git a/ml-xgboost/R-package/LICENSE b/ml-xgboost/R-package/LICENSE deleted file mode 100644 index b9f38c3..0000000 --- a/ml-xgboost/R-package/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright (c) 2014 by Tianqi Chen and Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/ml-xgboost/R-package/NAMESPACE b/ml-xgboost/R-package/NAMESPACE deleted file mode 100644 index fb0ac54..0000000 --- a/ml-xgboost/R-package/NAMESPACE +++ /dev/null @@ -1,91 +0,0 @@ -# Generated by roxygen2: do not edit by hand - -S3method("[",xgb.DMatrix) -S3method("dimnames<-",xgb.DMatrix) -S3method(dim,xgb.DMatrix) -S3method(dimnames,xgb.DMatrix) -S3method(getinfo,xgb.DMatrix) -S3method(predict,xgb.Booster) -S3method(predict,xgb.Booster.handle) -S3method(print,xgb.Booster) -S3method(print,xgb.DMatrix) -S3method(print,xgb.cv.synchronous) -S3method(setinfo,xgb.DMatrix) -S3method(slice,xgb.DMatrix) -export("xgb.attr<-") -export("xgb.attributes<-") -export("xgb.config<-") -export("xgb.parameters<-") -export(cb.cv.predict) -export(cb.early.stop) -export(cb.evaluation.log) -export(cb.gblinear.history) -export(cb.print.evaluation) -export(cb.reset.parameters) -export(cb.save.model) -export(getinfo) -export(setinfo) -export(slice) -export(xgb.Booster.complete) -export(xgb.DMatrix) -export(xgb.DMatrix.save) -export(xgb.attr) -export(xgb.attributes) -export(xgb.config) -export(xgb.create.features) -export(xgb.cv) -export(xgb.dump) -export(xgb.gblinear.history) -export(xgb.ggplot.deepness) -export(xgb.ggplot.importance) -export(xgb.importance) -export(xgb.load) -export(xgb.load.raw) -export(xgb.model.dt.tree) -export(xgb.plot.deepness) -export(xgb.plot.importance) -export(xgb.plot.multi.trees) -export(xgb.plot.shap) -export(xgb.plot.tree) -export(xgb.save) -export(xgb.save.raw) -export(xgb.serialize) -export(xgb.train) -export(xgb.unserialize) -export(xgboost) -import(methods) -importClassesFrom(Matrix,dgCMatrix) -importClassesFrom(Matrix,dgeMatrix) -importFrom(Matrix,colSums) -importFrom(Matrix,sparse.model.matrix) -importFrom(Matrix,sparseMatrix) -importFrom(Matrix,sparseVector) -importFrom(Matrix,t) 
-importFrom(data.table,":=") -importFrom(data.table,as.data.table) -importFrom(data.table,data.table) -importFrom(data.table,is.data.table) -importFrom(data.table,rbindlist) -importFrom(data.table,setkey) -importFrom(data.table,setkeyv) -importFrom(data.table,setnames) -importFrom(grDevices,rgb) -importFrom(graphics,barplot) -importFrom(graphics,grid) -importFrom(graphics,lines) -importFrom(graphics,par) -importFrom(graphics,points) -importFrom(graphics,title) -importFrom(magrittr,"%>%") -importFrom(stats,median) -importFrom(stats,predict) -importFrom(stringi,stri_detect_regex) -importFrom(stringi,stri_match_first_regex) -importFrom(stringi,stri_replace_all_regex) -importFrom(stringi,stri_replace_first_regex) -importFrom(stringi,stri_split_regex) -importFrom(utils,head) -importFrom(utils,object.size) -importFrom(utils,str) -importFrom(utils,tail) -useDynLib(xgboost, .registration = TRUE) diff --git a/ml-xgboost/R-package/R/callbacks.R b/ml-xgboost/R-package/R/callbacks.R deleted file mode 100644 index e6f9f04..0000000 --- a/ml-xgboost/R-package/R/callbacks.R +++ /dev/null @@ -1,831 +0,0 @@ -#' Callback closures for booster training. -#' -#' These are used to perform various service tasks either during boosting iterations or at the end. -#' This approach helps to modularize many of such tasks without bloating the main training methods, -#' and it offers . -#' -#' @details -#' By default, a callback function is run after each boosting iteration. -#' An R-attribute \code{is_pre_iteration} could be set for a callback to define a pre-iteration function. -#' -#' When a callback function has \code{finalize} parameter, its finalizer part will also be run after -#' the boosting is completed. -#' -#' WARNING: side-effects!!! Be aware that these callback functions access and modify things in -#' the environment from which they are called from, which is a fairly uncommon thing to do in R. -#' -#' To write a custom callback closure, make sure you first understand the main concepts about R environments. -#' Check either R documentation on \code{\link[base]{environment}} or the -#' \href{http://adv-r.had.co.nz/Environments.html}{Environments chapter} from the "Advanced R" -#' book by Hadley Wickham. Further, the best option is to read the code of some of the existing callbacks - -#' choose ones that do something similar to what you want to achieve. Also, you would need to get familiar -#' with the objects available inside of the \code{xgb.train} and \code{xgb.cv} internal environments. -#' -#' @seealso -#' \code{\link{cb.print.evaluation}}, -#' \code{\link{cb.evaluation.log}}, -#' \code{\link{cb.reset.parameters}}, -#' \code{\link{cb.early.stop}}, -#' \code{\link{cb.save.model}}, -#' \code{\link{cb.cv.predict}}, -#' \code{\link{xgb.train}}, -#' \code{\link{xgb.cv}} -#' -#' @name callbacks -NULL - -# -# Callbacks ------------------------------------------------------------------- -# - -#' Callback closure for printing the result of evaluation -#' -#' @param period results would be printed every number of periods -#' @param showsd whether standard deviations should be printed (when available) -#' -#' @details -#' The callback function prints the result of evaluation at every \code{period} iterations. -#' The initial and the last iteration's evaluations are always printed. -#' -#' Callback function expects the following values to be set in its calling frame: -#' \code{bst_evaluation} (also \code{bst_evaluation_err} when available), -#' \code{iteration}, -#' \code{begin_iteration}, -#' \code{end_iteration}. 
-#' -#' @seealso -#' \code{\link{callbacks}} -#' -#' @export -cb.print.evaluation <- function(period = 1, showsd = TRUE) { - - callback <- function(env = parent.frame()) { - if (length(env$bst_evaluation) == 0 || - period == 0 || - NVL(env$rank, 0) != 0 ) - return() - - i <- env$iteration - if ((i-1) %% period == 0 || - i == env$begin_iteration || - i == env$end_iteration) { - stdev <- if (showsd) env$bst_evaluation_err else NULL - msg <- format.eval.string(i, env$bst_evaluation, stdev) - cat(msg, '\n') - } - } - attr(callback, 'call') <- match.call() - attr(callback, 'name') <- 'cb.print.evaluation' - callback -} - - -#' Callback closure for logging the evaluation history -#' -#' @details -#' This callback function appends the current iteration evaluation results \code{bst_evaluation} -#' available in the calling parent frame to the \code{evaluation_log} list in the calling frame. -#' -#' The finalizer callback (called with \code{finalize = TRUE} in the end) converts -#' the \code{evaluation_log} list into a final data.table. -#' -#' The iteration evaluation result \code{bst_evaluation} must be a named numeric vector. -#' -#' Note: in the column names of the final data.table, the dash '-' character is replaced with -#' the underscore '_' in order to make the column names more like regular R identifiers. -#' -#' Callback function expects the following values to be set in its calling frame: -#' \code{evaluation_log}, -#' \code{bst_evaluation}, -#' \code{iteration}. -#' -#' @seealso -#' \code{\link{callbacks}} -#' -#' @export -cb.evaluation.log <- function() { - - mnames <- NULL - - init <- function(env) { - if (!is.list(env$evaluation_log)) - stop("'evaluation_log' has to be a list") - mnames <<- names(env$bst_evaluation) - if (is.null(mnames) || any(mnames == "")) - stop("bst_evaluation must have non-empty names") - - mnames <<- gsub('-', '_', names(env$bst_evaluation)) - if(!is.null(env$bst_evaluation_err)) - mnames <<- c(paste0(mnames, '_mean'), paste0(mnames, '_std')) - } - - finalizer <- function(env) { - env$evaluation_log <- as.data.table(t(simplify2array(env$evaluation_log))) - setnames(env$evaluation_log, c('iter', mnames)) - - if(!is.null(env$bst_evaluation_err)) { - # rearrange col order from _mean,_mean,...,_std,_std,... - # to be _mean,_std,_mean,_std,... - len <- length(mnames) - means <- mnames[seq_len(len/2)] - stds <- mnames[(len/2 + 1):len] - cnames <- numeric(len) - cnames[c(TRUE, FALSE)] <- means - cnames[c(FALSE, TRUE)] <- stds - env$evaluation_log <- env$evaluation_log[, c('iter', cnames), with = FALSE] - } - } - - callback <- function(env = parent.frame(), finalize = FALSE) { - if (is.null(mnames)) - init(env) - - if (finalize) - return(finalizer(env)) - - ev <- env$bst_evaluation - if(!is.null(env$bst_evaluation_err)) - ev <- c(ev, env$bst_evaluation_err) - env$evaluation_log <- c(env$evaluation_log, - list(c(iter = env$iteration, ev))) - } - attr(callback, 'call') <- match.call() - attr(callback, 'name') <- 'cb.evaluation.log' - callback -} - -#' Callback closure for resetting the booster's parameters at each iteration. -#' -#' @param new_params a list where each element corresponds to a parameter that needs to be reset. -#' Each element's value must be either a vector of values of length \code{nrounds} -#' to be set at each iteration, -#' or a function of two parameters \code{learning_rates(iteration, nrounds)} -#' which returns a new parameter value by using the current iteration number -#' and the total number of boosting rounds.
-#' -#' @details -#' This is a "pre-iteration" callback function used to reset the booster's parameters -#' at the beginning of each iteration. -#' -#' Note that when training is resumed from some previous model, and a function is used to -#' reset a parameter value, the \code{nrounds} argument in this function would be the -#' number of boosting rounds in the current training. -#' -#' Callback function expects the following values to be set in its calling frame: -#' \code{bst} or \code{bst_folds}, -#' \code{iteration}, -#' \code{begin_iteration}, -#' \code{end_iteration}. -#' -#' @seealso -#' \code{\link{callbacks}} -#' -#' @export -cb.reset.parameters <- function(new_params) { - - if (typeof(new_params) != "list") - stop("'new_params' must be a list") - pnames <- gsub("\\.", "_", names(new_params)) - nrounds <- NULL - - # run some checks in the beginning - init <- function(env) { - nrounds <<- env$end_iteration - env$begin_iteration + 1 - - if (is.null(env$bst) && is.null(env$bst_folds)) - stop("Parent frame has neither 'bst' nor 'bst_folds'") - - # Some parameters are not allowed to be changed, - # since changing them would simply wreak havoc - not_allowed <- pnames %in% - c('num_class', 'num_output_group', 'size_leaf_vector', 'updater_seq') - if (any(not_allowed)) - stop('Parameters ', paste(pnames[not_allowed]), " cannot be changed during boosting.") - - for (n in pnames) { - p <- new_params[[n]] - if (is.function(p)) { - if (length(formals(p)) != 2) - stop("Parameter '", n, "' is a function but not of two arguments") - } else if (is.numeric(p) || is.character(p)) { - if (length(p) != nrounds) - stop("Length of '", n, "' has to be equal to 'nrounds'") - } else { - stop("Parameter '", n, "' is not a function or a vector") - } - } - } - - callback <- function(env = parent.frame()) { - if (is.null(nrounds)) - init(env) - - i <- env$iteration - pars <- lapply(new_params, function(p) { - if (is.function(p)) - return(p(i, nrounds)) - p[i] - }) - - if (!is.null(env$bst)) { - xgb.parameters(env$bst$handle) <- pars - } else { - for (fd in env$bst_folds) - xgb.parameters(fd$bst) <- pars - } - } - attr(callback, 'is_pre_iteration') <- TRUE - attr(callback, 'call') <- match.call() - attr(callback, 'name') <- 'cb.reset.parameters' - callback -} - - -#' Callback closure to activate early stopping. -#' -#' @param stopping_rounds The number of rounds with no improvement in -#' the evaluation metric in order to stop the training. -#' @param maximize whether to maximize the evaluation metric -#' @param metric_name the name of an evaluation column to use as a criterion for early -#' stopping. If not set, the last column would be used. -#' Let's say the test data in \code{watchlist} was labelled as \code{dtest}, -#' and one wants to use the AUC in test data for early stopping regardless of where -#' it is in the \code{watchlist}, then one of the following would need to be set: -#' \code{metric_name='dtest-auc'} or \code{metric_name='dtest_auc'}. -#' All dash '-' characters in metric names are considered equivalent to '_'. -#' @param verbose whether to print the early stopping information. -#' -#' @details -#' This callback function determines the condition for early stopping -#' by setting the \code{stop_condition = TRUE} flag in its calling frame.
-#' -#' The following additional fields are assigned to the model's R object: -#' \itemize{ -#' \item \code{best_score} the evaluation score at the best iteration -#' \item \code{best_iteration} at which boosting iteration the best score has occurred (1-based index) -#' \item \code{best_ntreelimit} to use with the \code{ntreelimit} parameter in \code{predict}. -#' It differs from \code{best_iteration} in multiclass or random forest settings. -#' } -#' -#' The Same values are also stored as xgb-attributes: -#' \itemize{ -#' \item \code{best_iteration} is stored as a 0-based iteration index (for interoperability of binary models) -#' \item \code{best_msg} message string is also stored. -#' } -#' -#' At least one data element is required in the evaluation watchlist for early stopping to work. -#' -#' Callback function expects the following values to be set in its calling frame: -#' \code{stop_condition}, -#' \code{bst_evaluation}, -#' \code{rank}, -#' \code{bst} (or \code{bst_folds} and \code{basket}), -#' \code{iteration}, -#' \code{begin_iteration}, -#' \code{end_iteration}, -#' \code{num_parallel_tree}. -#' -#' @seealso -#' \code{\link{callbacks}}, -#' \code{\link{xgb.attr}} -#' -#' @export -cb.early.stop <- function(stopping_rounds, maximize = FALSE, - metric_name = NULL, verbose = TRUE) { - # state variables - best_iteration <- -1 - best_ntreelimit <- -1 - best_score <- Inf - best_msg <- NULL - metric_idx <- 1 - - init <- function(env) { - if (length(env$bst_evaluation) == 0) - stop("For early stopping, watchlist must have at least one element") - - eval_names <- gsub('-', '_', names(env$bst_evaluation)) - if (!is.null(metric_name)) { - metric_idx <<- which(gsub('-', '_', metric_name) == eval_names) - if (length(metric_idx) == 0) - stop("'metric_name' for early stopping is not one of the following:\n", - paste(eval_names, collapse = ' '), '\n') - } - if (is.null(metric_name) && - length(env$bst_evaluation) > 1) { - metric_idx <<- length(eval_names) - if (verbose) - cat('Multiple eval metrics are present. 
-            eval_names[metric_idx], ' for early stopping.\n', sep = '')
-    }
-
-    metric_name <<- eval_names[metric_idx]
-
-    # maximize is usually NULL when not set in xgb.train and built-in metrics
-    if (is.null(maximize))
-      maximize <<- grepl('(_auc|_map|_ndcg)', metric_name)
-
-    if (verbose && NVL(env$rank, 0) == 0)
-      cat("Will train until ", metric_name, " hasn't improved in ",
-          stopping_rounds, " rounds.\n\n", sep = '')
-
-    best_iteration <<- 1
-    if (maximize) best_score <<- -Inf
-
-    env$stop_condition <- FALSE
-
-    if (!is.null(env$bst)) {
-      if (!inherits(env$bst, 'xgb.Booster'))
-        stop("'bst' in the parent frame must be an 'xgb.Booster'")
-      if (!is.null(best_score <- xgb.attr(env$bst$handle, 'best_score'))) {
-        best_score <<- as.numeric(best_score)
-        best_iteration <<- as.numeric(xgb.attr(env$bst$handle, 'best_iteration')) + 1
-        # best_msg is a string attribute, so it must not be coerced to numeric
-        best_msg <<- as.character(xgb.attr(env$bst$handle, 'best_msg'))
-      } else {
-        xgb.attributes(env$bst$handle) <- list(best_iteration = best_iteration - 1,
-                                               best_score = best_score)
-      }
-    } else if (is.null(env$bst_folds) || is.null(env$basket)) {
-      stop("Parent frame has neither 'bst' nor ('bst_folds' and 'basket')")
-    }
-  }
-
-  finalizer <- function(env) {
-    if (!is.null(env$bst)) {
-      attr_best_score <- as.numeric(xgb.attr(env$bst$handle, 'best_score'))
-      if (best_score != attr_best_score)
-        stop("Inconsistent 'best_score' values between the closure state: ", best_score,
-             " and the xgb.attr: ", attr_best_score)
-      env$bst$best_iteration <- best_iteration
-      env$bst$best_ntreelimit <- best_ntreelimit
-      env$bst$best_score <- best_score
-    } else {
-      env$basket$best_iteration <- best_iteration
-      env$basket$best_ntreelimit <- best_ntreelimit
-    }
-  }
-
-  callback <- function(env = parent.frame(), finalize = FALSE) {
-    if (best_iteration < 0)
-      init(env)
-
-    if (finalize)
-      return(finalizer(env))
-
-    i <- env$iteration
-    score <- env$bst_evaluation[metric_idx]
-
-    if (( maximize && score > best_score) ||
-        (!maximize && score < best_score)) {
-
-      best_msg <<- format.eval.string(i, env$bst_evaluation, env$bst_evaluation_err)
-      best_score <<- score
-      best_iteration <<- i
-      best_ntreelimit <<- best_iteration * env$num_parallel_tree
-      # save the properties to attributes, so they will occur in a checkpoint
-      if (!is.null(env$bst)) {
-        xgb.attributes(env$bst) <- list(
-          best_iteration = best_iteration - 1, # convert to 0-based index
-          best_score = best_score,
-          best_msg = best_msg,
-          best_ntreelimit = best_ntreelimit)
-      }
-    } else if (i - best_iteration >= stopping_rounds) {
-      env$stop_condition <- TRUE
-      env$end_iteration <- i
-      if (verbose && NVL(env$rank, 0) == 0)
-        cat("Stopping. Best iteration:\n", best_msg, "\n\n", sep = '')
-    }
-  }
-  attr(callback, 'call') <- match.call()
-  attr(callback, 'name') <- 'cb.early.stop'
-  callback
-}
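A minimal early stopping sketch (illustrative only, not part of the patched file; agaricus demo data assumed). With watchlist entries named 'train' and 'test' and the default 'error' metric for binary:logistic, the evaluation columns are 'train-error' and 'test-error', and dashes are interchangeable with underscores in metric_name:

library(xgboost)
data(agaricus.train, package = "xgboost")
data(agaricus.test, package = "xgboost")
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
bst <- xgb.train(params = list(objective = "binary:logistic", eta = 1),
                 data = dtrain, nrounds = 50,
                 watchlist = list(train = dtrain, test = dtest),
                 callbacks = list(cb.early.stop(stopping_rounds = 3,
                                                metric_name = "test_error")))
bst$best_iteration  # set by the callback's finalizer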
-
-
-#' Callback closure for saving a model file.
-#'
-#' @param save_period save the model to disk after every
-#' \code{save_period} iterations; 0 means save the model at the end.
-#' @param save_name the name or path for the saved model file.
-#' It can contain a \code{\link[base]{sprintf}} formatting specifier
-#' to include the integer iteration number in the file name.
-#' E.g., with \code{save_name} = 'xgboost_%04d.model',
-#' the file saved at iteration 50 would be named "xgboost_0050.model".
-#'
-#' @details
-#' This callback function allows saving an xgb-model file, either periodically
-#' after every \code{save_period} iterations or at the end.
-#'
-#' Callback function expects the following values to be set in its calling frame:
-#' \code{bst},
-#' \code{iteration},
-#' \code{begin_iteration},
-#' \code{end_iteration}.
-#'
-#' @seealso
-#' \code{\link{callbacks}}
-#'
-#' @export
-cb.save.model <- function(save_period = 0, save_name = "xgboost.model") {
-
-  if (save_period < 0)
-    stop("'save_period' cannot be negative")
-
-  callback <- function(env = parent.frame()) {
-    if (is.null(env$bst))
-      stop("'save_model' callback requires the 'bst' booster object in its calling frame")
-
-    if ((save_period > 0 && (env$iteration - env$begin_iteration) %% save_period == 0) ||
-        (save_period == 0 && env$iteration == env$end_iteration))
-      xgb.save(env$bst, sprintf(save_name, env$iteration))
-  }
-  attr(callback, 'call') <- match.call()
-  attr(callback, 'name') <- 'cb.save.model'
-  callback
-}
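A usage sketch (illustrative only, not part of the patched file; agaricus demo data assumed). The sprintf specifier in save_name embeds the iteration number into each saved file name:

library(xgboost)
data(agaricus.train, package = "xgboost")
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
# periodically saves "xgboost_%04d.model" files during training
bst <- xgb.train(params = list(objective = "binary:logistic"),
                 data = dtrain, nrounds = 10,
                 callbacks = list(cb.save.model(save_period = 5,
                                                save_name = "xgboost_%04d.model")))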
-
-
-#' Callback closure for returning cross-validation based predictions.
-#'
-#' @param save_models a flag for whether to save the folds' models.
-#'
-#' @details
-#' This callback function saves predictions for all of the test folds,
-#' and also allows saving the folds' models.
-#'
-#' It is a "finalizer" callback and it uses early stopping information whenever it is available,
-#' thus it must be run after the early stopping callback if early stopping is used.
-#'
-#' Callback function expects the following values to be set in its calling frame:
-#' \code{bst_folds},
-#' \code{basket},
-#' \code{data},
-#' \code{end_iteration},
-#' \code{params},
-#' \code{num_parallel_tree},
-#' \code{num_class}.
-#'
-#' @return
-#' Predictions are returned inside of the \code{pred} element, which is either a vector or a matrix,
-#' depending on the number of prediction outputs per data row. The order of predictions corresponds
-#' to the order of rows in the original dataset. Note that when a custom \code{folds} list is
-#' provided in \code{xgb.cv}, the predictions would only be returned properly when this list is a
-#' non-overlapping list of k sets of indices, as in a standard k-fold CV. The predictions would not be
-#' meaningful when user-provided folds have overlapping indices as in, e.g., random sampling splits.
-#' When some of the indices in the training dataset are not included into user-provided \code{folds},
-#' their prediction value would be \code{NA}.
-#'
-#' @seealso
-#' \code{\link{callbacks}}
-#'
-#' @export
-cb.cv.predict <- function(save_models = FALSE) {
-
-  finalizer <- function(env) {
-    if (is.null(env$basket) || is.null(env$bst_folds))
-      stop("'cb.cv.predict' callback requires 'basket' and 'bst_folds' lists in its calling frame")
-
-    N <- nrow(env$data)
-    pred <-
-      if (env$num_class > 1) {
-        matrix(NA_real_, N, env$num_class)
-      } else {
-        rep(NA_real_, N)
-      }
-
-    ntreelimit <- NVL(env$basket$best_ntreelimit,
-                      env$end_iteration * env$num_parallel_tree)
-    if (NVL(env$params[['booster']], '') == 'gblinear') {
-      ntreelimit <- 0 # must be 0 for gblinear
-    }
-    for (fd in env$bst_folds) {
-      pr <- predict(fd$bst, fd$watchlist[[2]], ntreelimit = ntreelimit, reshape = TRUE)
-      if (is.matrix(pred)) {
-        pred[fd$index, ] <- pr
-      } else {
-        pred[fd$index] <- pr
-      }
-    }
-    env$basket$pred <- pred
-    if (save_models) {
-      env$basket$models <- lapply(env$bst_folds, function(fd) {
-        xgb.attr(fd$bst, 'niter') <- env$end_iteration - 1
-        xgb.Booster.complete(xgb.handleToBooster(fd$bst), saveraw = TRUE)
-      })
-    }
-  }
-
-  callback <- function(env = parent.frame(), finalize = FALSE) {
-    if (finalize)
-      return(finalizer(env))
-  }
-  attr(callback, 'call') <- match.call()
-  attr(callback, 'name') <- 'cb.cv.predict'
-  callback
-}
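A usage sketch (illustrative only, not part of the patched file; agaricus demo data assumed). Passing this callback explicitly has the same effect as requesting out-of-fold predictions from xgb.cv:

library(xgboost)
data(agaricus.train, package = "xgboost")
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
cv <- xgb.cv(params = list(objective = "binary:logistic", max_depth = 2),
             data = dtrain, nrounds = 5, nfold = 4,
             callbacks = list(cb.cv.predict()))
str(cv$pred)  # one out-of-fold prediction per row of the training data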
-
-
-#' Callback closure for collecting the model coefficients history of a gblinear booster
-#' during its training.
-#'
-#' @param sparse when set to FALSE/TRUE, a dense/sparse matrix is used to store the result.
-#' Sparse format is useful when one expects only a subset of coefficients to be non-zero,
-#' e.g. when using the "thrifty" feature selector with a fairly small number of top features
-#' selected per iteration.
-#'
-#' @details
-#' To keep things fast and simple, the gblinear booster does not internally store the history of linear
-#' model coefficients at each boosting iteration. This callback provides a workaround for storing
-#' the coefficients' path, by extracting them after each training iteration.
-#'
-#' Callback function expects the following values to be set in its calling frame:
-#' \code{bst} (or \code{bst_folds}).
-#'
-#' @return
-#' Results are stored in the \code{coefs} element of the closure.
-#' The \code{\link{xgb.gblinear.history}} convenience function provides an easy way to access it.
-#' With \code{xgb.train}, it is either a dense or a sparse matrix.
-#' With \code{xgb.cv}, it is a list (one element per fold) of such matrices.
-#'
-#' @seealso
-#' \code{\link{callbacks}}, \code{\link{xgb.gblinear.history}}.
-#'
-#' @examples
-#' #### Binary classification:
-#' #
-#' # In the iris dataset, it is hard to linearly separate the Versicolor class from the rest
-#' # without considering the 2nd order interactions:
-#' require(magrittr)
-#' x <- model.matrix(Species ~ .^2, iris)[,-1]
-#' colnames(x)
-#' dtrain <- xgb.DMatrix(scale(x), label = 1*(iris$Species == "versicolor"))
-#' param <- list(booster = "gblinear", objective = "reg:logistic", eval_metric = "auc",
-#'               lambda = 0.0003, alpha = 0.0003, nthread = 2)
-#' # For 'shotgun', which is the default linear updater, using high eta values may result in
-#' # unstable behaviour in some datasets. With this simple dataset, however, the high learning
-#' # rate does not break the convergence, but allows us to illustrate the typical pattern of
-#' # "stochastic explosion" behaviour of this lock-free algorithm at early boosting iterations.
-#' bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 200, eta = 1.,
-#'                  callbacks = list(cb.gblinear.history()))
-#' # Extract the coefficients' path and plot them vs boosting iteration number:
-#' coef_path <- xgb.gblinear.history(bst)
-#' matplot(coef_path, type = 'l')
-#'
-#' # With the deterministic coordinate descent updater, it is safer to use higher learning rates.
-#' # Will try the classical componentwise boosting which selects a single best feature per round:
-#' bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 200, eta = 0.8,
-#'                  updater = 'coord_descent', feature_selector = 'thrifty', top_k = 1,
-#'                  callbacks = list(cb.gblinear.history()))
-#' xgb.gblinear.history(bst) %>% matplot(type = 'l')
-#' # Componentwise boosting is known to have a similar effect to Lasso regularization.
-#' # Try experimenting with various values of top_k, eta, nrounds,
-#' # as well as different feature_selectors.
-#'
-#' # For xgb.cv:
-#' bst <- xgb.cv(param, dtrain, nfold = 5, nrounds = 100, eta = 0.8,
-#'               callbacks = list(cb.gblinear.history()))
-#' # coefficients in the CV fold #3
-#' xgb.gblinear.history(bst)[[3]] %>% matplot(type = 'l')
-#'
-#'
-#' #### Multiclass classification:
-#' #
-#' dtrain <- xgb.DMatrix(scale(x), label = as.numeric(iris$Species) - 1)
-#' param <- list(booster = "gblinear", objective = "multi:softprob", num_class = 3,
-#'               lambda = 0.0003, alpha = 0.0003, nthread = 2)
-#' # For the default linear updater 'shotgun' it is sometimes helpful
-#' # to use a smaller eta to reduce instability
-#' bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 70, eta = 0.5,
-#'                  callbacks = list(cb.gblinear.history()))
-#' # Will plot the coefficient paths separately for each class:
-#' xgb.gblinear.history(bst, class_index = 0) %>% matplot(type = 'l')
-#' xgb.gblinear.history(bst, class_index = 1) %>% matplot(type = 'l')
-#' xgb.gblinear.history(bst, class_index = 2) %>% matplot(type = 'l')
-#'
-#' # CV:
-#' bst <- xgb.cv(param, dtrain, nfold = 5, nrounds = 70, eta = 0.5,
-#'               callbacks = list(cb.gblinear.history(FALSE)))
-#' # 1st fold of 1st class
-#' xgb.gblinear.history(bst, class_index = 0)[[1]] %>% matplot(type = 'l')
-#'
-#' @export
-cb.gblinear.history <- function(sparse = FALSE) {
-  coefs <- NULL
-
-  init <- function(env) {
-    # validation only: 'coefs' accumulates from NULL in the callback below
-    if (is.null(env$bst) && is.null(env$bst_folds))
-      stop("Parent frame has neither 'bst' nor 'bst_folds'")
-  }
-
-  # convert from list to (sparse) matrix
-  list2mat <- function(coef_list) {
-    if (sparse) {
-      coef_mat <- sparseMatrix(x = unlist(lapply(coef_list, slot, "x")),
-                               i = unlist(lapply(coef_list, slot, "i")),
-                               p = c(0, cumsum(sapply(coef_list, function(x) length(x@x)))),
-                               dims = c(length(coef_list[[1]]), length(coef_list)))
-      return(t(coef_mat))
-    } else {
-      return(do.call(rbind, coef_list))
-    }
-  }
-
-  finalizer <- function(env) {
-    if (length(coefs) == 0)
-      return()
-    if (!is.null(env$bst)) { # xgb.train:
-      coefs <<- list2mat(coefs)
-    } else { # xgb.cv:
-      # the first lapply transposes the list
-      coefs <<- lapply(seq_along(coefs[[1]]), function(i) lapply(coefs, "[[", i)) %>%
-        lapply(function(x) list2mat(x))
-    }
-  }
-
-  extract.coef <- function(env) {
-    if (!is.null(env$bst)) { # xgb.train:
-      cf <- as.numeric(grep('(booster|bias|weigh)', xgb.dump(env$bst), invert = TRUE, value = TRUE))
-      if (sparse) cf <- as(cf, "sparseVector")
-    } else { # xgb.cv:
-      cf <- vector("list", length(env$bst_folds))
-      for (i in seq_along(env$bst_folds)) {
-        dmp <- xgb.dump(xgb.handleToBooster(env$bst_folds[[i]]$bst))
-        cf[[i]] <- as.numeric(grep('(booster|bias|weigh)', dmp, invert = TRUE, value = TRUE))
-        if (sparse) cf[[i]] <- as(cf[[i]], "sparseVector")
-      }
-    }
-    cf
-  }
-
-  callback <- function(env = parent.frame(), finalize = FALSE) {
-    if (is.null(coefs)) init(env)
-    if (finalize) return(finalizer(env))
-    cf <- extract.coef(env)
-    coefs <<- c(coefs, list(cf))
-  }
-
-  attr(callback, 'call') <- match.call()
-  attr(callback, 'name') <- 'cb.gblinear.history'
-  callback
-}
-
-#' Extract gblinear coefficients history.
-#'
-#' A helper function to extract the matrix of linear coefficients' history
-#' from a gblinear model created while using the \code{cb.gblinear.history()}
-#' callback.
-#'
-#' @param model either an \code{xgb.Booster} or a result of \code{xgb.cv()}, trained
-#' using the \code{cb.gblinear.history()} callback.
-#' @param class_index zero-based class index to extract the coefficients for only that
-#' specific class in a multinomial multiclass model. When it is NULL, all the
-#' coefficients are returned. Has no effect in non-multiclass models.
-#'
-#' @return
-#' For an \code{xgb.train} result, a matrix (either dense or sparse) with the rows
-#' corresponding to boosting iterations and the columns corresponding to the model's
-#' coefficients (in the same order that \code{xgb.dump()} would return them).
-#'
-#' For an \code{xgb.cv} result, a list of such matrices is returned with the elements
-#' corresponding to CV folds.
-#'
-#' @export
-xgb.gblinear.history <- function(model, class_index = NULL) {
-
-  if (!(inherits(model, "xgb.Booster") ||
-        inherits(model, "xgb.cv.synchronous")))
-    stop("model must be an object of either xgb.Booster or xgb.cv.synchronous class")
-  is_cv <- inherits(model, "xgb.cv.synchronous")
-
-  if (is.null(model[["callbacks"]]) || is.null(model$callbacks[["cb.gblinear.history"]]))
-    stop("model must be trained while using the cb.gblinear.history() callback")
-
-  if (!is_cv) {
-    # extract num_class & num_feat from the internal model
-    dmp <- xgb.dump(model)
-    if (length(dmp) < 2 || dmp[2] != "bias:")
-      stop("It does not appear to be a gblinear model")
-    dmp <- dmp[-c(1, 2)]
-    n <- which(dmp == 'weight:')
-    if (length(n) != 1)
-      stop("It does not appear to be a gblinear model")
-    num_class <- n - 1
-    num_feat <- (length(dmp) - 4) / num_class
-  } else {
-    # in case of CV, the object is expected to have this info
-    if (model$params$booster != "gblinear")
-      stop("It does not appear to be a gblinear model")
-    num_class <- NVL(model$params$num_class, 1)
-    num_feat <- model$nfeatures
-    if (is.null(num_feat))
-      stop("This xgb.cv result does not have nfeatures info")
-  }
-
-  if (!is.null(class_index) &&
-      num_class > 1 &&
-      (class_index[1] < 0 || class_index[1] >= num_class))
-    stop("class_index has to be within [0,", num_class - 1, "]")
-
-  coef_path <- environment(model$callbacks$cb.gblinear.history)[["coefs"]]
-  if (!is.null(class_index) && num_class > 1) {
-    coef_path <- if (is.list(coef_path)) {
-      lapply(coef_path,
-             function(x) x[, seq(1 + class_index, by = num_class, length.out = num_feat)])
-    } else {
-      coef_path[, seq(1 + class_index, by = num_class, length.out = num_feat)]
-    }
-  }
-  coef_path
-}
-
-
-#
-# Internal utility functions for callbacks ------------------------------------
-#
-
-# Format the evaluation metric string
-format.eval.string <- function(iter, eval_res, eval_err = NULL) {
-  if (length(eval_res) == 0)
-    stop('no evaluation results')
-  enames <- names(eval_res)
-  if (is.null(enames))
-    stop('evaluation results must have names')
-  iter <- sprintf('[%d]\t', iter)
-  if (!is.null(eval_err)) {
-    if (length(eval_res) != length(eval_err))
-      stop('eval_res & eval_err lengths mismatch')
-    res <- paste0(sprintf("%s:%f+%f", enames, eval_res, eval_err), collapse = '\t')
-  } else {
-    res <- paste0(sprintf("%s:%f", enames, eval_res), collapse = '\t')
-  }
-  return(paste0(iter, res))
-}
-
-# Extract callback names from the list of callbacks
-callback.names <- function(cb_list) {
-  unlist(lapply(cb_list, function(x) attr(x, 'name')))
-}
-
-# Extract callback calls from the list of callbacks
-callback.calls <- function(cb_list) {
-  unlist(lapply(cb_list, function(x) attr(x, 'call')))
-}
-
-# Add a callback cb to the list and make sure that
-# cb.early.stop and cb.cv.predict are at the end of the list
-# with cb.cv.predict being the last (when present)
-add.cb <- function(cb_list, cb) {
-  cb_list <- c(cb_list, cb)
-  names(cb_list) <- callback.names(cb_list)
-  if ('cb.early.stop' %in% names(cb_list)) {
-    cb_list <- c(cb_list, cb_list['cb.early.stop'])
-    # this removes only the first one
-    cb_list['cb.early.stop'] <- NULL
-  }
-  if ('cb.cv.predict' %in% names(cb_list)) {
-    cb_list <- c(cb_list, cb_list['cb.cv.predict'])
-    cb_list['cb.cv.predict'] <- NULL
-  }
-  cb_list
-}
-
-# Sort callbacks list into categories
-categorize.callbacks <- function(cb_list) {
-  list(
-    pre_iter = Filter(function(x) {
-      pre <- attr(x, 'is_pre_iteration')
-      !is.null(pre) && pre
-    }, cb_list),
-    post_iter = Filter(function(x) {
-      pre <- attr(x, 'is_pre_iteration')
-      is.null(pre) || !pre
-    }, cb_list),
-    finalize = Filter(function(x) {
-      'finalize' %in% names(formals(x))
-    }, cb_list)
-  )
-}
-
-# Check whether all callback functions with names given by 'query_names' are present in the 'cb_list'.
-has.callbacks <- function(cb_list, query_names) {
-  if (length(cb_list) < length(query_names))
-    return(FALSE)
-  if (!is.list(cb_list) ||
-      any(sapply(cb_list, class) != 'function')) {
-    stop('`cb_list` must be a list of callback functions')
-  }
-  cb_names <- callback.names(cb_list)
-  if (!is.character(cb_names) ||
-      length(cb_names) != length(cb_list) ||
-      any(cb_names == "")) {
-    stop('All callbacks in the `cb_list` must have a non-empty `name` attribute')
-  }
-  if (!is.character(query_names) ||
-      length(query_names) == 0 ||
-      any(query_names == "")) {
-    stop('query_names must be a non-empty vector of non-empty character names')
-  }
-  return(all(query_names %in% cb_names))
-}
diff --git a/ml-xgboost/R-package/R/utils.R b/ml-xgboost/R-package/R/utils.R
deleted file mode 100644
index 0edbf12..0000000
--- a/ml-xgboost/R-package/R/utils.R
+++ /dev/null
@@ -1,352 +0,0 @@
-#
-# This file is for the low-level reusable utility functions
-# that are not supposed to be visible to a user.
-#
-
-#
-# General helper utilities ----------------------------------------------------
-#
-
-# SQL-style NVL shortcut.
-NVL <- function(x, val) {
-  if (is.null(x))
-    return(val)
-  if (is.vector(x)) {
-    x[is.na(x)] <- val
-    return(x)
-  }
-  if (typeof(x) == 'closure')
-    return(x)
-  stop("typeof(x) == ", typeof(x), " is not supported by NVL")
-}
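To make the NVL semantics concrete, a few illustrative calls (NVL is internal to the package, so outside of it the calls would go through xgboost:::NVL):

NVL(NULL, 0)            # -> 0: NULL is replaced by the default
NVL(c(1, NA, 3), 0)     # -> c(1, 0, 3): NAs within a vector are replaced
NVL(function(x) x, 0)   # -> the function itself: closures pass through untouched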
-
-
-#
-# Low-level functions for boosting --------------------------------------------
-#
-
-# Merges booster params with whatever is provided in ...
-# plus runs some checks
-check.booster.params <- function(params, ...) {
-  if (!identical(class(params), "list"))
-    stop("params must be a list")
-
-  # in R interface, allow for '.' instead of '_' in parameter names
-  names(params) <- gsub("\\.", "_", names(params))
-
-  # merge parameters from the params and the dots-expansion
-  dot_params <- list(...)
-  names(dot_params) <- gsub("\\.", "_", names(dot_params))
-  if (length(intersect(names(params),
-                       names(dot_params))) > 0)
-    stop("Same parameters in 'params' and in the call are not allowed. Please check your 'params' list.")
-  params <- c(params, dot_params)
-
-  # providing a parameter multiple times makes sense only for 'eval_metric'
-  name_freqs <- table(names(params))
-  multi_names <- setdiff(names(name_freqs[name_freqs > 1]), 'eval_metric')
-  if (length(multi_names) > 0) {
-    warning("The following parameters were provided multiple times:\n\t",
-            paste(multi_names, collapse = ', '), "\n  Only the last value for each of them will be used.\n")
-    # While xgboost internals would choose the last value for a multiple-times parameter,
-    # enforce it here in R as well (b/c multi-parameters might be used further in R code,
-    # and R takes the 1st value when multiple elements with the same name are present in a list).
-    for (n in multi_names) {
-      del_idx <- which(n == names(params))
-      del_idx <- del_idx[-length(del_idx)]
-      # single-bracket assignment: 'del_idx' may contain more than one index
-      params[del_idx] <- NULL
-    }
-  }
-
-  # for multiclass, expect num_class to be set
-  if (typeof(params[['objective']]) == "character" &&
-      substr(NVL(params[['objective']], 'x'), 1, 6) == 'multi:' &&
-      as.numeric(NVL(params[['num_class']], 0)) < 2) {
-    stop("'num_class' > 1 parameter must be set for multiclass classification")
-  }
-
-  # monotone_constraints parser
-  if (!is.null(params[['monotone_constraints']]) &&
-      typeof(params[['monotone_constraints']]) != "character") {
-    vec2str <- paste(params[['monotone_constraints']], collapse = ',')
-    vec2str <- paste0('(', vec2str, ')')
-    params[['monotone_constraints']] <- vec2str
-  }
-
-  # interaction constraints parser (convert from list of column indices to string)
-  if (!is.null(params[['interaction_constraints']]) &&
-      typeof(params[['interaction_constraints']]) != "character") {
-    # check input class
-    if (!identical(class(params[['interaction_constraints']]), 'list'))
-      stop('interaction_constraints should be class list')
-    if (!all(unique(sapply(params[['interaction_constraints']], class)) %in% c('numeric', 'integer'))) {
-      stop('interaction_constraints should be a list of numeric/integer vectors')
-    }
-
-    # recast parameter as string
-    interaction_constraints <- sapply(params[['interaction_constraints']],
-                                      function(x) paste0('[', paste(x, collapse = ','), ']'))
-    params[['interaction_constraints']] <- paste0('[', paste(interaction_constraints, collapse = ','), ']')
-  }
-  return(params)
-}
-
-
-# Performs some checks related to custom objective function.
-# WARNING: has side-effects and can modify 'params' and 'obj' in its calling frame
-check.custom.obj <- function(env = parent.frame()) {
-  if (!is.null(env$params[['objective']]) && !is.null(env$obj))
-    stop("Setting objectives in 'params' and 'obj' at the same time is not allowed")
-
-  if (!is.null(env$obj) && typeof(env$obj) != 'closure')
-    stop("'obj' must be a function")
-
-  # handle the case when custom objective function was provided through params
-  if (!is.null(env$params[['objective']]) &&
-      typeof(env$params$objective) == 'closure') {
-    env$obj <- env$params$objective
-    env$params$objective <- NULL
-  }
-}
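An illustration of what the parsers above produce (check.booster.params is internal; the call is written with ::: as it would be used outside the package):

p <- xgboost:::check.booster.params(list(
  max.depth = 2,                                # '.' is normalized to '_'
  monotone_constraints = c(1, -1),              # becomes the string "(1,-1)"
  interaction_constraints = list(c(0, 1), c(2)) # becomes "[[0,1],[2]]"
))
p$max_depth               # 2
p$monotone_constraints    # "(1,-1)"
p$interaction_constraints # "[[0,1],[2]]"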
-
-# Performs some checks related to custom evaluation function.
-# WARNING: has side-effects and can modify 'params' and 'feval' in its calling frame
-check.custom.eval <- function(env = parent.frame()) {
-  if (!is.null(env$params[['eval_metric']]) && !is.null(env$feval))
-    stop("Setting evaluation metrics in 'params' and 'feval' at the same time is not allowed")
-
-  if (!is.null(env$feval) && typeof(env$feval) != 'closure')
-    stop("'feval' must be a function")
-
-  # handle a situation when custom eval function was provided through params
-  if (!is.null(env$params[['eval_metric']]) &&
-      typeof(env$params$eval_metric) == 'closure') {
-    env$feval <- env$params$eval_metric
-    env$params$eval_metric <- NULL
-  }
-
-  # require maximize to be set when custom feval and early stopping are used together
-  if (!is.null(env$feval) &&
-      is.null(env$maximize) && (
-        !is.null(env$early_stopping_rounds) ||
-        has.callbacks(env$callbacks, 'cb.early.stop')))
-    stop("Please set 'maximize' to indicate whether the evaluation metric needs to be maximized or not")
-}
-
-
-# Update a booster handle for an iteration with dtrain data
-xgb.iter.update <- function(booster_handle, dtrain, iter, obj = NULL) {
-  if (!identical(class(booster_handle), "xgb.Booster.handle")) {
-    stop("booster_handle must be of xgb.Booster.handle class")
-  }
-  if (!inherits(dtrain, "xgb.DMatrix")) {
-    stop("dtrain must be of xgb.DMatrix class")
-  }
-
-  if (is.null(obj)) {
-    .Call(XGBoosterUpdateOneIter_R, booster_handle, as.integer(iter), dtrain)
-  } else {
-    pred <- predict(booster_handle, dtrain, outputmargin = TRUE, training = TRUE)
-    gpair <- obj(pred, dtrain)
-    .Call(XGBoosterBoostOneIter_R, booster_handle, dtrain, gpair$grad, gpair$hess)
-  }
-  return(TRUE)
-}
-
-
-# Evaluate one iteration.
-# Returns a named vector of evaluation metrics
-# with the names in a 'datasetname-metricname' format.
-xgb.iter.eval <- function(booster_handle, watchlist, iter, feval = NULL) {
-  if (!identical(class(booster_handle), "xgb.Booster.handle"))
-    stop("class of booster_handle must be xgb.Booster.handle")
-
-  if (length(watchlist) == 0)
-    return(NULL)
-
-  evnames <- names(watchlist)
-  if (is.null(feval)) {
-    msg <- .Call(XGBoosterEvalOneIter_R, booster_handle, as.integer(iter), watchlist, as.list(evnames))
-    msg <- stri_split_regex(msg, '(\\s+|:)')[[1]][-1]
-    res <- as.numeric(msg[c(FALSE, TRUE)]) # even indices are the values
-    names(res) <- msg[c(TRUE, FALSE)]      # odd indices are the names
-  } else {
-    res <- sapply(seq_along(watchlist), function(j) {
-      w <- watchlist[[j]]
-      preds <- predict(booster_handle, w) # predict using all trees
-      eval_res <- feval(preds, w)
-      out <- eval_res$value
-      names(out) <- paste0(evnames[j], "-", eval_res$metric)
-      out
-    })
-  }
-  return(res)
-}
-
-
-#
-# Helper functions for cross validation ---------------------------------------
-#
-
-# Generates random (stratified if needed) CV folds
-generate.cv.folds <- function(nfold, nrows, stratified, label, params) {
-
-  # cannot do it for rank
-  if (exists('objective', where = params) &&
-      is.character(params$objective) &&
-      strtrim(params$objective, 5) == 'rank:') {
-    stop("\n\tAutomatic generation of CV-folds is not implemented for ranking!\n",
-         "\tConsider providing pre-computed CV-folds through the 'folds=' parameter.\n")
-  }
-  # shuffle
-  rnd_idx <- sample.int(nrows)
-  if (stratified &&
-      length(label) == length(rnd_idx)) {
-    y <- label[rnd_idx]
-    # WARNING: some heuristic logic is employed to identify classification setting!
-    # - For classification, need to convert y labels to factor before making the folds,
-    #   and then do stratification by factor levels.
-    # - For regression, leave y numeric and do stratification by quantiles.
-    if (exists('objective', where = params) &&
-        is.character(params$objective)) {
-      # If 'objective' provided in params, assume that y is a classification label
-      # unless objective is reg:squarederror
-      if (params$objective != 'reg:squarederror')
-        y <- factor(y)
-    } else {
-      # If no 'objective' given in params, it means that user either wants to
-      # use the default 'reg:squarederror' objective or has provided a custom
-      # obj function. Here, assume classification setting when y has 5 or fewer
-      # unique values:
-      if (length(unique(y)) <= 5)
-        y <- factor(y)
-    }
-    folds <- xgb.createFolds(y, nfold)
-  } else {
-    # make simple non-stratified folds
-    kstep <- length(rnd_idx) %/% nfold
-    folds <- list()
-    for (i in seq_len(nfold - 1)) {
-      folds[[i]] <- rnd_idx[seq_len(kstep)]
-      rnd_idx <- rnd_idx[-seq_len(kstep)]
-    }
-    folds[[nfold]] <- rnd_idx
-  }
-  return(folds)
-}
-
-# Creates CV folds stratified by the values of y.
-# It was borrowed from caret::createFolds and simplified
-# by always returning an unnamed list of fold indices.
-xgb.createFolds <- function(y, k = 10)
-{
-  if (is.numeric(y)) {
-    ## Group the numeric data based on their magnitudes
-    ## and sample within those groups.
-
-    ## When the number of samples is low, we may have
-    ## issues further slicing the numeric data into
-    ## groups. The number of groups will depend on the
-    ## ratio of the number of folds to the sample size.
-    ## At most, we will use quantiles. If the sample
-    ## is too small, we just do regular unstratified CV.
-    cuts <- floor(length(y) / k)
-    if (cuts < 2) cuts <- 2
-    if (cuts > 5) cuts <- 5
-    y <- cut(y,
-             unique(stats::quantile(y, probs = seq(0, 1, length = cuts))),
-             include.lowest = TRUE)
-  }
-
-  if (k < length(y)) {
-    ## reset levels so that the possible levels and
-    ## the levels in the vector are the same
-    y <- factor(as.character(y))
-    numInClass <- table(y)
-    foldVector <- vector(mode = "integer", length(y))
-
-    ## For each class, balance the fold allocation as far
-    ## as possible, then resample the remainder.
-    ## The final assignment of folds is also randomized.
-    for (i in seq_along(numInClass)) {
-      ## create a vector of integers from 1:k as many times as possible without
-      ## going over the number of samples in the class. Note that if the number
-      ## of samples in a class is less than k, nothing is produced here.
-      seqVector <- rep(seq_len(k), numInClass[i] %/% k)
-      ## add enough random integers to get length(seqVector) == numInClass[i]
-      if (numInClass[i] %% k > 0) seqVector <- c(seqVector, sample.int(k, numInClass[i] %% k))
-      ## shuffle the integers for fold assignment and assign to this class's data;
-      ## seqVector[sample.int(length(seqVector))] is used to handle length(seqVector) == 1
-      foldVector[y == dimnames(numInClass)$y[i]] <- seqVector[sample.int(length(seqVector))]
-    }
-  } else {
-    foldVector <- seq(along = y)
-  }
-
-  out <- split(seq(along = y), foldVector)
-  names(out) <- NULL
-  out
-}
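A small illustration of the stratified helper above (internal, shown with ::: as it would be used outside the package): with an imbalanced binary label, every fold receives a near-proportional share of each class.

set.seed(1)
y <- factor(rep(c(0, 1), c(30, 10)))
folds <- xgboost:::xgb.createFolds(y, k = 5)
sapply(folds, function(idx) table(y[idx]))  # roughly 6 zeros and 2 ones per fold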
-
-
-#
-# Deprecation notice utilities -------------------------------------------------
-#
-
-#' Deprecation notices.
-#'
-#' At this time, some of the parameter names were changed in order to make the code style more uniform.
-#' The deprecated parameters would be removed in the next release.
-#'
-#' To see all the current deprecated and new parameters, check the \code{xgboost:::depr_par_lut} table.
-#'
-#' A deprecation warning is shown when any of the deprecated parameters is used in a call.
-#' An additional warning is shown when there was a partial match to a deprecated parameter
-#' (as R is able to partially match parameter names).
-#'
-#' @name xgboost-deprecated
-NULL
-
-# Lookup table for the deprecated parameters bookkeeping
-depr_par_lut <- matrix(c(
-  'print.every.n', 'print_every_n',
-  'early.stop.round', 'early_stopping_rounds',
-  'training.data', 'data',
-  'with.stats', 'with_stats',
-  'numberOfClusters', 'n_clusters',
-  'features.keep', 'features_keep',
-  'plot.height', 'plot_height',
-  'plot.width', 'plot_width',
-  'n_first_tree', 'trees',
-  'dummy', 'DUMMY'
-), ncol = 2, byrow = TRUE)
-colnames(depr_par_lut) <- c('old', 'new')
-
-# Checks the dot-parameters for deprecated names
-# (including partial matching), gives a deprecation warning,
-# and sets new parameters to the old parameters' values within its parent frame.
-# WARNING: has side-effects
-check.deprecation <- function(..., env = parent.frame()) {
-  pars <- list(...)
-  # exact and partial matches
-  all_match <- pmatch(names(pars), depr_par_lut[, 1])
-  # indices of matched pars' names
-  idx_pars <- which(!is.na(all_match))
-  if (length(idx_pars) == 0) return()
-  # indices of matched LUT rows
-  idx_lut <- all_match[idx_pars]
-  # which of idx_lut were the exact matches?
-  ex_match <- depr_par_lut[idx_lut, 1] %in% names(pars)
-  for (i in seq_along(idx_pars)) {
-    pars_par <- names(pars)[idx_pars[i]]
-    old_par <- depr_par_lut[idx_lut[i], 1]
-    new_par <- depr_par_lut[idx_lut[i], 2]
-    if (!ex_match[i]) {
-      warning("'", pars_par, "' was partially matched to '", old_par, "'")
-    }
-    .Deprecated(new_par, old = old_par, package = 'xgboost')
-    if (new_par != 'NULL') {
-      eval(parse(text = paste(new_par, '<-', pars[[pars_par]])), envir = env)
-    }
-  }
-}
diff --git a/ml-xgboost/R-package/R/xgb.Booster.R b/ml-xgboost/R-package/R/xgb.Booster.R
deleted file mode 100644
index dcc4469..0000000
--- a/ml-xgboost/R-package/R/xgb.Booster.R
+++ /dev/null
@@ -1,711 +0,0 @@
-# Construct an internal xgboost Booster and return a handle to it.
-# internal utility function -xgb.Booster.handle <- function(params = list(), cachelist = list(), modelfile = NULL) { - if (typeof(cachelist) != "list" || - !all(vapply(cachelist, inherits, logical(1), what = 'xgb.DMatrix'))) { - stop("cachelist must be a list of xgb.DMatrix objects") - } - ## Load existing model, dispatch for on disk model file and in memory buffer - if (!is.null(modelfile)) { - if (typeof(modelfile) == "character") { - ## A filename - handle <- .Call(XGBoosterCreate_R, cachelist) - .Call(XGBoosterLoadModel_R, handle, modelfile[1]) - class(handle) <- "xgb.Booster.handle" - if (length(params) > 0) { - xgb.parameters(handle) <- params - } - return(handle) - } else if (typeof(modelfile) == "raw") { - ## A memory buffer - bst <- xgb.unserialize(modelfile) - xgb.parameters(bst) <- params - return (bst) - } else if (inherits(modelfile, "xgb.Booster")) { - ## A booster object - bst <- xgb.Booster.complete(modelfile, saveraw = TRUE) - bst <- xgb.unserialize(bst$raw) - xgb.parameters(bst) <- params - return (bst) - } else { - stop("modelfile must be either character filename, or raw booster dump, or xgb.Booster object") - } - } - ## Create new model - handle <- .Call(XGBoosterCreate_R, cachelist) - class(handle) <- "xgb.Booster.handle" - if (length(params) > 0) { - xgb.parameters(handle) <- params - } - return(handle) -} - -# Convert xgb.Booster.handle to xgb.Booster -# internal utility function -xgb.handleToBooster <- function(handle, raw = NULL) { - bst <- list(handle = handle, raw = raw) - class(bst) <- "xgb.Booster" - return(bst) -} - -# Check whether xgb.Booster.handle is null -# internal utility function -is.null.handle <- function(handle) { - if (is.null(handle)) return(TRUE) - - if (!identical(class(handle), "xgb.Booster.handle")) - stop("argument type must be xgb.Booster.handle") - - if (.Call(XGCheckNullPtr_R, handle)) - return(TRUE) - - return(FALSE) -} - -# Return a verified to be valid handle out of either xgb.Booster.handle or xgb.Booster -# internal utility function -xgb.get.handle <- function(object) { - if (inherits(object, "xgb.Booster")) { - handle <- object$handle - } else if (inherits(object, "xgb.Booster.handle")) { - handle <- object - } else { - stop("argument must be of either xgb.Booster or xgb.Booster.handle class") - } - if (is.null.handle(handle)) { - stop("invalid xgb.Booster.handle") - } - handle -} - -#' Restore missing parts of an incomplete xgb.Booster object. -#' -#' It attempts to complete an \code{xgb.Booster} object by restoring either its missing -#' raw model memory dump (when it has no \code{raw} data but its \code{xgb.Booster.handle} is valid) -#' or its missing internal handle (when its \code{xgb.Booster.handle} is not valid -#' but it has a raw Booster memory dump). -#' -#' @param object object of class \code{xgb.Booster} -#' @param saveraw a flag indicating whether to append \code{raw} Booster memory dump data -#' when it doesn't already exist. -#' -#' @details -#' -#' While this method is primarily for internal use, it might be useful in some practical situations. -#' -#' E.g., when an \code{xgb.Booster} model is saved as an R object and then is loaded as an R object, -#' its handle (pointer) to an internal xgboost model would be invalid. The majority of xgboost methods -#' should still work for such a model object since those methods would be using -#' \code{xgb.Booster.complete} internally. 
However, one might find it more efficient to call the
-#' \code{xgb.Booster.complete} function explicitly once after loading a model as an R-object.
-#' That would prevent further repeated implicit reconstruction of an internal booster model.
-#'
-#' @return
-#' An object of \code{xgb.Booster} class.
-#'
-#' @examples
-#'
-#' data(agaricus.train, package='xgboost')
-#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
-#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
-#' saveRDS(bst, "xgb.model.rds")
-#'
-#' bst1 <- readRDS("xgb.model.rds")
-#' if (file.exists("xgb.model.rds")) file.remove("xgb.model.rds")
-#' # the handle is invalid:
-#' print(bst1$handle)
-#'
-#' bst1 <- xgb.Booster.complete(bst1)
-#' # now the handle points to a valid internal booster model:
-#' print(bst1$handle)
-#'
-#' @export
-xgb.Booster.complete <- function(object, saveraw = TRUE) {
-  if (!inherits(object, "xgb.Booster"))
-    stop("argument type must be xgb.Booster")
-
-  if (is.null.handle(object$handle)) {
-    object$handle <- xgb.Booster.handle(modelfile = object$raw)
-  } else {
-    if (is.null(object$raw) && saveraw) {
-      object$raw <- xgb.serialize(object$handle)
-    }
-  }
-
-  attrs <- xgb.attributes(object)
-  if (!is.null(attrs$best_ntreelimit)) {
-    object$best_ntreelimit <- as.integer(attrs$best_ntreelimit)
-  }
-  if (!is.null(attrs$best_iteration)) {
-    ## Convert from 0-based back to 1-based.
-    object$best_iteration <- as.integer(attrs$best_iteration) + 1
-  }
-  if (!is.null(attrs$best_score)) {
-    object$best_score <- as.numeric(attrs$best_score)
-  }
-  if (!is.null(attrs$best_msg)) {
-    object$best_msg <- attrs$best_msg
-  }
-  if (!is.null(attrs$niter)) {
-    object$niter <- as.integer(attrs$niter)
-  }
-
-  return(object)
-}
-
-#' Predict method for eXtreme Gradient Boosting model
-#'
-#' Predicted values based on either xgboost model or model handle object.
-#'
-#' @param object Object of class \code{xgb.Booster} or \code{xgb.Booster.handle}
-#' @param newdata takes \code{matrix}, \code{dgCMatrix}, local data file or \code{xgb.DMatrix}.
-#' @param missing Missing is only used when input is dense matrix. Pick a float value that represents
-#' missing values in data (e.g., sometimes 0 or some other extreme value is used).
-#' @param outputmargin whether the prediction should be returned in the form of the original untransformed
-#' sum of predictions from boosting iterations' results. E.g., setting \code{outputmargin=TRUE} for
-#' logistic regression would result in predictions for log-odds instead of probabilities.
-#' @param ntreelimit limit the number of model's trees or boosting iterations used in prediction (see Details).
-#' It will use all the trees by default (\code{NULL} value).
-#' @param predleaf whether to predict leaf indices.
-#' @param predcontrib whether to return feature contributions to individual predictions (see Details).
-#' @param approxcontrib whether to use a fast approximation for feature contributions (see Details).
-#' @param predinteraction whether to return contributions of feature interactions to individual predictions (see Details).
-#' @param reshape whether to reshape the vector of predictions to a matrix form when there are several
-#' prediction outputs per case. This option has no effect when either of predleaf, predcontrib,
-#' or predinteraction flags is TRUE.
-#' @param training whether the prediction will be used for training. For the dart booster,
-#' predicting during training performs dropout.
-#' @param ... Parameters passed to \code{predict.xgb.Booster}
-#'
-#' @details
-#' Note that \code{ntreelimit} is not necessarily equal to the number of boosting iterations
-#' and it is not necessarily equal to the number of trees in a model.
-#' E.g., in a random forest-like model, \code{ntreelimit} would limit the number of trees.
-#' But for multiclass classification, while there are multiple trees per iteration,
-#' \code{ntreelimit} limits the number of boosting iterations.
-#'
-#' Also note that \code{ntreelimit} would currently do nothing for predictions from gblinear,
-#' since gblinear doesn't keep its boosting history.
-#'
-#' One possible practical application of the \code{predleaf} option is to use the model
-#' as a generator of new features which capture non-linearity and interactions,
-#' e.g., as implemented in \code{\link{xgb.create.features}}.
-#'
-#' Setting \code{predcontrib = TRUE} allows calculating contributions of each feature to
-#' individual predictions. For "gblinear" booster, feature contributions are simply linear terms
-#' (feature_beta * feature_value). For "gbtree" booster, feature contributions are SHAP
-#' values (Lundberg 2017) that sum to the difference between the expected output
-#' of the model and the current prediction (where the hessian weights are used to compute the expectations).
-#' Setting \code{approxcontrib = TRUE} approximates these values following the idea explained
-#' in \url{http://blog.datadive.net/interpreting-random-forests/}.
-#'
-#' With \code{predinteraction = TRUE}, SHAP values of contributions of interaction of each pair of features
-#' are computed. Note that this operation might be rather expensive in terms of compute and memory.
-#' Since it quadratically depends on the number of features, it is recommended to perform selection
-#' of the most important features first. See below about the format of the returned results.
-#'
-#' @return
-#' For regression or binary classification, it returns a vector of length \code{nrows(newdata)}.
-#' For multiclass classification, either a \code{num_class * nrows(newdata)} vector or
-#' a \code{(nrows(newdata), num_class)} dimension matrix is returned, depending on
-#' the \code{reshape} value.
-#'
-#' When \code{predleaf = TRUE}, the output is a matrix object with the
-#' number of columns corresponding to the number of trees.
-#'
-#' When \code{predcontrib = TRUE} and it is not a multiclass setting, the output is a matrix object with
-#' \code{num_features + 1} columns. The last "+ 1" column in a matrix corresponds to bias.
-#' For a multiclass case, a list of \code{num_class} elements is returned, where each element is
-#' such a matrix. The contribution values are on the scale of untransformed margin
-#' (e.g., for binary classification would mean that the contributions are log-odds deviations from bias).
-#'
-#' When \code{predinteraction = TRUE} and it is not a multiclass setting, the output is a 3d array with
-#' dimensions \code{c(nrow, num_features + 1, num_features + 1)}. The off-diagonal (in the last two dimensions)
-#' elements represent the contributions of different feature interactions. The array is symmetric WRT the last
-#' two dimensions. The "+ 1" column corresponds to bias. Summing this array along the last dimension should
-#' produce practically the same result as predict with \code{predcontrib = TRUE}.
-#' For a multiclass case, a list of \code{num_class} elements is returned, where each element is
-#' such an array.
-#'
-#' @seealso
-#' \code{\link{xgb.train}}.
-#'
-#' @references
-#'
-#' Scott M. Lundberg, Su-In Lee, "A Unified Approach to Interpreting Model Predictions", NIPS Proceedings 2017, \url{https://arxiv.org/abs/1705.07874}
-#'
-#' Scott M. Lundberg, Su-In Lee, "Consistent feature attribution for tree ensembles", \url{https://arxiv.org/abs/1706.06060}
-#'
-#' @examples
-#' ## binary classification:
-#'
-#' data(agaricus.train, package='xgboost')
-#' data(agaricus.test, package='xgboost')
-#' train <- agaricus.train
-#' test <- agaricus.test
-#'
-#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
-#'                eta = 0.5, nthread = 2, nrounds = 5, objective = "binary:logistic")
-#' # use all trees by default
-#' pred <- predict(bst, test$data)
-#' # use only the 1st tree
-#' pred1 <- predict(bst, test$data, ntreelimit = 1)
-#'
-#' # Predicting tree leaf indices:
-#' # the result is an nsamples X ntrees matrix
-#' pred_leaf <- predict(bst, test$data, predleaf = TRUE)
-#' str(pred_leaf)
-#'
-#' # Predicting feature contributions to predictions:
-#' # the result is an nsamples X (nfeatures + 1) matrix
-#' pred_contr <- predict(bst, test$data, predcontrib = TRUE)
-#' str(pred_contr)
-#' # verify that contributions' sums are equal to log-odds of predictions (up to float precision):
-#' summary(rowSums(pred_contr) - qlogis(pred))
-#' # for the 1st record, let's inspect its features that had non-zero contribution to prediction:
-#' contr1 <- pred_contr[1,]
-#' contr1 <- contr1[-length(contr1)]    # drop BIAS
-#' contr1 <- contr1[contr1 != 0]        # drop non-contributing features
-#' contr1 <- contr1[order(abs(contr1))] # order by contribution magnitude
-#' old_mar <- par("mar")
-#' par(mar = old_mar + c(0,7,0,0))
-#' barplot(contr1, horiz = TRUE, las = 2, xlab = "contribution to prediction in log-odds")
-#' par(mar = old_mar)
-#'
-#'
-#' ## multiclass classification in iris dataset:
-#'
-#' lb <- as.numeric(iris$Species) - 1
-#' num_class <- 3
-#' set.seed(11)
-#' bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
-#'                max_depth = 4, eta = 0.5, nthread = 2, nrounds = 10, subsample = 0.5,
-#'                objective = "multi:softprob", num_class = num_class)
-#' # predict for softmax returns num_class probability numbers per case:
-#' pred <- predict(bst, as.matrix(iris[, -5]))
-#' str(pred)
-#' # reshape it to a num_class-columns matrix
-#' pred <- matrix(pred, ncol=num_class, byrow=TRUE)
-#' # convert the probabilities to softmax labels
-#' pred_labels <- max.col(pred) - 1
-#' # the following should result in the same error as seen in the last iteration
-#' sum(pred_labels != lb)/length(lb)
-#'
-#' # compare that to the predictions from softmax:
-#' set.seed(11)
-#' bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
-#'                max_depth = 4, eta = 0.5, nthread = 2, nrounds = 10, subsample = 0.5,
-#'                objective = "multi:softmax", num_class = num_class)
-#' pred <- predict(bst, as.matrix(iris[, -5]))
-#' str(pred)
-#' all.equal(pred, pred_labels)
-#' # prediction from using only 5 iterations should result
-#' # in the same error as seen in iteration 5:
-#' pred5 <- predict(bst, as.matrix(iris[, -5]), ntreelimit=5)
-#' sum(pred5 != lb)/length(lb)
-#'
-#'
-#' ## random forest-like model of 25 trees for binary classification:
-#'
-#' set.seed(11)
-#' bst <- xgboost(data = train$data, label = train$label, max_depth = 5,
-#'                nthread = 2, nrounds = 1, objective = "binary:logistic",
-#'                num_parallel_tree = 25, subsample = 0.6, colsample_bytree = 0.1)
-#' # Inspect the prediction error vs number of trees:
-#' lb <- test$label
-#' dtest <- xgb.DMatrix(test$data, label=lb)
-#' err <- sapply(1:25, function(n) {
-#'   pred <- predict(bst, dtest, ntreelimit=n)
-#'   sum((pred > 0.5) != lb)/length(lb)
-#' })
-#' plot(err, type='l', ylim=c(0,0.1), xlab='#trees')
-#'
-#' @rdname predict.xgb.Booster
-#' @export
-predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FALSE, ntreelimit = NULL,
-                                predleaf = FALSE, predcontrib = FALSE, approxcontrib = FALSE, predinteraction = FALSE,
-                                reshape = FALSE, training = FALSE, ...) {
-
-  object <- xgb.Booster.complete(object, saveraw = FALSE)
-  if (!inherits(newdata, "xgb.DMatrix"))
-    newdata <- xgb.DMatrix(newdata, missing = missing)
-  if (!is.null(object[["feature_names"]]) &&
-      !is.null(colnames(newdata)) &&
-      !identical(object[["feature_names"]], colnames(newdata)))
-    stop("Feature names stored in `object` and `newdata` are different!")
-  if (is.null(ntreelimit))
-    ntreelimit <- NVL(object$best_ntreelimit, 0)
-  if (NVL(object$params[['booster']], '') == 'gblinear')
-    ntreelimit <- 0
-  if (ntreelimit < 0)
-    stop("ntreelimit cannot be negative")
-
-  option <- 0L + 1L * as.logical(outputmargin) + 2L * as.logical(predleaf) + 4L * as.logical(predcontrib) +
-    8L * as.logical(approxcontrib) + 16L * as.logical(predinteraction)
-
-  ret <- .Call(XGBoosterPredict_R, object$handle, newdata, option[1],
-               as.integer(ntreelimit), as.integer(training))
-
-  n_ret <- length(ret)
-  n_row <- nrow(newdata)
-  npred_per_case <- n_ret / n_row
-
-  if (n_ret %% n_row != 0)
-    stop("prediction length ", n_ret, " is not multiple of nrows(newdata) ", n_row)
-
-  if (predleaf) {
-    ret <- if (n_ret == n_row) {
-      matrix(ret, ncol = 1)
-    } else {
-      matrix(ret, nrow = n_row, byrow = TRUE)
-    }
-  } else if (predcontrib) {
-    n_col1 <- ncol(newdata) + 1
-    n_group <- npred_per_case / n_col1
-    cnames <- if (!is.null(colnames(newdata))) c(colnames(newdata), "BIAS") else NULL
-    ret <- if (n_ret == n_row) {
-      matrix(ret, ncol = 1, dimnames = list(NULL, cnames))
-    } else if (n_group == 1) {
-      matrix(ret, nrow = n_row, byrow = TRUE, dimnames = list(NULL, cnames))
-    } else {
-      arr <- array(ret, c(n_col1, n_group, n_row),
-                   dimnames = list(cnames, NULL, NULL)) %>% aperm(c(2, 3, 1)) # [group, row, col]
-      lapply(seq_len(n_group), function(g) arr[g, , ])
-    }
-  } else if (predinteraction) {
-    n_col1 <- ncol(newdata) + 1
-    n_group <- npred_per_case / n_col1^2
-    cnames <- if (!is.null(colnames(newdata))) c(colnames(newdata), "BIAS") else NULL
-    ret <- if (n_ret == n_row) {
-      matrix(ret, ncol = 1, dimnames = list(NULL, cnames))
-    } else if (n_group == 1) {
-      array(ret, c(n_col1, n_col1, n_row), dimnames = list(cnames, cnames, NULL)) %>% aperm(c(3, 1, 2))
-    } else {
-      arr <- array(ret, c(n_col1, n_col1, n_group, n_row),
-                   dimnames = list(cnames, cnames, NULL, NULL)) %>% aperm(c(3, 4, 1, 2)) # [group, row, col1, col2]
-      lapply(seq_len(n_group), function(g) arr[g, , , ])
-    }
-  } else if (reshape && npred_per_case > 1) {
-    ret <- matrix(ret, nrow = n_row, byrow = TRUE)
-  }
-  return(ret)
-}
-
-#' @rdname predict.xgb.Booster
-#' @export
-predict.xgb.Booster.handle <- function(object, ...) {
-
-  bst <- xgb.handleToBooster(object)
-
-  ret <- predict(bst, ...)
-  return(ret)
-}
-
-
-#' Accessors for serializable attributes of a model.
-#'
-#' These methods allow manipulating the key-value attribute strings of an xgboost model.
-#'
-#' @param object Object of class \code{xgb.Booster} or \code{xgb.Booster.handle}.
-#' @param name a non-empty character string specifying which attribute is to be accessed.
-#' @param value a value of an attribute for \code{xgb.attr<-}; for \code{xgb.attributes<-}
-#' it's a list (or an object coercible to a list) with the names of attributes to set
-#' and the elements corresponding to attribute values.
-#' Non-character values are converted to character.
-#' When attribute value is not a scalar, only the first index is used.
-#' Use \code{NULL} to remove an attribute.
-#'
-#' @details
-#' The primary purpose of xgboost model attributes is to store some meta-data about the model.
-#' Note that they are a separate concept from the object attributes in R.
-#' Specifically, they refer to key-value strings that can be attached to an xgboost model,
-#' stored together with the model's binary representation, and accessed later
-#' (from R or any other interface).
-#' In contrast, any R-attribute assigned to an R-object of \code{xgb.Booster} class
-#' would not be saved by \code{xgb.save} because an xgboost model is an external memory object
-#' and its serialization is handled externally.
-#' Also, setting an attribute that has the same name as one of xgboost's parameters wouldn't
-#' change the value of that parameter for a model.
-#' Use \code{\link{xgb.parameters<-}} to set or change model parameters.
-#'
-#' The attribute setters would usually work more efficiently for \code{xgb.Booster.handle}
-#' than for \code{xgb.Booster}, since only a handle (pointer) would need to be copied.
-#' That would only matter if attributes need to be set many times.
-#' Note, however, that when feeding a handle of an \code{xgb.Booster} object to the attribute setters,
-#' the raw model cache of an \code{xgb.Booster} object would not be automatically updated,
-#' and it would be the user's responsibility to call \code{xgb.serialize} to update it.
-#'
-#' The \code{xgb.attributes<-} setter either updates the existing or adds one or several attributes,
-#' but it doesn't delete the other existing attributes.
-#'
-#' @return
-#' \code{xgb.attr} returns either a string value of an attribute
-#' or \code{NULL} if an attribute wasn't stored in a model.
-#'
-#' \code{xgb.attributes} returns a list of all attributes stored in a model
-#' or \code{NULL} if a model has no stored attributes.
-#'
-#' @examples
-#' data(agaricus.train, package='xgboost')
-#' train <- agaricus.train
-#'
-#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
-#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
-#'
-#' xgb.attr(bst, "my_attribute") <- "my attribute value"
-#' print(xgb.attr(bst, "my_attribute"))
-#' xgb.attributes(bst) <- list(a = 123, b = "abc")
-#'
-#' xgb.save(bst, 'xgb.model')
-#' bst1 <- xgb.load('xgb.model')
-#' if (file.exists('xgb.model')) file.remove('xgb.model')
-#' print(xgb.attr(bst1, "my_attribute"))
-#' print(xgb.attributes(bst1))
-#'
-#' # deletion:
-#' xgb.attr(bst1, "my_attribute") <- NULL
-#' print(xgb.attributes(bst1))
-#' xgb.attributes(bst1) <- list(a = NULL, b = NULL)
-#' print(xgb.attributes(bst1))
-#'
-#' @rdname xgb.attr
-#' @export
-xgb.attr <- function(object, name) {
-  if (is.null(name) || nchar(as.character(name[1])) == 0) stop("invalid attribute name")
-  handle <- xgb.get.handle(object)
-  .Call(XGBoosterGetAttr_R, handle, as.character(name[1]))
-}
-
-#' @rdname xgb.attr
-#' @export
-`xgb.attr<-` <- function(object, name, value) {
-  if (is.null(name) || nchar(as.character(name[1])) == 0) stop("invalid attribute name")
-  handle <- xgb.get.handle(object)
-  if (!is.null(value)) {
-    # Coerce the elements to be scalar strings.
-    # Q: should we warn the user about non-scalar elements?
-    if (is.numeric(value[1])) {
-      value <- format(value[1], digits = 17)
-    } else {
-      value <- as.character(value[1])
-    }
-  }
-  .Call(XGBoosterSetAttr_R, handle, as.character(name[1]), value)
-  if (is(object, 'xgb.Booster') && !is.null(object$raw)) {
-    object$raw <- xgb.serialize(object$handle)
-  }
-  object
-}
-
-#' @rdname xgb.attr
-#' @export
-xgb.attributes <- function(object) {
-  handle <- xgb.get.handle(object)
-  attr_names <- .Call(XGBoosterGetAttrNames_R, handle)
-  if (is.null(attr_names)) return(NULL)
-  res <- lapply(attr_names, function(x) {
-    .Call(XGBoosterGetAttr_R, handle, x)
-  })
-  names(res) <- attr_names
-  res
-}
-
-#' @rdname xgb.attr
-#' @export
-`xgb.attributes<-` <- function(object, value) {
-  a <- as.list(value)
-  if (is.null(names(a)) || any(nchar(names(a)) == 0)) {
-    stop("attribute names cannot be empty strings")
-  }
-  # Coerce the elements to be scalar strings.
-  # Q: should we warn the user about non-scalar elements?
-  a <- lapply(a, function(x) {
-    if (is.null(x)) return(NULL)
-    if (is.numeric(x[1])) {
-      format(x[1], digits = 17)
-    } else {
-      as.character(x[1])
-    }
-  })
-  handle <- xgb.get.handle(object)
-  for (i in seq_along(a)) {
-    .Call(XGBoosterSetAttr_R, handle, names(a[i]), a[[i]])
-  }
-  if (is(object, 'xgb.Booster') && !is.null(object$raw)) {
-    object$raw <- xgb.serialize(object$handle)
-  }
-  object
-}
-
-#' Accessors for model parameters as JSON string.
-#'
-#' @param object Object of class \code{xgb.Booster}
-#' @param value A JSON string.
-#'
-#' @examples
-#' data(agaricus.train, package='xgboost')
-#' train <- agaricus.train
-#'
-#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
-#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
-#' config <- xgb.config(bst)
-#'
-#' @rdname xgb.config
-#' @export
-xgb.config <- function(object) {
-  handle <- xgb.get.handle(object)
-  .Call(XGBoosterSaveJsonConfig_R, handle)
-}
-
-#' @rdname xgb.config
-#' @export
-`xgb.config<-` <- function(object, value) {
-  handle <- xgb.get.handle(object)
-  .Call(XGBoosterLoadJsonConfig_R, handle, value)
-  object$raw <- NULL # force renewal of the raw buffer
-  object <- xgb.Booster.complete(object)
-  object
-}
-
-#' Accessors for model parameters.
-#'
-#' Only the setter for xgboost parameters is currently implemented.
-#'
-#' @param object Object of class \code{xgb.Booster} or \code{xgb.Booster.handle}.
-#' @param value a list (or an object coercible to a list) with the names of parameters to set
-#' and the elements corresponding to parameter values.
-#'
-#' @details
-#' Note that the setter would usually work more efficiently for \code{xgb.Booster.handle}
-#' than for \code{xgb.Booster}, since only a handle would need to be copied.
-#'
-#' @examples
-#' data(agaricus.train, package='xgboost')
-#' train <- agaricus.train
-#'
-#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
-#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
-#'
-#' xgb.parameters(bst) <- list(eta = 0.1)
-#'
-#' @rdname xgb.parameters
-#' @export
-`xgb.parameters<-` <- function(object, value) {
-  if (length(value) == 0) return(object)
-  p <- as.list(value)
-  if (is.null(names(p)) || any(nchar(names(p)) == 0)) {
-    stop("parameter names cannot be empty strings")
-  }
-  names(p) <- gsub("\\.", "_", names(p))
-  p <- lapply(p, function(x) as.character(x)[1])
-  handle <- xgb.get.handle(object)
-  for (i in seq_along(p)) {
-    .Call(XGBoosterSetParam_R, handle, names(p[i]), p[[i]])
-  }
-  if (is(object, 'xgb.Booster') && !is.null(object$raw)) {
-    object$raw <- xgb.serialize(object$handle)
-  }
-  object
-}
-
-# Extract the number of trees in a model.
-# TODO: either add a getter to C-interface, or simply set an 'ntree' attribute after each iteration.
-# internal utility function
-xgb.ntree <- function(bst) {
-  length(grep('^booster', xgb.dump(bst)))
-}
-
-
-#' Print xgb.Booster
-#'
-#' Print information about xgb.Booster.
-#'
-#' @param x an xgb.Booster object
-#' @param verbose whether to print detailed data (e.g., attribute values)
-#' @param ... not currently used
-#'
-#' @examples
-#' data(agaricus.train, package='xgboost')
-#' train <- agaricus.train
-#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
-#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
-#' attr(bst, 'myattr') <- 'memo'
-#'
-#' print(bst)
-#' print(bst, verbose=TRUE)
-#'
-#' @method print xgb.Booster
-#' @export
-print.xgb.Booster <- function(x, verbose = FALSE, ...) {
-  cat('##### xgb.Booster\n')
-
-  valid_handle <- !is.null.handle(x$handle)
-  if (!valid_handle)
-    cat("Handle is invalid! Suggest using xgb.Booster.complete\n")
-
-  cat('raw: ')
-  if (!is.null(x$raw)) {
-    cat(format(object.size(x$raw), units = "auto"), '\n')
-  } else {
-    cat('NULL\n')
-  }
-  if (!is.null(x$call)) {
-    cat('call:\n  ')
-    print(x$call)
-  }
-
-  if (!is.null(x$params)) {
-    cat('params (as set within xgb.train):\n')
-    cat('  ',
-        paste(names(x$params),
-              paste0('"', unlist(x$params), '"'),
-              sep = ' = ', collapse = ', '), '\n', sep = '')
-  }
-  # TODO: need an interface to access all the xgboosts parameters
-
-  attrs <- character(0)
-  if (valid_handle)
-    attrs <- xgb.attributes(x)
-  if (length(attrs) > 0) {
-    cat('xgb.attributes:\n')
-    if (verbose) {
-      cat(paste(paste0('  ', names(attrs)),
-                paste0('"', unlist(attrs), '"'),
-                sep = ' = ', collapse = '\n'), '\n', sep = '')
-    } else {
-      cat('  ', paste(names(attrs), collapse = ', '), '\n', sep = '')
-    }
-  }
-
-  if (!is.null(x$callbacks) && length(x$callbacks) > 0) {
-    cat('callbacks:\n')
-    lapply(callback.calls(x$callbacks), function(x) {
-      cat('  ')
-      print(x)
-    })
-  }
-
-  if (!is.null(x$feature_names))
-    cat('# of features:', length(x$feature_names), '\n')
-
-  cat('niter: ', x$niter, '\n', sep = '')
-  # TODO: uncomment when faster xgb.ntree is implemented
-  #cat('ntree: ', xgb.ntree(x), '\n', sep='')
-
-  for (n in setdiff(names(x), c('handle', 'raw', 'call', 'params', 'callbacks',
-                                'evaluation_log','niter','feature_names'))) {
-    if (is.atomic(x[[n]])) {
-      cat(n, ':', x[[n]], '\n', sep = ' ')
-    } else {
-      cat(n, ':\n\t', sep = ' ')
-      print(x[[n]])
-    }
-  }
-
-  if (!is.null(x$evaluation_log)) {
-    cat('evaluation_log:\n')
-    print(x$evaluation_log, row.names = FALSE, topn = 2)
-  }
-
-  invisible(x)
-}
diff --git a/ml-xgboost/R-package/R/xgb.DMatrix.R b/ml-xgboost/R-package/R/xgb.DMatrix.R
deleted file mode 100644
index 4201a83..0000000
--- a/ml-xgboost/R-package/R/xgb.DMatrix.R
+++ /dev/null
@@ -1,380 +0,0 @@
-#' Construct xgb.DMatrix object
-#'
-#' Construct xgb.DMatrix object from either a dense matrix, a sparse matrix, or a local file.
-#' Supported input file formats are either a libsvm text file or a binary file that was created previously by
-#' \code{\link{xgb.DMatrix.save}}).
-#'
-#' @param data a \code{matrix} object (either numeric or integer), a \code{dgCMatrix} object, or a character
-#'        string representing a filename.
-#' @param info a named list of additional information to store in the \code{xgb.DMatrix} object.
-#'        See \code{\link{setinfo}} for the specific allowed kinds of
-#' @param missing a float value to represents missing values in data (used only when input is a dense matrix).
-#'        It is useful when a 0 or some other extreme value represents missing values in data.
-#' @param silent whether to suppress printing an informational message after loading from a file.
-#' @param ... the \code{info} data could be passed directly as parameters, without creating an \code{info} list.
-#'
-#' @examples
-#' data(agaricus.train, package='xgboost')
-#' train <- agaricus.train
-#' dtrain <- xgb.DMatrix(train$data, label=train$label)
-#' xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
-#' dtrain <- xgb.DMatrix('xgb.DMatrix.data')
-#' if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data')
-#' @export
-xgb.DMatrix <- function(data, info = list(), missing = NA, silent = FALSE, ...) {
-  cnames <- NULL
-  if (typeof(data) == "character") {
-    if (length(data) > 1)
-      stop("'data' has class 'character' and length ", length(data),
-           ".\n  'data' accepts either a numeric matrix or a single filename.")
-    handle <- .Call(XGDMatrixCreateFromFile_R, data, as.integer(silent))
-  } else if (is.matrix(data)) {
-    handle <- .Call(XGDMatrixCreateFromMat_R, data, missing)
-    cnames <- colnames(data)
-  } else if (inherits(data, "dgCMatrix")) {
-    handle <- .Call(XGDMatrixCreateFromCSC_R, data@p, data@i, data@x, nrow(data))
-    cnames <- colnames(data)
-  } else {
-    stop("xgb.DMatrix does not support construction from ", typeof(data))
-  }
-  dmat <- handle
-  attributes(dmat) <- list(.Dimnames = list(NULL, cnames), class = "xgb.DMatrix")
-
-  info <- append(info, list(...))
-  for (i in seq_along(info)) {
-    p <- info[i]
-    setinfo(dmat, names(p), p[[1]])
-  }
-  return(dmat)
-}
-
-
-# get dmatrix from data, label
-# internal helper method
-xgb.get.DMatrix <- function(data, label = NULL, missing = NA, weight = NULL) {
-  if (inherits(data, "dgCMatrix") || is.matrix(data)) {
-    if (is.null(label)) {
-      stop("label must be provided when data is a matrix")
-    }
-    dtrain <- xgb.DMatrix(data, label = label, missing = missing)
-    if (!is.null(weight)){
-      setinfo(dtrain, "weight", weight)
-    }
-  } else {
-    if (!is.null(label)) {
-      warning("xgboost: label will be ignored.")
-    }
-    if (is.character(data)) {
-      dtrain <- xgb.DMatrix(data[1])
-    } else if (inherits(data, "xgb.DMatrix")) {
-      dtrain <- data
-    } else if (inherits(data, "data.frame")) {
-      stop("xgboost doesn't support data.frame as input. Convert it to matrix first.")
-    } else {
-      stop("xgboost: invalid input data")
-    }
-  }
-  return (dtrain)
-}
-
-
-#' Dimensions of xgb.DMatrix
-#'
-#' Returns a vector of numbers of rows and of columns in an \code{xgb.DMatrix}.
-#' @param x Object of class \code{xgb.DMatrix}
-#'
-#' @details
-#' Note: since \code{nrow} and \code{ncol} internally use \code{dim}, they can also
-#' be directly used with an \code{xgb.DMatrix} object.
-#'
-#' @examples
-#' data(agaricus.train, package='xgboost')
-#' train <- agaricus.train
-#' dtrain <- xgb.DMatrix(train$data, label=train$label)
-#'
-#' stopifnot(nrow(dtrain) == nrow(train$data))
-#' stopifnot(ncol(dtrain) == ncol(train$data))
-#' stopifnot(all(dim(dtrain) == dim(train$data)))
-#'
-#' @export
-dim.xgb.DMatrix <- function(x) {
-  c(.Call(XGDMatrixNumRow_R, x), .Call(XGDMatrixNumCol_R, x))
-}
-
-
-#' Handling of column names of \code{xgb.DMatrix}
-#'
-#' Only column names are supported for \code{xgb.DMatrix}, thus setting of
-#' row names would have no effect and returned row names would be NULL.
-#'
-#' @param x object of class \code{xgb.DMatrix}
-#' @param value a list of two elements: the first one is ignored
-#'        and the second one is column names
-#'
-#' @details
-#' Generic \code{dimnames} methods are used by \code{colnames}.
-#' Since row names are irrelevant, it is recommended to use \code{colnames} directly.
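A minimal sketch of the constructor paths and the dimnames accessors documented above, reusing the agaricus example data from these help pages (assumes the xgboost package is attached):

    data(agaricus.train, package = 'xgboost')
    train <- agaricus.train
    dtrain <- xgb.DMatrix(train$data, label = train$label)  # dgCMatrix path
    dim(dtrain)                    # row/column counts from the C++ side
    head(colnames(dtrain))         # colnames carried over from the input matrix
    dimnames(dtrain) <- list(NULL, make.names(seq_len(ncol(dtrain))))  # rename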
-#' -#' @examples -#' data(agaricus.train, package='xgboost') -#' train <- agaricus.train -#' dtrain <- xgb.DMatrix(train$data, label=train$label) -#' dimnames(dtrain) -#' colnames(dtrain) -#' colnames(dtrain) <- make.names(1:ncol(train$data)) -#' print(dtrain, verbose=TRUE) -#' -#' @rdname dimnames.xgb.DMatrix -#' @export -dimnames.xgb.DMatrix <- function(x) { - attr(x, '.Dimnames') -} - -#' @rdname dimnames.xgb.DMatrix -#' @export -`dimnames<-.xgb.DMatrix` <- function(x, value) { - if (!is.list(value) || length(value) != 2L) - stop("invalid 'dimnames' given: must be a list of two elements") - if (!is.null(value[[1L]])) - stop("xgb.DMatrix does not have rownames") - if (is.null(value[[2]])) { - attr(x, '.Dimnames') <- NULL - return(x) - } - if (ncol(x) != length(value[[2]])) - stop("can't assign ", length(value[[2]]), " colnames to a ", - ncol(x), " column xgb.DMatrix") - attr(x, '.Dimnames') <- value - x -} - - -#' Get information of an xgb.DMatrix object -#' -#' Get information of an xgb.DMatrix object -#' @param object Object of class \code{xgb.DMatrix} -#' @param name the name of the information field to get (see details) -#' @param ... other parameters -#' -#' @details -#' The \code{name} field can be one of the following: -#' -#' \itemize{ -#' \item \code{label}: label Xgboost learn from ; -#' \item \code{weight}: to do a weight rescale ; -#' \item \code{base_margin}: base margin is the base prediction Xgboost will boost from ; -#' \item \code{nrow}: number of rows of the \code{xgb.DMatrix}. -#' -#' } -#' -#' \code{group} can be setup by \code{setinfo} but can't be retrieved by \code{getinfo}. -#' -#' @examples -#' data(agaricus.train, package='xgboost') -#' train <- agaricus.train -#' dtrain <- xgb.DMatrix(train$data, label=train$label) -#' -#' labels <- getinfo(dtrain, 'label') -#' setinfo(dtrain, 'label', 1-labels) -#' -#' labels2 <- getinfo(dtrain, 'label') -#' stopifnot(all(labels2 == 1-labels)) -#' @rdname getinfo -#' @export -getinfo <- function(object, ...) UseMethod("getinfo") - -#' @rdname getinfo -#' @export -getinfo.xgb.DMatrix <- function(object, name, ...) { - if (typeof(name) != "character" || - length(name) != 1 || - !name %in% c('label', 'weight', 'base_margin', 'nrow', - 'label_lower_bound', 'label_upper_bound')) { - stop("getinfo: name must be one of the following\n", - " 'label', 'weight', 'base_margin', 'nrow', 'label_lower_bound', 'label_upper_bound'") - } - if (name != "nrow"){ - ret <- .Call(XGDMatrixGetInfo_R, object, name) - } else { - ret <- nrow(object) - } - if (length(ret) == 0) return(NULL) - return(ret) -} - - -#' Set information of an xgb.DMatrix object -#' -#' Set information of an xgb.DMatrix object -#' -#' @param object Object of class "xgb.DMatrix" -#' @param name the name of the field to get -#' @param info the specific field of information to set -#' @param ... other parameters -#' -#' @details -#' The \code{name} field can be one of the following: -#' -#' \itemize{ -#' \item \code{label}: label Xgboost learn from ; -#' \item \code{weight}: to do a weight rescale ; -#' \item \code{base_margin}: base margin is the base prediction Xgboost will boost from ; -#' \item \code{group}: number of rows in each group (to use with \code{rank:pairwise} objective). 
-#' } -#' -#' @examples -#' data(agaricus.train, package='xgboost') -#' train <- agaricus.train -#' dtrain <- xgb.DMatrix(train$data, label=train$label) -#' -#' labels <- getinfo(dtrain, 'label') -#' setinfo(dtrain, 'label', 1-labels) -#' labels2 <- getinfo(dtrain, 'label') -#' stopifnot(all.equal(labels2, 1-labels)) -#' @rdname setinfo -#' @export -setinfo <- function(object, ...) UseMethod("setinfo") - -#' @rdname setinfo -#' @export -setinfo.xgb.DMatrix <- function(object, name, info, ...) { - if (name == "label") { - if (length(info) != nrow(object)) - stop("The length of labels must equal to the number of rows in the input data") - .Call(XGDMatrixSetInfo_R, object, name, as.numeric(info)) - return(TRUE) - } - if (name == "label_lower_bound") { - if (length(info) != nrow(object)) - stop("The length of lower-bound labels must equal to the number of rows in the input data") - .Call(XGDMatrixSetInfo_R, object, name, as.numeric(info)) - return(TRUE) - } - if (name == "label_upper_bound") { - if (length(info) != nrow(object)) - stop("The length of upper-bound labels must equal to the number of rows in the input data") - .Call(XGDMatrixSetInfo_R, object, name, as.numeric(info)) - return(TRUE) - } - if (name == "weight") { - if (length(info) != nrow(object)) - stop("The length of weights must equal to the number of rows in the input data") - .Call(XGDMatrixSetInfo_R, object, name, as.numeric(info)) - return(TRUE) - } - if (name == "base_margin") { - # if (length(info)!=nrow(object)) - # stop("The length of base margin must equal to the number of rows in the input data") - .Call(XGDMatrixSetInfo_R, object, name, as.numeric(info)) - return(TRUE) - } - if (name == "group") { - if (sum(info) != nrow(object)) - stop("The sum of groups must equal to the number of rows in the input data") - .Call(XGDMatrixSetInfo_R, object, name, as.integer(info)) - return(TRUE) - } - stop("setinfo: unknown info name ", name) - return(FALSE) -} - - -#' Get a new DMatrix containing the specified rows of -#' original xgb.DMatrix object -#' -#' Get a new DMatrix containing the specified rows of -#' original xgb.DMatrix object -#' -#' @param object Object of class "xgb.DMatrix" -#' @param idxset a integer vector of indices of rows needed -#' @param colset currently not used (columns subsetting is not available) -#' @param ... other parameters (currently not used) -#' -#' @examples -#' data(agaricus.train, package='xgboost') -#' train <- agaricus.train -#' dtrain <- xgb.DMatrix(train$data, label=train$label) -#' -#' dsub <- slice(dtrain, 1:42) -#' labels1 <- getinfo(dsub, 'label') -#' dsub <- dtrain[1:42, ] -#' labels2 <- getinfo(dsub, 'label') -#' all.equal(labels1, labels2) -#' -#' @rdname slice.xgb.DMatrix -#' @export -slice <- function(object, ...) UseMethod("slice") - -#' @rdname slice.xgb.DMatrix -#' @export -slice.xgb.DMatrix <- function(object, idxset, ...) 
{ - if (!inherits(object, "xgb.DMatrix")) { - stop("object must be xgb.DMatrix") - } - ret <- .Call(XGDMatrixSliceDMatrix_R, object, idxset) - - attr_list <- attributes(object) - nr <- nrow(object) - len <- sapply(attr_list, NROW) - ind <- which(len == nr) - if (length(ind) > 0) { - nms <- names(attr_list)[ind] - for (i in seq_along(ind)) { - obj_attr <- attr(object, nms[i]) - if (NCOL(obj_attr) > 1) { - attr(ret, nms[i]) <- obj_attr[idxset,] - } else { - attr(ret, nms[i]) <- obj_attr[idxset] - } - } - } - return(structure(ret, class = "xgb.DMatrix")) -} - -#' @rdname slice.xgb.DMatrix -#' @export -`[.xgb.DMatrix` <- function(object, idxset, colset = NULL) { - slice(object, idxset) -} - - -#' Print xgb.DMatrix -#' -#' Print information about xgb.DMatrix. -#' Currently it displays dimensions and presence of info-fields and colnames. -#' -#' @param x an xgb.DMatrix object -#' @param verbose whether to print colnames (when present) -#' @param ... not currently used -#' -#' @examples -#' data(agaricus.train, package='xgboost') -#' train <- agaricus.train -#' dtrain <- xgb.DMatrix(train$data, label=train$label) -#' -#' dtrain -#' print(dtrain, verbose=TRUE) -#' -#' @method print xgb.DMatrix -#' @export -print.xgb.DMatrix <- function(x, verbose = FALSE, ...) { - cat('xgb.DMatrix dim:', nrow(x), 'x', ncol(x), ' info: ') - infos <- c() - if(length(getinfo(x, 'label')) > 0) infos <- 'label' - if(length(getinfo(x, 'weight')) > 0) infos <- c(infos, 'weight') - if(length(getinfo(x, 'base_margin')) > 0) infos <- c(infos, 'base_margin') - if (length(infos) == 0) infos <- 'NA' - cat(infos) - cnames <- colnames(x) - cat(' colnames:') - if (verbose & !is.null(cnames)) { - cat("\n'") - cat(cnames, sep = "','") - cat("'") - } else { - if (is.null(cnames)) cat(' no') - else cat(' yes') - } - cat("\n") - invisible(x) -} diff --git a/ml-xgboost/R-package/R/xgb.DMatrix.save.R b/ml-xgboost/R-package/R/xgb.DMatrix.save.R deleted file mode 100644 index 1c659e5..0000000 --- a/ml-xgboost/R-package/R/xgb.DMatrix.save.R +++ /dev/null @@ -1,24 +0,0 @@ -#' Save xgb.DMatrix object to binary file -#' -#' Save xgb.DMatrix object to binary file -#' -#' @param dmatrix the \code{xgb.DMatrix} object -#' @param fname the name of the file to write. -#' -#' @examples -#' data(agaricus.train, package='xgboost') -#' train <- agaricus.train -#' dtrain <- xgb.DMatrix(train$data, label=train$label) -#' xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data') -#' dtrain <- xgb.DMatrix('xgb.DMatrix.data') -#' if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data') -#' @export -xgb.DMatrix.save <- function(dmatrix, fname) { - if (typeof(fname) != "character") - stop("fname must be character") - if (!inherits(dmatrix, "xgb.DMatrix")) - stop("dmatrix must be xgb.DMatrix") - - .Call(XGDMatrixSaveBinary_R, dmatrix, fname[1], 0L) - return(TRUE) -} diff --git a/ml-xgboost/R-package/R/xgb.create.features.R b/ml-xgboost/R-package/R/xgb.create.features.R deleted file mode 100644 index b8be649..0000000 --- a/ml-xgboost/R-package/R/xgb.create.features.R +++ /dev/null @@ -1,87 +0,0 @@ -#' Create new features from a previously learned model -#' -#' May improve the learning by adding new features to the training data based on the decision trees from a previously learned model. -#' -#' @param model decision tree boosting model learned on the original data -#' @param data original data (usually provided as a \code{dgCMatrix} matrix) -#' @param ... 
currently not used -#' -#' @return \code{dgCMatrix} matrix including both the original data and the new features. -#' -#' @details -#' This is the function inspired from the paragraph 3.1 of the paper: -#' -#' \strong{Practical Lessons from Predicting Clicks on Ads at Facebook} -#' -#' \emph{(Xinran He, Junfeng Pan, Ou Jin, Tianbing Xu, Bo Liu, Tao Xu, Yan, xin Shi, Antoine Atallah, Ralf Herbrich, Stuart Bowers, -#' Joaquin Quinonero Candela)} -#' -#' International Workshop on Data Mining for Online Advertising (ADKDD) - August 24, 2014 -#' -#' \url{https://research.fb.com/publications/practical-lessons-from-predicting-clicks-on-ads-at-facebook/}. -#' -#' Extract explaining the method: -#' -#' "We found that boosted decision trees are a powerful and very -#' convenient way to implement non-linear and tuple transformations -#' of the kind we just described. We treat each individual -#' tree as a categorical feature that takes as value the -#' index of the leaf an instance ends up falling in. We use -#' 1-of-K coding of this type of features. -#' -#' For example, consider the boosted tree model in Figure 1 with 2 subtrees, -#' where the first subtree has 3 leafs and the second 2 leafs. If an -#' instance ends up in leaf 2 in the first subtree and leaf 1 in -#' second subtree, the overall input to the linear classifier will -#' be the binary vector \code{[0, 1, 0, 1, 0]}, where the first 3 entries -#' correspond to the leaves of the first subtree and last 2 to -#' those of the second subtree. -#' -#' [...] -#' -#' We can understand boosted decision tree -#' based transformation as a supervised feature encoding that -#' converts a real-valued vector into a compact binary-valued -#' vector. A traversal from root node to a leaf node represents -#' a rule on certain features." -#' -#' @examples -#' data(agaricus.train, package='xgboost') -#' data(agaricus.test, package='xgboost') -#' dtrain <- xgb.DMatrix(data = agaricus.train$data, label = agaricus.train$label) -#' dtest <- xgb.DMatrix(data = agaricus.test$data, label = agaricus.test$label) -#' -#' param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic') -#' nrounds = 4 -#' -#' bst = xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2) -#' -#' # Model accuracy without new features -#' accuracy.before <- sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label) / -#' length(agaricus.test$label) -#' -#' # Convert previous features to one hot encoding -#' new.features.train <- xgb.create.features(model = bst, agaricus.train$data) -#' new.features.test <- xgb.create.features(model = bst, agaricus.test$data) -#' -#' # learning with new features -#' new.dtrain <- xgb.DMatrix(data = new.features.train, label = agaricus.train$label) -#' new.dtest <- xgb.DMatrix(data = new.features.test, label = agaricus.test$label) -#' watchlist <- list(train = new.dtrain) -#' bst <- xgb.train(params = param, data = new.dtrain, nrounds = nrounds, nthread = 2) -#' -#' # Model accuracy with new features -#' accuracy.after <- sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label) / -#' length(agaricus.test$label) -#' -#' # Here the accuracy was already good and is now perfect. -#' cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now", -#' accuracy.after, "!\n")) -#' -#' @export -xgb.create.features <- function(model, data, ...){ - check.deprecation(...) 
- pred_with_leaf <- predict(model, data, predleaf = TRUE) - cols <- lapply(as.data.frame(pred_with_leaf), factor) - cbind(data, sparse.model.matrix( ~ . -1, cols)) -} diff --git a/ml-xgboost/R-package/R/xgb.cv.R b/ml-xgboost/R-package/R/xgb.cv.R deleted file mode 100644 index fdfcb59..0000000 --- a/ml-xgboost/R-package/R/xgb.cv.R +++ /dev/null @@ -1,319 +0,0 @@ -#' Cross Validation -#' -#' The cross validation function of xgboost -#' -#' @param params the list of parameters. Commonly used ones are: -#' \itemize{ -#' \item \code{objective} objective function, common ones are -#' \itemize{ -#' \item \code{reg:squarederror} Regression with squared loss -#' \item \code{binary:logistic} logistic regression for classification -#' } -#' \item \code{eta} step size of each boosting step -#' \item \code{max_depth} maximum depth of the tree -#' \item \code{nthread} number of thread used in training, if not set, all threads are used -#' } -#' -#' See \code{\link{xgb.train}} for further details. -#' See also demo/ for walkthrough example in R. -#' @param data takes an \code{xgb.DMatrix}, \code{matrix}, or \code{dgCMatrix} as the input. -#' @param nrounds the max number of iterations -#' @param nfold the original dataset is randomly partitioned into \code{nfold} equal size subsamples. -#' @param label vector of response values. Should be provided only when data is an R-matrix. -#' @param missing is only used when input is a dense matrix. By default is set to NA, which means -#' that NA values should be considered as 'missing' by the algorithm. -#' Sometimes, 0 or other extreme value might be used to represent missing values. -#' @param prediction A logical value indicating whether to return the test fold predictions -#' from each CV model. This parameter engages the \code{\link{cb.cv.predict}} callback. -#' @param showsd \code{boolean}, whether to show standard deviation of cross validation -#' @param metrics, list of evaluation metrics to be used in cross validation, -#' when it is not specified, the evaluation metric is chosen according to objective function. -#' Possible options are: -#' \itemize{ -#' \item \code{error} binary classification error rate -#' \item \code{rmse} Rooted mean square error -#' \item \code{logloss} negative log-likelihood function -#' \item \code{auc} Area under curve -#' \item \code{aucpr} Area under PR curve -#' \item \code{merror} Exact matching error, used to evaluate multi-class classification -#' } -#' @param obj customized objective function. Returns gradient and second order -#' gradient with given prediction and dtrain. -#' @param feval customized evaluation function. Returns -#' \code{list(metric='metric-name', value='metric-value')} with given -#' prediction and dtrain. -#' @param stratified a \code{boolean} indicating whether sampling of folds should be stratified -#' by the values of outcome labels. -#' @param folds \code{list} provides a possibility to use a list of pre-defined CV folds -#' (each element must be a vector of test fold's indices). When folds are supplied, -#' the \code{nfold} and \code{stratified} parameters are ignored. -#' @param train_folds \code{list} list specifying which indicies to use for training. If \code{NULL} -#' (the default) all indices not specified in \code{folds} will be used for training. -#' @param verbose \code{boolean}, print the statistics during the process -#' @param print_every_n Print each n-th iteration evaluation messages when \code{verbose>0}. -#' Default is 1 which means all messages are printed. 
This parameter is passed to the -#' \code{\link{cb.print.evaluation}} callback. -#' @param early_stopping_rounds If \code{NULL}, the early stopping function is not triggered. -#' If set to an integer \code{k}, training with a validation set will stop if the performance -#' doesn't improve for \code{k} rounds. -#' Setting this parameter engages the \code{\link{cb.early.stop}} callback. -#' @param maximize If \code{feval} and \code{early_stopping_rounds} are set, -#' then this parameter must be set as well. -#' When it is \code{TRUE}, it means the larger the evaluation score the better. -#' This parameter is passed to the \code{\link{cb.early.stop}} callback. -#' @param callbacks a list of callback functions to perform various task during boosting. -#' See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the -#' parameters' values. User can provide either existing or their own callback methods in order -#' to customize the training process. -#' @param ... other parameters to pass to \code{params}. -#' -#' @details -#' The original sample is randomly partitioned into \code{nfold} equal size subsamples. -#' -#' Of the \code{nfold} subsamples, a single subsample is retained as the validation data for testing the model, and the remaining \code{nfold - 1} subsamples are used as training data. -#' -#' The cross-validation process is then repeated \code{nrounds} times, with each of the \code{nfold} subsamples used exactly once as the validation data. -#' -#' All observations are used for both training and validation. -#' -#' Adapted from \url{http://en.wikipedia.org/wiki/Cross-validation_\%28statistics\%29#k-fold_cross-validation} -#' -#' @return -#' An object of class \code{xgb.cv.synchronous} with the following elements: -#' \itemize{ -#' \item \code{call} a function call. -#' \item \code{params} parameters that were passed to the xgboost library. Note that it does not -#' capture parameters changed by the \code{\link{cb.reset.parameters}} callback. -#' \item \code{callbacks} callback functions that were either automatically assigned or -#' explicitly passed. -#' \item \code{evaluation_log} evaluation history stored as a \code{data.table} with the -#' first column corresponding to iteration number and the rest corresponding to the -#' CV-based evaluation means and standard deviations for the training and test CV-sets. -#' It is created by the \code{\link{cb.evaluation.log}} callback. -#' \item \code{niter} number of boosting iterations. -#' \item \code{nfeatures} number of features in training data. -#' \item \code{folds} the list of CV folds' indices - either those passed through the \code{folds} -#' parameter or randomly generated. -#' \item \code{best_iteration} iteration number with the best evaluation metric value -#' (only available with early stopping). -#' \item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration, -#' which could further be used in \code{predict} method -#' (only available with early stopping). -#' \item \code{pred} CV prediction values available when \code{prediction} is set. -#' It is either vector or matrix (see \code{\link{cb.cv.predict}}). -#' \item \code{models} a list of the CV folds' models. It is only available with the explicit -#' setting of the \code{cb.cv.predict(save_models = TRUE)} callback. 
-#' } -#' -#' @examples -#' data(agaricus.train, package='xgboost') -#' dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label) -#' cv <- xgb.cv(data = dtrain, nrounds = 3, nthread = 2, nfold = 5, metrics = list("rmse","auc"), -#' max_depth = 3, eta = 1, objective = "binary:logistic") -#' print(cv) -#' print(cv, verbose=TRUE) -#' -#' @export -xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing = NA, - prediction = FALSE, showsd = TRUE, metrics=list(), - obj = NULL, feval = NULL, stratified = TRUE, folds = NULL, train_folds = NULL, - verbose = TRUE, print_every_n=1L, - early_stopping_rounds = NULL, maximize = NULL, callbacks = list(), ...) { - - check.deprecation(...) - - params <- check.booster.params(params, ...) - # TODO: should we deprecate the redundant 'metrics' parameter? - for (m in metrics) - params <- c(params, list("eval_metric" = m)) - - check.custom.obj() - check.custom.eval() - - #if (is.null(params[['eval_metric']]) && is.null(feval)) - # stop("Either 'eval_metric' or 'feval' must be provided for CV") - - # Check the labels - if ( (inherits(data, 'xgb.DMatrix') && is.null(getinfo(data, 'label'))) || - (!inherits(data, 'xgb.DMatrix') && is.null(label))) { - stop("Labels must be provided for CV either through xgb.DMatrix, or through 'label=' when 'data' is matrix") - } else if (inherits(data, 'xgb.DMatrix')) { - if (!is.null(label)) - warning("xgb.cv: label will be ignored, since data is of type xgb.DMatrix") - cv_label = getinfo(data, 'label') - } else { - cv_label = label - } - - # CV folds - if(!is.null(folds)) { - if(!is.list(folds) || length(folds) < 2) - stop("'folds' must be a list with 2 or more elements that are vectors of indices for each CV-fold") - nfold <- length(folds) - } else { - if (nfold <= 1) - stop("'nfold' must be > 1") - folds <- generate.cv.folds(nfold, nrow(data), stratified, cv_label, params) - } - - # Potential TODO: sequential CV - #if (strategy == 'sequential') - # stop('Sequential CV strategy is not yet implemented') - - # verbosity & evaluation printing callback: - params <- c(params, list(silent = 1)) - print_every_n <- max( as.integer(print_every_n), 1L) - if (!has.callbacks(callbacks, 'cb.print.evaluation') && verbose) { - callbacks <- add.cb(callbacks, cb.print.evaluation(print_every_n, showsd = showsd)) - } - # evaluation log callback: always is on in CV - evaluation_log <- list() - if (!has.callbacks(callbacks, 'cb.evaluation.log')) { - callbacks <- add.cb(callbacks, cb.evaluation.log()) - } - # Early stopping callback - stop_condition <- FALSE - if (!is.null(early_stopping_rounds) && - !has.callbacks(callbacks, 'cb.early.stop')) { - callbacks <- add.cb(callbacks, cb.early.stop(early_stopping_rounds, - maximize = maximize, verbose = verbose)) - } - # CV-predictions callback - if (prediction && - !has.callbacks(callbacks, 'cb.cv.predict')) { - callbacks <- add.cb(callbacks, cb.cv.predict(save_models = FALSE)) - } - # Sort the callbacks into categories - cb <- categorize.callbacks(callbacks) - - - # create the booster-folds - # train_folds - dall <- xgb.get.DMatrix(data, label, missing) - bst_folds <- lapply(seq_along(folds), function(k) { - dtest <- slice(dall, folds[[k]]) - # code originally contributed by @RolandASc on stackoverflow - if(is.null(train_folds)) - dtrain <- slice(dall, unlist(folds[-k])) - else - dtrain <- slice(dall, train_folds[[k]]) - handle <- xgb.Booster.handle(params, list(dtrain, dtest)) - list(dtrain = dtrain, bst = handle, watchlist = list(train = dtrain, test=dtest), index = 
folds[[k]]) - }) - rm(dall) - # a "basket" to collect some results from callbacks - basket <- list() - - # extract parameters that can affect the relationship b/w #trees and #iterations - num_class <- max(as.numeric(NVL(params[['num_class']], 1)), 1) - num_parallel_tree <- max(as.numeric(NVL(params[['num_parallel_tree']], 1)), 1) - - # those are fixed for CV (no training continuation) - begin_iteration <- 1 - end_iteration <- nrounds - - # synchronous CV boosting: run CV folds' models within each iteration - for (iteration in begin_iteration:end_iteration) { - - for (f in cb$pre_iter) f() - - msg <- lapply(bst_folds, function(fd) { - xgb.iter.update(fd$bst, fd$dtrain, iteration - 1, obj) - xgb.iter.eval(fd$bst, fd$watchlist, iteration - 1, feval) - }) - msg <- simplify2array(msg) - bst_evaluation <- rowMeans(msg) - bst_evaluation_err <- sqrt(rowMeans(msg^2) - bst_evaluation^2) - - for (f in cb$post_iter) f() - - if (stop_condition) break - } - for (f in cb$finalize) f(finalize = TRUE) - - # the CV result - ret <- list( - call = match.call(), - params = params, - callbacks = callbacks, - evaluation_log = evaluation_log, - niter = end_iteration, - nfeatures = ncol(data), - folds = folds - ) - ret <- c(ret, basket) - - class(ret) <- 'xgb.cv.synchronous' - invisible(ret) -} - - - -#' Print xgb.cv result -#' -#' Prints formatted results of \code{xgb.cv}. -#' -#' @param x an \code{xgb.cv.synchronous} object -#' @param verbose whether to print detailed data -#' @param ... passed to \code{data.table.print} -#' -#' @details -#' When not verbose, it would only print the evaluation results, -#' including the best iteration (when available). -#' -#' @examples -#' data(agaricus.train, package='xgboost') -#' train <- agaricus.train -#' cv <- xgb.cv(data = train$data, label = train$label, nfold = 5, max_depth = 2, -#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") -#' print(cv) -#' print(cv, verbose=TRUE) -#' -#' @rdname print.xgb.cv -#' @method print xgb.cv.synchronous -#' @export -print.xgb.cv.synchronous <- function(x, verbose = FALSE, ...) { - cat('##### xgb.cv ', length(x$folds), '-folds\n', sep = '') - - if (verbose) { - if (!is.null(x$call)) { - cat('call:\n ') - print(x$call) - } - if (!is.null(x$params)) { - cat('params (as set within xgb.cv):\n') - cat( ' ', - paste(names(x$params), - paste0('"', unlist(x$params), '"'), - sep = ' = ', collapse = ', '), '\n', sep = '') - } - if (!is.null(x$callbacks) && length(x$callbacks) > 0) { - cat('callbacks:\n') - lapply(callback.calls(x$callbacks), function(x) { - cat(' ') - print(x) - }) - } - - for (n in c('niter', 'best_iteration', 'best_ntreelimit')) { - if (is.null(x[[n]])) - next - cat(n, ': ', x[[n]], '\n', sep = '') - } - - if (!is.null(x$pred)) { - cat('pred:\n') - str(x$pred) - } - } - - if (verbose) - cat('evaluation_log:\n') - print(x$evaluation_log, row.names = FALSE, ...) - - if (!is.null(x$best_iteration)) { - cat('Best iteration:\n') - print(x$evaluation_log[x$best_iteration], row.names = FALSE, ...) - } - invisible(x) -} diff --git a/ml-xgboost/R-package/R/xgb.dump.R b/ml-xgboost/R-package/R/xgb.dump.R deleted file mode 100644 index ffa3cbc..0000000 --- a/ml-xgboost/R-package/R/xgb.dump.R +++ /dev/null @@ -1,72 +0,0 @@ -#' Dump an xgboost model in text format. -#' -#' Dump an xgboost model in text format. -#' -#' @param model the model object. -#' @param fname the name of the text file where to save the model text dump. -#' If not provided or set to \code{NULL}, the model is returned as a \code{character} vector. 
-#' @param fmap feature map file representing feature types. -#' Detailed description could be found at -#' \url{https://github.com/dmlc/xgboost/wiki/Binary-Classification#dump-model}. -#' See demo/ for walkthrough example in R, and -#' \url{https://github.com/dmlc/xgboost/blob/master/demo/data/featmap.txt} -#' for example Format. -#' @param with_stats whether to dump some additional statistics about the splits. -#' When this option is on, the model dump contains two additional values: -#' gain is the approximate loss function gain we get in each split; -#' cover is the sum of second order gradient in each node. -#' @param dump_format either 'text' or 'json' format could be specified. -#' @param ... currently not used -#' -#' @return -#' If fname is not provided or set to \code{NULL} the function will return the model -#' as a \code{character} vector. Otherwise it will return \code{TRUE}. -#' -#' @examples -#' data(agaricus.train, package='xgboost') -#' data(agaricus.test, package='xgboost') -#' train <- agaricus.train -#' test <- agaricus.test -#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2, -#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") -#' # save the model in file 'xgb.model.dump' -#' dump_path = file.path(tempdir(), 'model.dump') -#' xgb.dump(bst, dump_path, with_stats = TRUE) -#' -#' # print the model without saving it to a file -#' print(xgb.dump(bst, with_stats = TRUE)) -#' -#' # print in JSON format: -#' cat(xgb.dump(bst, with_stats = TRUE, dump_format='json')) -#' -#' @export -xgb.dump <- function(model, fname = NULL, fmap = "", with_stats=FALSE, - dump_format = c("text", "json"), ...) { - check.deprecation(...) - dump_format <- match.arg(dump_format) - if (!inherits(model, "xgb.Booster")) - stop("model: argument must be of type xgb.Booster") - if (!(is.null(fname) || is.character(fname))) - stop("fname: argument must be a character string (when provided)") - if (!(is.null(fmap) || is.character(fmap))) - stop("fmap: argument must be a character string (when provided)") - - model <- xgb.Booster.complete(model) - model_dump <- .Call(XGBoosterDumpModel_R, model$handle, NVL(fmap, "")[1], as.integer(with_stats), - as.character(dump_format)) - - if (is.null(fname)) - model_dump <- stri_replace_all_regex(model_dump, '\t', '') - - if (dump_format == "text") - model_dump <- unlist(stri_split_regex(model_dump, '\n')) - - model_dump <- grep('^\\s*$', model_dump, invert = TRUE, value = TRUE) - - if (is.null(fname)) { - return(model_dump) - } else { - writeLines(model_dump, fname[1]) - return(TRUE) - } -} diff --git a/ml-xgboost/R-package/R/xgb.ggplot.R b/ml-xgboost/R-package/R/xgb.ggplot.R deleted file mode 100644 index eceb5c4..0000000 --- a/ml-xgboost/R-package/R/xgb.ggplot.R +++ /dev/null @@ -1,135 +0,0 @@ -# ggplot backend for the xgboost plotting facilities - - -#' @rdname xgb.plot.importance -#' @export -xgb.ggplot.importance <- function(importance_matrix = NULL, top_n = NULL, measure = NULL, - rel_to_first = FALSE, n_clusters = c(1:10), ...) { - - importance_matrix <- xgb.plot.importance(importance_matrix, top_n = top_n, measure = measure, - rel_to_first = rel_to_first, plot = FALSE, ...) - if (!requireNamespace("ggplot2", quietly = TRUE)) { - stop("ggplot2 package is required", call. = FALSE) - } - if (!requireNamespace("Ckmeans.1d.dp", quietly = TRUE)) { - stop("Ckmeans.1d.dp package is required", call. 
= FALSE) - } - - clusters <- suppressWarnings( - Ckmeans.1d.dp::Ckmeans.1d.dp(importance_matrix$Importance, n_clusters) - ) - importance_matrix[, Cluster := as.character(clusters$cluster)] - - plot <- - ggplot2::ggplot(importance_matrix, - ggplot2::aes(x = factor(Feature, levels = rev(Feature)), y = Importance, width = 0.5), - environment = environment()) + - ggplot2::geom_bar(ggplot2::aes(fill = Cluster), stat = "identity", position = "identity") + - ggplot2::coord_flip() + - ggplot2::xlab("Features") + - ggplot2::ggtitle("Feature importance") + - ggplot2::theme(plot.title = ggplot2::element_text(lineheight = .9, face = "bold"), - panel.grid.major.y = ggplot2::element_blank()) - return(plot) -} - - -#' @rdname xgb.plot.deepness -#' @export -xgb.ggplot.deepness <- function(model = NULL, which = c("2x1", "max.depth", "med.depth", "med.weight")) { - - if (!requireNamespace("ggplot2", quietly = TRUE)) - stop("ggplot2 package is required for plotting the graph deepness.", call. = FALSE) - - which <- match.arg(which) - - dt_depths <- xgb.plot.deepness(model = model, plot = FALSE) - dt_summaries <- dt_depths[, .(.N, Cover = mean(Cover)), Depth] - setkey(dt_summaries, 'Depth') - - if (which == "2x1") { - p1 <- - ggplot2::ggplot(dt_summaries) + - ggplot2::geom_bar(ggplot2::aes(x = Depth, y = N), stat = "Identity") + - ggplot2::xlab("") + - ggplot2::ylab("Number of leafs") + - ggplot2::ggtitle("Model complexity") + - ggplot2::theme( - plot.title = ggplot2::element_text(lineheight = 0.9, face = "bold"), - panel.grid.major.y = ggplot2::element_blank(), - axis.ticks = ggplot2::element_blank(), - axis.text.x = ggplot2::element_blank() - ) - - p2 <- - ggplot2::ggplot(dt_summaries) + - ggplot2::geom_bar(ggplot2::aes(x = Depth, y = Cover), stat = "Identity") + - ggplot2::xlab("Leaf depth") + - ggplot2::ylab("Weighted cover") - - multiplot(p1, p2, cols = 1) - return(invisible(list(p1, p2))) - - } else if (which == "max.depth") { - p <- - ggplot2::ggplot(dt_depths[, max(Depth), Tree]) + - ggplot2::geom_jitter(ggplot2::aes(x = Tree, y = V1), - height = 0.15, alpha=0.4, size=3, stroke=0) + - ggplot2::xlab("tree #") + - ggplot2::ylab("Max tree leaf depth") - return(p) - - } else if (which == "med.depth") { - p <- - ggplot2::ggplot(dt_depths[, median(as.numeric(Depth)), Tree]) + - ggplot2::geom_jitter(ggplot2::aes(x = Tree, y = V1), - height = 0.15, alpha=0.4, size=3, stroke=0) + - ggplot2::xlab("tree #") + - ggplot2::ylab("Median tree leaf depth") - return(p) - - } else if (which == "med.weight") { - p <- - ggplot2::ggplot(dt_depths[, median(abs(Weight)), Tree]) + - ggplot2::geom_point(ggplot2::aes(x = Tree, y = V1), - alpha=0.4, size=3, stroke=0) + - ggplot2::xlab("tree #") + - ggplot2::ylab("Median absolute leaf weight") - return(p) - } -} - -# Plot multiple ggplot graph aligned by rows and columns. -# ... the plots -# cols number of columns -# internal utility function -multiplot <- function(..., cols = 1) { - plots <- list(...) 
- num_plots = length(plots) - - layout <- matrix(seq(1, cols * ceiling(num_plots / cols)), - ncol = cols, nrow = ceiling(num_plots / cols)) - - if (num_plots == 1) { - print(plots[[1]]) - } else { - grid::grid.newpage() - grid::pushViewport(grid::viewport(layout = grid::grid.layout(nrow(layout), ncol(layout)))) - for (i in 1:num_plots) { - # Get the i,j matrix positions of the regions that contain this subplot - matchidx <- as.data.table(which(layout == i, arr.ind = TRUE)) - - print( - plots[[i]], vp = grid::viewport( - layout.pos.row = matchidx$row, - layout.pos.col = matchidx$col - ) - ) - } - } -} - -globalVariables(c( - "Cluster", "ggplot", "aes", "geom_bar", "coord_flip", "xlab", "ylab", "ggtitle", "theme", - "element_blank", "element_text", "V1", "Weight" -)) diff --git a/ml-xgboost/R-package/R/xgb.importance.R b/ml-xgboost/R-package/R/xgb.importance.R deleted file mode 100644 index 62e37e8..0000000 --- a/ml-xgboost/R-package/R/xgb.importance.R +++ /dev/null @@ -1,139 +0,0 @@ -#' Importance of features in a model. -#' -#' Creates a \code{data.table} of feature importances in a model. -#' -#' @param feature_names character vector of feature names. If the model already -#' contains feature names, those would be used when \code{feature_names=NULL} (default value). -#' Non-null \code{feature_names} could be provided to override those in the model. -#' @param model object of class \code{xgb.Booster}. -#' @param trees (only for the gbtree booster) an integer vector of tree indices that should be included -#' into the importance calculation. If set to \code{NULL}, all trees of the model are parsed. -#' It could be useful, e.g., in multiclass classification to get feature importances -#' for each class separately. IMPORTANT: the tree index in xgboost models -#' is zero-based (e.g., use \code{trees = 0:4} for first 5 trees). -#' @param data deprecated. -#' @param label deprecated. -#' @param target deprecated. -#' -#' @details -#' -#' This function works for both linear and tree models. -#' -#' For linear models, the importance is the absolute magnitude of linear coefficients. -#' For that reason, in order to obtain a meaningful ranking by importance for a linear model, -#' the features need to be on the same scale (which you also would want to do when using either -#' L1 or L2 regularization). -#' -#' @return -#' -#' For a tree model, a \code{data.table} with the following columns: -#' \itemize{ -#' \item \code{Features} names of the features used in the model; -#' \item \code{Gain} represents fractional contribution of each feature to the model based on -#' the total gain of this feature's splits. Higher percentage means a more important -#' predictive feature. -#' \item \code{Cover} metric of the number of observation related to this feature; -#' \item \code{Frequency} percentage representing the relative number of times -#' a feature have been used in trees. -#' } -#' -#' A linear model's importance \code{data.table} has the following columns: -#' \itemize{ -#' \item \code{Features} names of the features used in the model; -#' \item \code{Weight} the linear coefficient of this feature; -#' \item \code{Class} (only for multiclass models) class label. -#' } -#' -#' If \code{feature_names} is not provided and \code{model} doesn't have \code{feature_names}, -#' index of the features will be used instead. Because the index is extracted from the model dump -#' (based on C++ code), it starts at 0 (as in C/C++ or Python) instead of 1 (usual in R). 
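Because the tree-model branch further below normalizes each column by its sum, Gain, Cover and Frequency each add up to 1, which gives a quick sanity check (sketch only; `bst` is assumed to be a trained tree-model booster as in the examples that follow):

    imp <- xgb.importance(model = bst)
    sum(imp$Gain)   # ~1, since Gain is divided by sum(Gain)
    imp[1, ]        # top feature: rows come sorted by decreasing Gain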
-#' -#' @examples -#' -#' # binomial classification using gbtree: -#' data(agaricus.train, package='xgboost') -#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2, -#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") -#' xgb.importance(model = bst) -#' -#' # binomial classification using gblinear: -#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, booster = "gblinear", -#' eta = 0.3, nthread = 1, nrounds = 20, objective = "binary:logistic") -#' xgb.importance(model = bst) -#' -#' # multiclass classification using gbtree: -#' nclass <- 3 -#' nrounds <- 10 -#' mbst <- xgboost(data = as.matrix(iris[, -5]), label = as.numeric(iris$Species) - 1, -#' max_depth = 3, eta = 0.2, nthread = 2, nrounds = nrounds, -#' objective = "multi:softprob", num_class = nclass) -#' # all classes clumped together: -#' xgb.importance(model = mbst) -#' # inspect importances separately for each class: -#' xgb.importance(model = mbst, trees = seq(from=0, by=nclass, length.out=nrounds)) -#' xgb.importance(model = mbst, trees = seq(from=1, by=nclass, length.out=nrounds)) -#' xgb.importance(model = mbst, trees = seq(from=2, by=nclass, length.out=nrounds)) -#' -#' # multiclass classification using gblinear: -#' mbst <- xgboost(data = scale(as.matrix(iris[, -5])), label = as.numeric(iris$Species) - 1, -#' booster = "gblinear", eta = 0.2, nthread = 1, nrounds = 15, -#' objective = "multi:softprob", num_class = nclass) -#' xgb.importance(model = mbst) -#' -#' @export -xgb.importance <- function(feature_names = NULL, model = NULL, trees = NULL, - data = NULL, label = NULL, target = NULL){ - - if (!(is.null(data) && is.null(label) && is.null(target))) - warning("xgb.importance: parameters 'data', 'label' and 'target' are deprecated") - - if (!inherits(model, "xgb.Booster")) - stop("model: must be an object of class xgb.Booster") - - if (is.null(feature_names) && !is.null(model$feature_names)) - feature_names <- model$feature_names - - if (!(is.null(feature_names) || is.character(feature_names))) - stop("feature_names: Has to be a character vector") - - model_text_dump <- xgb.dump(model = model, with_stats = TRUE) - - # linear model - if(model_text_dump[2] == "bias:"){ - weights <- which(model_text_dump == "weight:") %>% - {model_text_dump[(. + 1):length(model_text_dump)]} %>% - as.numeric - - num_class <- NVL(model$params$num_class, 1) - if(is.null(feature_names)) - feature_names <- seq(to = length(weights) / num_class) - 1 - if (length(feature_names) * num_class != length(weights)) - stop("feature_names length does not match the number of features used in the model") - - result <- if (num_class == 1) { - data.table(Feature = feature_names, Weight = weights)[order(-abs(Weight))] - } else { - data.table(Feature = rep(feature_names, each = num_class), - Weight = weights, - Class = seq_len(num_class) - 1)[order(Class, -abs(Weight))] - } - } else { - # tree model - result <- xgb.model.dt.tree(feature_names = feature_names, - text = model_text_dump, - trees = trees)[ - Feature != "Leaf", .(Gain = sum(Quality), - Cover = sum(Cover), - Frequency = .N), by = Feature][ - ,`:=`(Gain = Gain / sum(Gain), - Cover = Cover / sum(Cover), - Frequency = Frequency / sum(Frequency))][ - order(Gain, decreasing = TRUE)] - } - result -} - -# Avoid error messages during CRAN check. -# The reason is that these variables are never declared -# They are mainly column names inferred by Data.table... 
-globalVariables(c(".", ".N", "Gain", "Cover", "Frequency", "Feature", "Class")) diff --git a/ml-xgboost/R-package/R/xgb.load.R b/ml-xgboost/R-package/R/xgb.load.R deleted file mode 100644 index bda4e7e..0000000 --- a/ml-xgboost/R-package/R/xgb.load.R +++ /dev/null @@ -1,47 +0,0 @@ -#' Load xgboost model from binary file -#' -#' Load xgboost model from the binary model file. -#' -#' @param modelfile the name of the binary input file. -#' -#' @details -#' The input file is expected to contain a model saved in an xgboost-internal binary format -#' using either \code{\link{xgb.save}} or \code{\link{cb.save.model}} in R, or using some -#' appropriate methods from other xgboost interfaces. E.g., a model trained in Python and -#' saved from there in xgboost format, could be loaded from R. -#' -#' Note: a model saved as an R-object, has to be loaded using corresponding R-methods, -#' not \code{xgb.load}. -#' -#' @return -#' An object of \code{xgb.Booster} class. -#' -#' @seealso -#' \code{\link{xgb.save}}, \code{\link{xgb.Booster.complete}}. -#' -#' @examples -#' data(agaricus.train, package='xgboost') -#' data(agaricus.test, package='xgboost') -#' train <- agaricus.train -#' test <- agaricus.test -#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2, -#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic") -#' xgb.save(bst, 'xgb.model') -#' bst <- xgb.load('xgb.model') -#' if (file.exists('xgb.model')) file.remove('xgb.model') -#' pred <- predict(bst, test$data) -#' @export -xgb.load <- function(modelfile) { - if (is.null(modelfile)) - stop("xgb.load: modelfile cannot be NULL") - - handle <- xgb.Booster.handle(modelfile = modelfile) - # re-use modelfile if it is raw so we do not need to serialize - if (typeof(modelfile) == "raw") { - bst <- xgb.handleToBooster(handle, modelfile) - } else { - bst <- xgb.handleToBooster(handle, NULL) - } - bst <- xgb.Booster.complete(bst, saveraw = TRUE) - return(bst) -} diff --git a/ml-xgboost/R-package/R/xgb.load.raw.R b/ml-xgboost/R-package/R/xgb.load.raw.R deleted file mode 100644 index 2a7d375..0000000 --- a/ml-xgboost/R-package/R/xgb.load.raw.R +++ /dev/null @@ -1,14 +0,0 @@ -#' Load serialised xgboost model from R's raw vector -#' -#' User can generate raw memory buffer by calling xgb.save.raw -#' -#' @param buffer the buffer returned by xgb.save.raw -#' -#' @export -xgb.load.raw <- function(buffer) { - cachelist <- list() - handle <- .Call(XGBoosterCreate_R, cachelist) - .Call(XGBoosterLoadModelFromRaw_R, handle, buffer) - class(handle) <- "xgb.Booster.handle" - return (handle) -} diff --git a/ml-xgboost/R-package/R/xgb.model.dt.tree.R b/ml-xgboost/R-package/R/xgb.model.dt.tree.R deleted file mode 100644 index 6a00797..0000000 --- a/ml-xgboost/R-package/R/xgb.model.dt.tree.R +++ /dev/null @@ -1,159 +0,0 @@ -#' Parse a boosted tree model text dump -#' -#' Parse a boosted tree model text dump into a \code{data.table} structure. -#' -#' @param feature_names character vector of feature names. If the model already -#' contains feature names, those would be used when \code{feature_names=NULL} (default value). -#' Non-null \code{feature_names} could be provided to override those in the model. -#' @param model object of class \code{xgb.Booster} -#' @param text \code{character} vector previously generated by the \code{xgb.dump} -#' function (where parameter \code{with_stats = TRUE} should have been set). -#' \code{text} takes precedence over \code{model}. -#' @param trees an integer vector of tree indices that should be parsed. 
-#' If set to \code{NULL}, all trees of the model are parsed. -#' It could be useful, e.g., in multiclass classification to get only -#' the trees of one certain class. IMPORTANT: the tree index in xgboost models -#' is zero-based (e.g., use \code{trees = 0:4} for first 5 trees). -#' @param use_int_id a logical flag indicating whether nodes in columns "Yes", "No", "Missing" should be -#' represented as integers (when FALSE) or as "Tree-Node" character strings (when FALSE). -#' @param ... currently not used. -#' -#' @return -#' A \code{data.table} with detailed information about model trees' nodes. -#' -#' The columns of the \code{data.table} are: -#' -#' \itemize{ -#' \item \code{Tree}: integer ID of a tree in a model (zero-based index) -#' \item \code{Node}: integer ID of a node in a tree (zero-based index) -#' \item \code{ID}: character identifier of a node in a model (only when \code{use_int_id=FALSE}) -#' \item \code{Feature}: for a branch node, it's a feature id or name (when available); -#' for a leaf note, it simply labels it as \code{'Leaf'} -#' \item \code{Split}: location of the split for a branch node (split condition is always "less than") -#' \item \code{Yes}: ID of the next node when the split condition is met -#' \item \code{No}: ID of the next node when the split condition is not met -#' \item \code{Missing}: ID of the next node when branch value is missing -#' \item \code{Quality}: either the split gain (change in loss) or the leaf value -#' \item \code{Cover}: metric related to the number of observation either seen by a split -#' or collected by a leaf during training. -#' } -#' -#' When \code{use_int_id=FALSE}, columns "Yes", "No", and "Missing" point to model-wide node identifiers -#' in the "ID" column. When \code{use_int_id=TRUE}, those columns point to node identifiers from -#' the corresponding trees in the "Node" column. -#' -#' @examples -#' # Basic use: -#' -#' data(agaricus.train, package='xgboost') -#' -#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2, -#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic") -#' -#' (dt <- xgb.model.dt.tree(colnames(agaricus.train$data), bst)) -#' -#' # This bst model already has feature_names stored with it, so those would be used when -#' # feature_names is not set: -#' (dt <- xgb.model.dt.tree(model = bst)) -#' -#' # How to match feature names of splits that are following a current 'Yes' branch: -#' -#' merge(dt, dt[, .(ID, Y.Feature=Feature)], by.x='Yes', by.y='ID', all.x=TRUE)[order(Tree,Node)] -#' -#' @export -xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL, - trees = NULL, use_int_id = FALSE, ...){ - check.deprecation(...) - - if (!inherits(model, "xgb.Booster") && !is.character(text)) { - stop("Either 'model' must be an object of class xgb.Booster\n", - " or 'text' must be a character vector with the result of xgb.dump\n", - " (or NULL if 'model' was provided).") - } - - if (is.null(feature_names) && !is.null(model) && !is.null(model$feature_names)) - feature_names <- model$feature_names - - if (!(is.null(feature_names) || is.character(feature_names))) { - stop("feature_names: must be a character vector") - } - - if (!(is.null(trees) || is.numeric(trees))) { - stop("trees: must be a vector of integers.") - } - - if (is.null(text)){ - text <- xgb.dump(model = model, with_stats = TRUE) - } - - if (length(text) < 2 || - sum(stri_detect_regex(text, 'yes=(\\d+),no=(\\d+)')) < 1) { - stop("Non-tree model detected! 
This function can only be used with tree models.") - } - - position <- which(!is.na(stri_match_first_regex(text, "booster"))) - - add.tree.id <- function(node, tree) if (use_int_id) node else paste(tree, node, sep = "-") - - anynumber_regex <- "[-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?" - - td <- data.table(t = text) - td[position, Tree := 1L] - td[, Tree := cumsum(ifelse(is.na(Tree), 0L, Tree)) - 1L] - - if (is.null(trees)) { - trees <- 0:max(td$Tree) - } else { - trees <- trees[trees >= 0 & trees <= max(td$Tree)] - } - td <- td[Tree %in% trees & !grepl('^booster', t)] - - td[, Node := stri_match_first_regex(t, "(\\d+):")[,2] %>% as.integer ] - if (!use_int_id) td[, ID := add.tree.id(Node, Tree)] - td[, isLeaf := !is.na(stri_match_first_regex(t, "leaf"))] - - # parse branch lines - branch_rx <- paste0("f(\\d+)<(", anynumber_regex, ")\\] yes=(\\d+),no=(\\d+),missing=(\\d+),", - "gain=(", anynumber_regex, "),cover=(", anynumber_regex, ")") - branch_cols <- c("Feature", "Split", "Yes", "No", "Missing", "Quality", "Cover") - td[isLeaf == FALSE, - (branch_cols) := { - # skip some indices with spurious capture groups from anynumber_regex - xtr <- stri_match_first_regex(t, branch_rx)[, c(2,3,5,6,7,8,10), drop = FALSE] - xtr[, 3:5] <- add.tree.id(xtr[, 3:5], Tree) - lapply(seq_len(ncol(xtr)), function(i) xtr[,i]) - }] - # assign feature_names when available - if (!is.null(feature_names)) { - if (length(feature_names) <= max(as.numeric(td$Feature), na.rm = TRUE)) - stop("feature_names has less elements than there are features used in the model") - td[isLeaf == FALSE, Feature := feature_names[as.numeric(Feature) + 1] ] - } - - # parse leaf lines - leaf_rx <- paste0("leaf=(", anynumber_regex, "),cover=(", anynumber_regex, ")") - leaf_cols <- c("Feature", "Quality", "Cover") - td[isLeaf == TRUE, - (leaf_cols) := { - xtr <- stri_match_first_regex(t, leaf_rx)[, c(2,4)] - c("Leaf", lapply(seq_len(ncol(xtr)), function(i) xtr[,i])) - }] - - # convert some columns to numeric - numeric_cols <- c("Split", "Quality", "Cover") - td[, (numeric_cols) := lapply(.SD, as.numeric), .SDcols = numeric_cols] - if (use_int_id) { - int_cols <- c("Yes", "No", "Missing") - td[, (int_cols) := lapply(.SD, as.integer), .SDcols = int_cols] - } - - td[, t := NULL] - td[, isLeaf := NULL] - - td[order(Tree, Node)] -} - -# Avoid error messages during CRAN check. -# The reason is that these variables are never declared -# They are mainly column names inferred by Data.table... -globalVariables(c("Tree", "Node", "ID", "Feature", "t", "isLeaf",".SD", ".SDcols")) diff --git a/ml-xgboost/R-package/R/xgb.plot.deepness.R b/ml-xgboost/R-package/R/xgb.plot.deepness.R deleted file mode 100644 index 87d632a..0000000 --- a/ml-xgboost/R-package/R/xgb.plot.deepness.R +++ /dev/null @@ -1,150 +0,0 @@ -#' Plot model trees deepness -#' -#' Visualizes distributions related to depth of tree leafs. -#' \code{xgb.plot.deepness} uses base R graphics, while \code{xgb.ggplot.deepness} uses the ggplot backend. -#' -#' @param model either an \code{xgb.Booster} model generated by the \code{xgb.train} function -#' or a data.table result of the \code{xgb.model.dt.tree} function. -#' @param plot (base R barplot) whether a barplot should be produced. -#' If FALSE, only a data.table is returned. -#' @param which which distribution to plot (see details). -#' @param ... other parameters passed to \code{barplot} or \code{plot}. 
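As a sketch of the non-plotting path described in the details that follow (assuming `bst` is a trained booster and data.table is attached), `plot = FALSE` silently returns the per-leaf table, which can be summarized directly:

    dt_depths <- xgb.plot.deepness(bst, plot = FALSE)
    dt_depths[, .N, by = Depth]   # leaf count per depth, as in the "2x1" panel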
diff --git a/ml-xgboost/R-package/R/xgb.plot.deepness.R b/ml-xgboost/R-package/R/xgb.plot.deepness.R
deleted file mode 100644
index 87d632a..0000000
--- a/ml-xgboost/R-package/R/xgb.plot.deepness.R
+++ /dev/null
@@ -1,150 +0,0 @@
-#' Plot model trees' depth
-#'
-#' Visualizes distributions related to the depth of tree leaves.
-#' \code{xgb.plot.deepness} uses base R graphics, while \code{xgb.ggplot.deepness} uses the ggplot backend.
-#'
-#' @param model either an \code{xgb.Booster} model generated by the \code{xgb.train} function
-#' or a data.table result of the \code{xgb.model.dt.tree} function.
-#' @param plot (base R barplot) whether a barplot should be produced.
-#' If FALSE, only a data.table is returned.
-#' @param which which distribution to plot (see details).
-#' @param ... other parameters passed to \code{barplot} or \code{plot}.
-#'
-#' @details
-#'
-#' When \code{which="2x1"}, two distributions with respect to the leaf depth
-#' are plotted on top of each other:
-#' \itemize{
-#' \item the distribution of the number of leaves in a tree model at a certain depth;
-#' \item the distribution of the average weighted number of observations ("cover")
-#' ending up in leaves at a certain depth.
-#' }
-#' These could be helpful in determining sensible ranges of the \code{max_depth}
-#' and \code{min_child_weight} parameters.
-#'
-#' When \code{which="max.depth"} or \code{which="med.depth"}, plots of either maximum or median depth
-#' per tree with respect to the tree number are created, and \code{which="med.weight"} shows how
-#' a tree's median absolute leaf weight changes over the iterations.
-#'
-#' This function was inspired by the blog post
-#' \url{https://github.com/aysent/random-forest-leaf-visualization}.
-#'
-#' @return
-#'
-#' Other than producing plots (when \code{plot=TRUE}), the \code{xgb.plot.deepness} function
-#' silently returns a processed data.table where each row corresponds to a terminal leaf in a tree model,
-#' and contains information about the leaf's depth, cover, and weight (which is used in calculating predictions).
-#'
-#' The \code{xgb.ggplot.deepness} function silently returns either a list of two ggplot graphs when \code{which="2x1"}
-#' or a single ggplot graph for the other \code{which} options.
-#'
-#' @seealso
-#'
-#' \code{\link{xgb.train}}, \code{\link{xgb.model.dt.tree}}.
-#'
-#' @examples
-#'
-#' data(agaricus.train, package='xgboost')
-#'
-#' # Change max_depth to a higher number to get a more significant result
-#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 6,
-#'                eta = 0.1, nthread = 2, nrounds = 50, objective = "binary:logistic",
-#'                subsample = 0.5, min_child_weight = 2)
-#'
-#' xgb.plot.deepness(bst)
-#' xgb.ggplot.deepness(bst)
-#'
-#' xgb.plot.deepness(bst, which='max.depth', pch=16, col=rgb(0,0,1,0.3), cex=2)
-#'
-#' xgb.plot.deepness(bst, which='med.weight', pch=16, col=rgb(0,0,1,0.3), cex=2)
-#'
-#' @rdname xgb.plot.deepness
-#' @export
-xgb.plot.deepness <- function(model = NULL, which = c("2x1", "max.depth", "med.depth", "med.weight"),
-                              plot = TRUE, ...) {
-
-  if (!(inherits(model, "xgb.Booster") || is.data.table(model)))
-    stop("model: Has to be either an xgb.Booster model generated by the xgb.train function\n",
-         "or a data.table result of the xgb.model.dt.tree function")
-
-  if (!requireNamespace("igraph", quietly = TRUE))
-    stop("igraph package is required for plotting tree depth.", call. = FALSE)
-
-  which <- match.arg(which)
-
-  dt_tree <- model
-  if (inherits(model, "xgb.Booster"))
-    dt_tree <- xgb.model.dt.tree(model = model)
-
-  if (!all(c("Feature", "Tree", "ID", "Yes", "No", "Cover") %in% colnames(dt_tree)))
-    stop("Model tree columns are not as expected!\n",
-         "  Note that this function works only for tree models.")
-
-  dt_depths <- merge(get.leaf.depth(dt_tree), dt_tree[, .(ID, Cover, Weight = Quality)], by = "ID")
-  setkeyv(dt_depths, c("Tree", "ID"))
-  # count by depth levels, and also calculate average cover at a depth
-  dt_summaries <- dt_depths[, .(.N, Cover = mean(Cover)), Depth]
-  setkey(dt_summaries, "Depth")
-
-  if (plot) {
-    if (which == "2x1") {
-      op <- par(no.readonly = TRUE)
-      par(mfrow = c(2, 1),
-          oma = c(3, 1, 3, 1) + 0.1,
-          mar = c(1, 4, 1, 0) + 0.1)
-
-      dt_summaries[, barplot(N, border = NA, ylab = 'Number of leaves', ...)]
-
-      dt_summaries[, barplot(Cover, border = NA, ylab = "Weighted cover", names.arg = Depth, ...)]
-
-      title("Model complexity", xlab = "Leaf depth", outer = TRUE, line = 1)
-      par(op)
-    } else if (which == "max.depth") {
-      dt_depths[, max(Depth), Tree][
-        , plot(jitter(V1, amount = 0.1) ~ Tree, ylab = 'Max tree leaf depth', xlab = "tree #", ...)]
-    } else if (which == "med.depth") {
-      dt_depths[, median(as.numeric(Depth)), Tree][
-        , plot(jitter(V1, amount = 0.1) ~ Tree, ylab = 'Median tree leaf depth', xlab = "tree #", ...)]
-    } else if (which == "med.weight") {
-      dt_depths[, median(abs(Weight)), Tree][
-        , plot(V1 ~ Tree, ylab = 'Median absolute leaf weight', xlab = "tree #", ...)]
-    }
-  }
-  invisible(dt_depths)
-}
-
-# Extract path depths from root to leaf
-# from a data.table containing the nodes and edges of the trees.
-# internal utility function
-get.leaf.depth <- function(dt_tree) {
-  # extract the tree graph's edges
-  dt_edges <- rbindlist(list(
-    dt_tree[Feature != "Leaf", .(ID, To = Yes, Tree)],
-    dt_tree[Feature != "Leaf", .(ID, To = No, Tree)]
-  ))
-  # whether "To" is a leaf:
-  dt_edges <-
-    merge(dt_edges,
-          dt_tree[Feature == "Leaf", .(ID, Leaf = TRUE)],
-          all.x = TRUE, by.x = "To", by.y = "ID")
-  dt_edges[is.na(Leaf), Leaf := FALSE]
-
-  dt_edges[, {
-    graph <- igraph::graph_from_data_frame(.SD[, .(ID, To)])
-    # min(ID) in a tree is the root node
-    paths_tmp <- igraph::shortest_paths(graph, from = min(ID), to = To[Leaf == TRUE])
-    # list of paths to each leaf in a tree
-    paths <- lapply(paths_tmp$vpath, names)
-    # combine into a resulting path lengths table for a tree
-    data.table(Depth = sapply(paths, length), ID = To[Leaf == TRUE])
-  }, by = Tree]
-}
-
-# Avoid error messages during CRAN check.
-# The reason is that these variables are never declared.
-# They are mainly column names inferred by data.table...
-globalVariables(
-  c(
-    ".N", "N", "Depth", "Quality", "Cover", "Tree", "ID", "Yes", "No", "Feature", "Leaf", "Weight"
-  )
-)
diff --git a/ml-xgboost/R-package/R/xgb.plot.importance.R b/ml-xgboost/R-package/R/xgb.plot.importance.R
deleted file mode 100644
index 598bd3b..0000000
--- a/ml-xgboost/R-package/R/xgb.plot.importance.R
+++ /dev/null
@@ -1,125 +0,0 @@
-#' Plot feature importance as a bar graph
-#'
-#' Represents previously calculated feature importance as a bar graph.
-#' \code{xgb.plot.importance} uses base R graphics, while \code{xgb.ggplot.importance} uses the ggplot backend.
-#'
-#' @param importance_matrix a \code{data.table} returned by \code{\link{xgb.importance}}.
-#' @param top_n maximal number of top features to include in the plot.
-#' @param measure the name of the importance measure to plot.
-#' When \code{NULL}, 'Gain' is used for trees and 'Weight' for gblinear.
-#' @param rel_to_first whether importance values should be represented as relative to the highest ranked feature.
-#' See Details.
-#' @param left_margin (base R barplot) allows adjusting the left margin size to fit feature names.
-#' When it is NULL, the existing \code{par('mar')} is used.
-#' @param cex (base R barplot) passed as \code{cex.names} parameter to \code{barplot}.
-#' @param plot (base R barplot) whether a barplot should be produced.
-#' If FALSE, only a data.table is returned.
-#' @param n_clusters (ggplot only) a \code{numeric} vector containing the min and the max range
-#' of the possible number of clusters of bars.
-#' @param ... other parameters passed to \code{barplot} (except horiz, border, cex.names, names.arg, and las).
-#'
-#' @details
-#' The graph represents each feature as a horizontal bar of length proportional to the importance of a feature.
-#' Features are shown ranked in decreasing importance order.
-#' It works for importances from both \code{gblinear} and \code{gbtree} models.
-#'
-#' When \code{rel_to_first = FALSE}, the values are plotted as they were in \code{importance_matrix}.
-#' For a gbtree model, that would mean being normalized to the total of 1
-#' ("what is the feature's importance contribution relative to the whole model?").
-#' For linear models, \code{rel_to_first = FALSE} would show the actual values of the coefficients.
-#' Setting \code{rel_to_first = TRUE} allows seeing the picture from the perspective of
-#' "what is the feature's importance contribution relative to the most important feature?"
-#' (a small numeric sketch of this rescaling follows the function's source below).
-#'
-#' The ggplot-backend method also performs 1-D clustering of the importance values,
-#' with bar colors corresponding to different clusters that have somewhat similar importance values.
-#'
-#' @return
-#' The \code{xgb.plot.importance} function creates a \code{barplot} (when \code{plot=TRUE})
-#' and silently returns a processed data.table with \code{top_n} features sorted by importance.
-#'
-#' The \code{xgb.ggplot.importance} function returns a ggplot graph which could be customized afterwards.
-#' E.g., to change the title of the graph, add \code{+ ggtitle("A GRAPH NAME")} to the result.
-#'
-#' @seealso
-#' \code{\link[graphics]{barplot}}.
-#'
-#' @examples
-#' data(agaricus.train)
-#'
-#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 3,
-#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
-#'
-#' importance_matrix <- xgb.importance(colnames(agaricus.train$data), model = bst)
-#'
-#' xgb.plot.importance(importance_matrix, rel_to_first = TRUE, xlab = "Relative importance")
-#'
-#' (gg <- xgb.ggplot.importance(importance_matrix, measure = "Frequency", rel_to_first = TRUE))
-#' gg + ggplot2::ylab("Frequency")
-#'
-#' @rdname xgb.plot.importance
-#' @export
-xgb.plot.importance <- function(importance_matrix = NULL, top_n = NULL, measure = NULL,
-                                rel_to_first = FALSE, left_margin = 10, cex = NULL, plot = TRUE, ...) {
-  check.deprecation(...)
-  if (!is.data.table(importance_matrix)) {
-    stop("importance_matrix: must be a data.table")
-  }
-
-  imp_names <- colnames(importance_matrix)
-  if (is.null(measure)) {
-    if (all(c("Feature", "Gain") %in% imp_names)) {
-      measure <- "Gain"
-    } else if (all(c("Feature", "Weight") %in% imp_names)) {
-      measure <- "Weight"
-    } else {
-      stop("Importance matrix column names are not as expected!")
-    }
-  } else {
-    if (!measure %in% imp_names)
-      stop("Invalid `measure`")
-    if (!"Feature" %in% imp_names)
-      stop("Importance matrix column names are not as expected!")
-  }
-
-  # also aggregate, in case the values were not yet summed up by feature
-  importance_matrix <- importance_matrix[, Importance := sum(get(measure)), by = Feature]
-
-  # make sure it's ordered
-  importance_matrix <- importance_matrix[order(-abs(Importance))]
-
-  if (!is.null(top_n)) {
-    top_n <- min(top_n, nrow(importance_matrix))
-    importance_matrix <- head(importance_matrix, top_n)
-  }
-  if (rel_to_first) {
-    importance_matrix[, Importance := Importance / max(abs(Importance))]
-  }
-  if (is.null(cex)) {
-    cex <- 2.5 / log2(1 + nrow(importance_matrix))
-  }
-
-  if (plot) {
-    op <- par(no.readonly = TRUE)
-    mar <- op$mar
-    if (!is.null(left_margin))
-      mar[2] <- left_margin
-    par(mar = mar)
-
-    # reverse the order of rows to have the highest ranked at the top
-    importance_matrix[nrow(importance_matrix):1,
-                      barplot(Importance, horiz = TRUE, border = NA, cex.names = cex,
-                              names.arg = Feature, las = 1, ...)]
-    grid(NULL, NA)
-    # redraw over the grid
-    importance_matrix[nrow(importance_matrix):1,
-                      barplot(Importance, horiz = TRUE, border = NA, add = TRUE)]
-    par(op)
-  }
-
-  invisible(importance_matrix)
-}
-
-# Avoid error messages during CRAN check.
-# The reason is that these variables are never declared.
-# They are mainly column names inferred by data.table...
-globalVariables(c("Feature", "Importance"))
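A small numeric sketch of the `rel_to_first` rescaling described in the Details above
(the feature names and importance values here are made up for illustration; the real
matrix comes from xgb.importance()):

    library(data.table)
    imp <- data.table(Feature = c("f1", "f2", "f3"),    # hypothetical features
                      Importance = c(0.6, 0.3, 0.1))    # already sums to 1 (gbtree case)
    # rel_to_first = TRUE rescales so the top-ranked feature becomes 1:
    imp[, Importance := Importance / max(abs(Importance))]
    imp  # f1 = 1.00, f2 = 0.50, f3 ~ 0.17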
diff --git a/ml-xgboost/R-package/R/xgb.plot.multi.trees.R b/ml-xgboost/R-package/R/xgb.plot.multi.trees.R
deleted file mode 100644
index 3e7b04b..0000000
--- a/ml-xgboost/R-package/R/xgb.plot.multi.trees.R
+++ /dev/null
@@ -1,148 +0,0 @@
-#' Project all trees on one tree and plot it
-#'
-#' Visualization of the ensemble of trees as a single collective unit.
-#'
-#' @param model produced by the \code{xgb.train} function.
-#' @param feature_names names of each feature as a \code{character} vector.
-#' @param features_keep number of features to keep in each position of the multi trees.
-#' @param plot_width width in pixels of the graph to produce.
-#' @param plot_height height in pixels of the graph to produce.
-#' @param render a logical flag for whether the graph should be rendered (see Value).
-#' @param ... currently not used.
-#'
-#' @details
-#'
-#' This function tries to capture the complexity of a gradient boosted tree model
-#' in a cohesive way by compressing an ensemble of trees into a single tree-graph representation.
-#' The goal is to improve the interpretability of a model generally seen as a black box.
-#'
-#' Note: this function is applicable to tree booster-based models only.
-#'
-#' It takes advantage of the fact that the shape of a binary tree is only defined by
-#' its depth (therefore, in a boosting model, all trees have similar shape).
-#'
-#' Moreover, the trees tend to reuse the same features.
-#'
-#' The function projects each tree onto one, and keeps, for each position, the first
-#' \code{features_keep} features (ranked by the per-feature Gain measure); a minimal
-#' sketch of this aggregation follows the function's source below.
-#'
-#' This function is inspired by this blog post:
-#' \url{https://wellecks.wordpress.com/2015/02/21/peering-into-the-black-box-visualizing-lambdamart/}
-#'
-#' @return
-#'
-#' When \code{render = TRUE}:
-#' returns a rendered graph object which is an \code{htmlwidget} of class \code{grViz}.
-#' Similar to ggplot objects, it needs to be printed to see it when not running from the command line.
-#'
-#' When \code{render = FALSE}:
-#' silently returns a graph object which is of DiagrammeR's class \code{dgr_graph}.
-#' This could be useful if one wants to modify some of the graph attributes
-#' before rendering the graph with \code{\link[DiagrammeR]{render_graph}}.
-#'
-#' @examples
-#'
-#' data(agaricus.train, package='xgboost')
-#'
-#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 15,
-#'                eta = 1, nthread = 2, nrounds = 30, objective = "binary:logistic",
-#'                min_child_weight = 50, verbose = 0)
-#'
-#' p <- xgb.plot.multi.trees(model = bst, features_keep = 3)
-#' print(p)
-#'
-#' \dontrun{
-#' # Below is an example of how to save this plot to a file.
-#' # Note that for `export_graph` to work, the DiagrammeRsvg and rsvg packages must also be installed.
-#' library(DiagrammeR)
-#' gr <- xgb.plot.multi.trees(model = bst, features_keep = 3, render = FALSE)
-#' export_graph(gr, 'tree.pdf', width = 1500, height = 600)
-#' }
-#'
-#' @export
-xgb.plot.multi.trees <- function(model, feature_names = NULL, features_keep = 5, plot_width = NULL, plot_height = NULL,
-                                 render = TRUE, ...){
-  check.deprecation(...)
-  tree.matrix <- xgb.model.dt.tree(feature_names = feature_names, model = model)
-
-  # the first number of the path represents the tree; the following numbers encode the path to follow
-  # root init
-  root.nodes <- tree.matrix[stri_detect_regex(ID, "\\d+-0"), ID]
-  tree.matrix[ID %in% root.nodes, abs.node.position := root.nodes]
-
-  precedent.nodes <- root.nodes
-
-  while (tree.matrix[, sum(is.na(abs.node.position))] > 0) {
-    yes.row.nodes <- tree.matrix[abs.node.position %in% precedent.nodes & !is.na(Yes)]
-    no.row.nodes <- tree.matrix[abs.node.position %in% precedent.nodes & !is.na(No)]
-    yes.nodes.abs.pos <- yes.row.nodes[, abs.node.position] %>% paste0("_0")
-    no.nodes.abs.pos <- no.row.nodes[, abs.node.position] %>% paste0("_1")
-
-    tree.matrix[ID %in% yes.row.nodes[, Yes], abs.node.position := yes.nodes.abs.pos]
-    tree.matrix[ID %in% no.row.nodes[, No], abs.node.position := no.nodes.abs.pos]
-    precedent.nodes <- c(yes.nodes.abs.pos, no.nodes.abs.pos)
-  }
-
-  tree.matrix[!is.na(Yes), Yes := paste0(abs.node.position, "_0")]
-  tree.matrix[!is.na(No), No := paste0(abs.node.position, "_1")]
-
-  remove.tree <- . %>% stri_replace_first_regex(pattern = "^\\d+-", replacement = "")
-
-  tree.matrix[, `:=`(abs.node.position = remove.tree(abs.node.position),
-                     Yes = remove.tree(Yes),
-                     No = remove.tree(No))]
-
-  nodes.dt <- tree.matrix[
-    , .(Quality = sum(Quality))
-    , by = .(abs.node.position, Feature)
-  ][, .(Text = paste0(Feature[1:min(length(Feature), features_keep)],
-                      " (",
-                      format(Quality[1:min(length(Quality), features_keep)], digits = 5),
-                      ")") %>%
-          paste0(collapse = "\n"))
-    , by = abs.node.position]
-
-  edges.dt <- tree.matrix[Feature != "Leaf", .(abs.node.position, Yes)] %>%
-    list(tree.matrix[Feature != "Leaf", .(abs.node.position, No)]) %>%
-    rbindlist() %>%
-    setnames(c("From", "To")) %>%
-    .[, .N, .(From, To)] %>%
-    .[, N := NULL]
-
-  nodes <- DiagrammeR::create_node_df(
-    n = nrow(nodes.dt),
-    label = nodes.dt[, Text]
-  )
-
-  edges <- DiagrammeR::create_edge_df(
-    from = match(edges.dt[, From], nodes.dt[, abs.node.position]),
-    to = match(edges.dt[, To], nodes.dt[, abs.node.position]),
-    rel = "leading_to")
-
-  graph <- DiagrammeR::create_graph(
-    nodes_df = nodes,
-    edges_df = edges,
-    attr_theme = NULL
-  ) %>%
-    DiagrammeR::add_global_graph_attrs(
-      attr_type = "graph",
-      attr = c("layout", "rankdir"),
-      value = c("dot", "LR")
-    ) %>%
-    DiagrammeR::add_global_graph_attrs(
-      attr_type = "node",
-      attr = c("color", "fillcolor", "style", "shape", "fontname"),
-      value = c("DimGray", "beige", "filled", "rectangle", "Helvetica")
-    ) %>%
-    DiagrammeR::add_global_graph_attrs(
-      attr_type = "edge",
-      attr = c("color", "arrowsize", "arrowhead", "fontname"),
-      value = c("DimGray", "1.5", "vee", "Helvetica"))
-
-  if (!render) return(invisible(graph))
-
-  DiagrammeR::render_graph(graph, width = plot_width, height = plot_height)
-}
-
-globalVariables(c(".N", "N", "From", "To", "Text", "Feature", "no.nodes.abs.pos",
-                  "ID", "Yes", "No", "Tree", "yes.nodes.abs.pos", "abs.node.position"))
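A minimal sketch of the per-position aggregation described in the Details above
(positions, features, and Gain values are made up for illustration; the real input
is the xgb.model.dt.tree output processed inside the function):

    library(data.table)
    nodes <- data.table(
      position = c("0", "0", "0_0", "0_0", "0_0"),  # hypothetical projected positions
      Feature  = c("f1", "f2", "f1", "f3", "f4"),   # hypothetical features
      Quality  = c(5.0, 2.0, 3.0, 2.5, 0.5))        # hypothetical Gain values
    features_keep <- 2
    # sum Gain per (position, feature), then keep the top features_keep per position:
    agg <- nodes[, .(Quality = sum(Quality)), by = .(position, Feature)]
    agg[order(-Quality), head(.SD, features_keep), by = position]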
diff --git a/ml-xgboost/R-package/R/xgb.plot.shap.R b/ml-xgboost/R-package/R/xgb.plot.shap.R
deleted file mode 100644
index 18f6aaa..0000000
--- a/ml-xgboost/R-package/R/xgb.plot.shap.R
+++ /dev/null
@@ -1,218 +0,0 @@
-#' SHAP contribution dependency plots
-#'
-#' Visualize how SHAP feature contributions to the prediction depend on the feature values.
-#'
-#' @param data data as a \code{matrix} or \code{dgCMatrix}.
-#' @param shap_contrib a matrix of SHAP contributions that was computed earlier for the above
-#' \code{data}. When it is NULL, it is computed internally using \code{model} and \code{data}.
-#' @param features a vector of either column indices or of feature names to plot. When it is NULL,
-#' feature importance is calculated, and \code{top_n} high ranked features are taken.
-#' @param top_n when \code{features} is NULL, the \code{top_n} most important features in a model are taken
-#' (must be within [1, 100]).
-#' @param model an \code{xgb.Booster} model. It has to be provided when either \code{shap_contrib}
-#' or \code{features} is missing.
-#' @param trees passed to \code{\link{xgb.importance}} when \code{features = NULL}.
-#' @param target_class is only relevant for multiclass models. When it is set to a 0-based class index,
-#' only SHAP contributions for that specific class are used.
-#' If it is not set, SHAP importances are averaged over all classes.
-#' @param approxcontrib passed to \code{\link{predict.xgb.Booster}} when \code{shap_contrib = NULL}.
-#' @param subsample a random fraction of data points to use for plotting. When it is NULL,
-#' it is set so that up to 100K data points are used.
-#' @param n_col a number of columns in a grid of plots.
-#' @param col color of the scatterplot markers.
-#' @param pch scatterplot marker.
-#' @param discrete_n_uniq a maximal number of unique values in a feature to consider it as discrete.
-#' @param discrete_jitter an \code{amount} parameter of jitter added to discrete features' positions.
-#' @param ylab a y-axis label in 1D plots.
-#' @param plot_NA whether the contributions of cases with missing values should also be plotted.
-#' @param col_NA a color of marker for missing value contributions.
-#' @param pch_NA a marker type for NA values.
-#' @param pos_NA a relative position of the x-location where NA values are shown:
-#' \code{min(x) + (max(x) - min(x)) * pos_NA}.
-#' @param plot_loess whether to plot loess-smoothed curves. The smoothing is only done for features with
-#' more than 5 distinct values.
-#' @param col_loess a color to use for the loess curves.
-#' @param span_loess the \code{span} parameter in \code{\link[stats]{loess}}'s call.
-#' @param which whether to do univariate or bivariate plotting. NOTE: only 1D is implemented so far.
-#' @param plot whether a plot should be drawn. If FALSE, only a list of matrices is returned.
-#' @param ... other parameters passed to \code{plot}.
-#'
-#' @details
-#'
-#' These scatterplots represent how SHAP feature contributions depend on feature values.
-#' The similarity to partial dependency plots is that they also give an idea of how feature values
-#' affect predictions. However, in partial dependency plots, we usually see marginal dependencies
-#' of model prediction on feature value, while SHAP contribution dependency plots display the estimated
-#' contributions of a feature to model prediction for each individual case.
-#'
-#' When \code{plot_loess = TRUE} is set, feature values are rounded to 3 significant digits and
-#' weighted LOESS is computed and plotted, where the weights are the numbers of data points
-#' at each rounded value.
-#'
-#' Note: SHAP contributions are shown on the scale of the model margin. E.g., for a logistic binomial objective,
-#' the margin is the prediction before a sigmoidal transform into probability-like values.
-#' Also, since SHAP stands for "SHapley Additive exPlanation" (model prediction = sum of SHAP
-#' contributions for all features + bias), depending on the objective used, transforming SHAP
-#' contributions for a feature from the marginal to the prediction space is not necessarily
-#' a meaningful thing to do. (A short sketch of this additivity property follows the function's
-#' source below.)
-#'
-#' @return
-#'
-#' In addition to producing plots (when \code{plot=TRUE}), it silently returns a list of two matrices:
-#' \itemize{
-#' \item \code{data} the values of selected features;
-#' \item \code{shap_contrib} the contributions of selected features.
-#' }
-#'
-#' @references
-#'
-#' Scott M. Lundberg, Su-In Lee, "A Unified Approach to Interpreting Model Predictions", NIPS Proceedings 2017, \url{https://arxiv.org/abs/1705.07874}
-#'
-#' Scott M. Lundberg, Su-In Lee, "Consistent feature attribution for tree ensembles", \url{https://arxiv.org/abs/1706.06060}
-#'
-#' @examples
-#'
-#' data(agaricus.train, package='xgboost')
-#' data(agaricus.test, package='xgboost')
-#'
-#' bst <- xgboost(agaricus.train$data, agaricus.train$label, nrounds = 50,
-#'                eta = 0.1, max_depth = 3, subsample = .5,
-#'                tree_method = "hist", objective = "binary:logistic", nthread = 2, verbose = 0)
-#'
-#' xgb.plot.shap(agaricus.test$data, model = bst, features = "odor=none")
-#' contr <- predict(bst, agaricus.test$data, predcontrib = TRUE)
-#' xgb.plot.shap(agaricus.test$data, contr, model = bst, top_n = 12, n_col = 3)
-#'
-#' # multiclass example - plots for each class separately:
-#' nclass <- 3
-#' nrounds <- 20
-#' x <- as.matrix(iris[, -5])
-#' set.seed(123)
-#' is.na(x[sample(nrow(x) * 4, 30)]) <- TRUE # introduce some missing values
-#' mbst <- xgboost(data = x, label = as.numeric(iris$Species) - 1, nrounds = nrounds,
-#'                 max_depth = 2, eta = 0.3, subsample = .5, nthread = 2,
-#'                 objective = "multi:softprob", num_class = nclass, verbose = 0)
-#' trees0 <- seq(from = 0, by = nclass, length.out = nrounds)
-#' col <- rgb(0, 0, 1, 0.5)
-#' xgb.plot.shap(x, model = mbst, trees = trees0, target_class = 0, top_n = 4,
-#'               n_col = 2, col = col, pch = 16, pch_NA = 17)
-#' xgb.plot.shap(x, model = mbst, trees = trees0 + 1, target_class = 1, top_n = 4,
-#'               n_col = 2, col = col, pch = 16, pch_NA = 17)
-#' xgb.plot.shap(x, model = mbst, trees = trees0 + 2, target_class = 2, top_n = 4,
-#'               n_col = 2, col = col, pch = 16, pch_NA = 17)
-#'
-#' @rdname xgb.plot.shap
-#' @export
-xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1, model = NULL,
-                          trees = NULL, target_class = NULL, approxcontrib = FALSE,
-                          subsample = NULL, n_col = 1, col = rgb(0, 0, 1, 0.2), pch = '.',
-                          discrete_n_uniq = 5, discrete_jitter = 0.01, ylab = "SHAP",
-                          plot_NA = TRUE, col_NA = rgb(0.7, 0, 1, 0.6), pch_NA = '.', pos_NA = 1.07,
-                          plot_loess = TRUE, col_loess = 2, span_loess = 0.5,
-                          which = c("1d", "2d"), plot = TRUE, ...) {
-
-  if (!is.matrix(data) && !inherits(data, "dgCMatrix"))
-    stop("data: must be either matrix or dgCMatrix")
-
-  if (is.null(shap_contrib) && (is.null(model) || !inherits(model, "xgb.Booster")))
-    stop("when shap_contrib is not provided, one must provide an xgb.Booster model")
-
-  if (is.null(features) && (is.null(model) || !inherits(model, "xgb.Booster")))
-    stop("when features are not provided, one must provide an xgb.Booster model to rank the features")
-
-  if (!is.null(shap_contrib) &&
-      (!is.matrix(shap_contrib) || nrow(shap_contrib) != nrow(data) || ncol(shap_contrib) != ncol(data) + 1))
-    stop("shap_contrib is not compatible with the provided data")
-
-  nsample <- if (is.null(subsample)) min(100000, nrow(data)) else as.integer(subsample * nrow(data))
-  idx <- sample(1:nrow(data), nsample)
-  data <- data[idx, ]
-
-  if (is.null(shap_contrib)) {
-    shap_contrib <- predict(model, data, predcontrib = TRUE, approxcontrib = approxcontrib)
-  } else {
-    shap_contrib <- shap_contrib[idx, ]
-  }
-
-  which <- match.arg(which)
-  if (which == "2d")
-    stop("2D plots are not implemented yet")
-
-  if (is.null(features)) {
-    imp <- xgb.importance(model = model, trees = trees)
-    top_n <- as.integer(top_n[1])
-    if (top_n < 1 || top_n > 100)
-      stop("top_n: must be an integer within [1, 100]")
-    features <- imp$Feature[1:min(top_n, NROW(imp))]
-  }
-
-  if (is.character(features)) {
-    if (is.null(colnames(data)))
-      stop("Either provide `data` with column names or provide `features` as column indices")
-    features <- match(features, colnames(data))
-  }
-
-  if (n_col > length(features)) n_col <- length(features)
-
-  if (is.list(shap_contrib)) { # multiclass: either choose a class or merge
-    shap_contrib <- if (!is.null(target_class)) shap_contrib[[target_class + 1]]
-                    else Reduce("+", lapply(shap_contrib, abs))
-  }
-
-  shap_contrib <- shap_contrib[, features, drop = FALSE]
-  data <- data[, features, drop = FALSE]
-  cols <- colnames(data)
-  if (is.null(cols)) cols <- colnames(shap_contrib)
-  if (is.null(cols)) cols <- paste0('X', 1:ncol(data))
-  colnames(data) <- cols
-  colnames(shap_contrib) <- cols
-
-  if (plot && which == "1d") {
-    op <- par(mfrow = c(ceiling(length(features) / n_col), n_col),
-              oma = c(0, 0, 0, 0) + 0.2,
-              mar = c(3.5, 3.5, 0, 0) + 0.1,
-              mgp = c(1.7, 0.6, 0))
-    for (f in cols) {
-      ord <- order(data[, f])
-      x <- data[, f][ord]
-      y <- shap_contrib[, f][ord]
-      x_lim <- range(x, na.rm = TRUE)
-      y_lim <- range(y, na.rm = TRUE)
-      do_na <- plot_NA && any(is.na(x))
-      if (do_na) {
-        x_range <- diff(x_lim)
-        loc_na <- min(x, na.rm = TRUE) + x_range * pos_NA
-        x_lim <- range(c(x_lim, loc_na))
-      }
-      x_uniq <- unique(x)
-      x2plot <- x
-      # add small jitter for discrete features with <= discrete_n_uniq distinct values
-      if (length(x_uniq) <= discrete_n_uniq)
-        x2plot <- jitter(x, amount = discrete_jitter * min(diff(x_uniq), na.rm = TRUE))
-      plot(x2plot, y, pch = pch, xlab = f, col = col, xlim = x_lim, ylim = y_lim, ylab = ylab, ...)
-      grid()
-      if (plot_loess) {
-        # compress x to 3 significant digits, and mean-aggregate y per rounded value
-        zz <- data.table(x = signif(x, 3), y)[, .(.N, y = mean(y)), x]
-        if (nrow(zz) <= 5) {
-          lines(zz$x, zz$y, col = col_loess)
-        } else {
-          lo <- stats::loess(y ~ x, data = zz, weights = zz$N, span = span_loess)
-          zz$y_lo <- predict(lo, zz)
-          lines(zz$x, zz$y_lo, col = col_loess)
-        }
-      }
-      if (do_na) {
-        i_na <- which(is.na(x))
-        x_na <- rep(loc_na, length(i_na))
-        x_na <- jitter(x_na, amount = x_range * 0.01)
-        points(x_na, y[i_na], pch = pch_NA, col = col_NA)
-      }
-    }
-    par(op)
-  }
-  if (plot && which == "2d") {
-    # TODO
-    warning("Bivariate plotting is currently not available.")
-  }
-  invisible(list(data = data, shap_contrib = shap_contrib))
-}
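As a short check of the additivity note in the Details above (a sketch assuming `bst`
and the agaricus test data from the examples; per-row SHAP contributions, including
the trailing BIAS column, sum to the margin prediction):

    contr <- predict(bst, agaricus.test$data, predcontrib = TRUE)
    marg  <- predict(bst, agaricus.test$data, outputmargin = TRUE)
    all.equal(rowSums(contr), marg, check.attributes = FALSE)  # expected: TRUE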
diff --git a/ml-xgboost/R-package/R/xgb.plot.tree.R b/ml-xgboost/R-package/R/xgb.plot.tree.R
deleted file mode 100644
index 29c37d6..0000000
--- a/ml-xgboost/R-package/R/xgb.plot.tree.R
+++ /dev/null
@@ -1,138 +0,0 @@
-#' Plot a boosted tree model
-#'
-#' Read a tree model text dump and plot the model.
-#'
-#' @param feature_names names of each feature as a \code{character} vector.
-#' @param model produced by the \code{xgb.train} function.
-#' @param trees an integer vector of tree indices that should be visualized.
-#' If set to \code{NULL}, all trees of the model are included.
-#' IMPORTANT: the tree index in xgboost models is zero-based
-#' (e.g., use \code{trees = 0:2} for the first 3 trees in a model).
-#' @param plot_width the width of the diagram in pixels.
-#' @param plot_height the height of the diagram in pixels.
-#' @param render a logical flag for whether the graph should be rendered (see Value).
-#' @param show_node_id a logical flag for whether to show node id's in the graph.
-#' @param ... currently not used.
-#'
-#' @details
-#'
-#' The content of each node is organised as follows:
-#'
-#' \itemize{
-#' \item Feature name.
-#' \item \code{Cover}: the sum of second-order gradients of training data classified to the leaf.
-#' If it is squared loss, this simply corresponds to the number of instances seen by a split
-#' or collected by a leaf during training.
-#' The deeper in the tree a node is, the lower this metric will be.
-#' \item \code{Gain} (for split nodes): the information gain metric of a split
-#' (corresponds to the importance of the node in the model).
-#' \item \code{Value} (for leaves): the margin value that the leaf may contribute to prediction.
-#' }
-#' The tree root nodes also indicate the Tree index (0-based).
-#'
-#' The "Yes" branches are marked by the "< split_value" label.
-#' The branches that are also used for missing values are marked as bold
-#' (as in "carrying extra capacity").
-#'
-#' This function uses \href{http://www.graphviz.org/}{GraphViz} as a backend of DiagrammeR.
-#'
-#' @return
-#'
-#' When \code{render = TRUE}:
-#' returns a rendered graph object which is an \code{htmlwidget} of class \code{grViz}.
-#' Similar to ggplot objects, it needs to be printed to see it when not running from the command line.
-#'
-#' When \code{render = FALSE}:
-#' silently returns a graph object which is of DiagrammeR's class \code{dgr_graph}.
-#' This could be useful if one wants to modify some of the graph attributes
-#' before rendering the graph with \code{\link[DiagrammeR]{render_graph}}.
-#'
-#' @examples
-#' data(agaricus.train, package='xgboost')
-#'
-#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 3,
-#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
-#' # plot all the trees
-#' xgb.plot.tree(model = bst)
-#' # plot only the first tree and display the node ID:
-#' xgb.plot.tree(model = bst, trees = 0, show_node_id = TRUE)
-#'
-#' \dontrun{
-#' # Below is an example of how to save this plot to a file.
-#' # Note that for `export_graph` to work, the DiagrammeRsvg and rsvg packages must also be installed.
-#' library(DiagrammeR)
-#' gr <- xgb.plot.tree(model = bst, trees = 0:1, render = FALSE)
-#' export_graph(gr, 'tree.pdf', width = 1500, height = 1900)
-#' export_graph(gr, 'tree.png', width = 1500, height = 1900)
-#' }
-#'
-#' @export
-xgb.plot.tree <- function(feature_names = NULL, model = NULL, trees = NULL, plot_width = NULL, plot_height = NULL,
-                          render = TRUE, show_node_id = FALSE, ...){
-  check.deprecation(...)
-  if (!inherits(model, "xgb.Booster")) {
-    stop("model: Has to be an object of class xgb.Booster")
-  }
-
-  if (!requireNamespace("DiagrammeR", quietly = TRUE)) {
-    stop("DiagrammeR package is required for xgb.plot.tree", call. = FALSE)
-  }
-
-  dt <- xgb.model.dt.tree(feature_names = feature_names, model = model, trees = trees)
-
-  dt[, label := paste0(Feature, "\nCover: ", Cover, ifelse(Feature == "Leaf", "\nValue: ", "\nGain: "), Quality)]
-  if (show_node_id)
-    dt[, label := paste0(ID, ": ", label)]
-  dt[Node == 0, label := paste0("Tree ", Tree, "\n", label)]
-  dt[, shape := "rectangle"][Feature == "Leaf", shape := "oval"]
-  dt[, filledcolor := "Beige"][Feature == "Leaf", filledcolor := "Khaki"]
-  # in order to draw the first tree on top:
-  dt <- dt[order(-Tree)]
-
-  nodes <- DiagrammeR::create_node_df(
-    n = nrow(dt),
-    ID = dt$ID,
-    label = dt$label,
-    fillcolor = dt$filledcolor,
-    shape = dt$shape,
-    data = dt$Feature,
-    fontcolor = "black")
-
-  edges <- DiagrammeR::create_edge_df(
-    from = match(dt[Feature != "Leaf", c(ID)] %>% rep(2), dt$ID),
-    to = match(dt[Feature != "Leaf", c(Yes, No)], dt$ID),
-    label = dt[Feature != "Leaf", paste("<", Split)] %>%
-      c(rep("", nrow(dt[Feature != "Leaf"]))),
-    style = dt[Feature != "Leaf", ifelse(Missing == Yes, "bold", "solid")] %>%
-      c(dt[Feature != "Leaf", ifelse(Missing == No, "bold", "solid")]),
-    rel = "leading_to")
-
-  graph <- DiagrammeR::create_graph(
-    nodes_df = nodes,
-    edges_df = edges,
-    attr_theme = NULL
-  ) %>%
-    DiagrammeR::add_global_graph_attrs(
-      attr_type = "graph",
-      attr = c("layout", "rankdir"),
-      value = c("dot", "LR")
-    ) %>%
-    DiagrammeR::add_global_graph_attrs(
-      attr_type = "node",
-      attr = c("color", "style", "fontname"),
-      value = c("DimGray", "filled", "Helvetica")
-    ) %>%
-    DiagrammeR::add_global_graph_attrs(
-      attr_type = "edge",
-      attr = c("color", "arrowsize", "arrowhead", "fontname"),
-      value = c("DimGray", "1.5", "vee", "Helvetica"))
-
-  if (!render) return(invisible(graph))
-
-  DiagrammeR::render_graph(graph, width = plot_width, height = plot_height)
-}
-
-# Avoid error messages during CRAN check.
-# The reason is that these variables are never declared.
-# They are mainly column names inferred by data.table...
-globalVariables(c("Feature", "ID", "Cover", "Quality", "Split", "Yes", "No", "Missing", ".", "shape", "filledcolor", "label"))
diff --git a/ml-xgboost/R-package/R/xgb.save.R b/ml-xgboost/R-package/R/xgb.save.R
deleted file mode 100644
index d969dae..0000000
--- a/ml-xgboost/R-package/R/xgb.save.R
+++ /dev/null
@@ -1,43 +0,0 @@
-#' Save xgboost model to binary file
-#'
-#' Save xgboost model to a file in binary format.
-#'
-#' @param model model object of \code{xgb.Booster} class.
-#' @param fname name of the file to write.
-#'
-#' @details
-#' This method allows saving a model in an xgboost-internal binary format which is universal
-#' among the various xgboost interfaces. In R, the saved model file could be read in later
-#' using either the \code{\link{xgb.load}} function or the \code{xgb_model} parameter
-#' of \code{\link{xgb.train}}.
-#'
-#' Note: a model can also be saved as an R-object (e.g., by using \code{\link[base]{saveRDS}}
-#' or \code{\link[base]{save}}). However, it would then only be compatible with R, and
-#' corresponding R-methods would need to be used to load it.
-#'
-#' @seealso
-#' \code{\link{xgb.load}}, \code{\link{xgb.Booster.complete}}.
-#'
-#' @examples
-#' data(agaricus.train, package='xgboost')
-#' data(agaricus.test, package='xgboost')
-#' train <- agaricus.train
-#' test <- agaricus.test
-#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
-#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
-#' xgb.save(bst, 'xgb.model')
-#' bst <- xgb.load('xgb.model')
-#' if (file.exists('xgb.model')) file.remove('xgb.model')
-#' pred <- predict(bst, test$data)
-#' @export
-xgb.save <- function(model, fname) {
-  if (typeof(fname) != "character")
-    stop("fname must be character")
-  if (!inherits(model, "xgb.Booster")) {
-    stop("model must be xgb.Booster.",
-         if (inherits(model, "xgb.DMatrix")) " Use xgb.DMatrix.save to save an xgb.DMatrix object." else "")
-  }
-  model <- xgb.Booster.complete(model, saveraw = FALSE)
-  .Call(XGBoosterSaveModel_R, model$handle, fname[1])
-  return(TRUE)
-}
diff --git a/ml-xgboost/R-package/R/xgb.save.raw.R b/ml-xgboost/R-package/R/xgb.save.raw.R
deleted file mode 100644
index 967a314..0000000
--- a/ml-xgboost/R-package/R/xgb.save.raw.R
+++ /dev/null
@@ -1,23 +0,0 @@
-#' Save xgboost model to R's raw vector;
-#' the user can call xgb.load.raw to load the model back from the raw vector
-#'
-#' Save xgboost model from xgboost or xgb.train
-#'
-#' @param model the model object.
-#'
-#' @examples
-#' data(agaricus.train, package='xgboost')
-#' data(agaricus.test, package='xgboost')
-#' train <- agaricus.train
-#' test <- agaricus.test
-#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
-#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
-#' raw <- xgb.save.raw(bst)
-#' bst <- xgb.load.raw(raw)
-#' pred <- predict(bst, test$data)
-#'
-#' @export
-xgb.save.raw <- function(model) {
-  handle <- xgb.get.handle(model)
-  .Call(XGBoosterModelToRaw_R, handle)
-}
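A compact sketch contrasting the two raw-vector pathways (assuming `bst` from the
examples above; note that xgb.serialize, documented next, additionally captures the
training parameters, while xgb.save.raw captures the model only):

    raw_model <- xgb.save.raw(bst)       # model only; format stable across versions
    bst2 <- xgb.load.raw(raw_model)
    raw_full <- xgb.serialize(bst)       # model + parameters; format is version-specific
    handle <- xgb.unserialize(raw_full)  # returns an xgb.Booster.handle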
diff --git a/ml-xgboost/R-package/R/xgb.serialize.R b/ml-xgboost/R-package/R/xgb.serialize.R
deleted file mode 100644
index 00bbb42..0000000
--- a/ml-xgboost/R-package/R/xgb.serialize.R
+++ /dev/null
@@ -1,21 +0,0 @@
-#' Serialize the booster instance into R's raw vector. The serialization method differs
-#' from \code{\link{xgb.save.raw}} in that the latter saves only the model but not its
-#' parameters. This serialization format is not stable across different xgboost versions.
-#'
-#' @param booster the booster instance
-#'
-#' @examples
-#' data(agaricus.train, package='xgboost')
-#' data(agaricus.test, package='xgboost')
-#' train <- agaricus.train
-#' test <- agaricus.test
-#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
-#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
-#' raw <- xgb.serialize(bst)
-#' bst <- xgb.unserialize(raw)
-#'
-#' @export
-xgb.serialize <- function(booster) {
-  handle <- xgb.get.handle(booster)
-  .Call(XGBoosterSerializeToBuffer_R, handle)
-}
diff --git a/ml-xgboost/R-package/R/xgb.train.R b/ml-xgboost/R-package/R/xgb.train.R
deleted file mode 100644
index 8733bcc..0000000
--- a/ml-xgboost/R-package/R/xgb.train.R
+++ /dev/null
@@ -1,377 +0,0 @@
-#' eXtreme Gradient Boosting Training
-#'
-#' \code{xgb.train} is an advanced interface for training an xgboost model.
-#' The \code{xgboost} function is a simpler wrapper for \code{xgb.train}.
-#'
-#' @param params the list of parameters.
-#' The complete list of parameters is available at \url{http://xgboost.readthedocs.io/en/latest/parameter.html}.
-#' Below is a shorter summary:
-#'
-#' 1. General Parameters
-#'
-#' \itemize{
-#' \item \code{booster} which booster to use, can be \code{gbtree} or \code{gblinear}. Default: \code{gbtree}.
-#' }
-#'
-#' 2. Booster Parameters
-#'
-#' 2.1. Parameters for Tree Booster
-#'
-#' \itemize{
-#' \item \code{eta} controls the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1} when it is added to the current approximation. Used to prevent overfitting by making the boosting process more conservative. A lower value for \code{eta} implies a larger value for \code{nrounds}: a low \code{eta} value means a model more robust to overfitting but slower to compute. Default: 0.3
-#' \item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. The larger, the more conservative the algorithm will be.
-#' \item \code{max_depth} maximum depth of a tree. Default: 6
-#' \item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to the minimum number of instances needed in each node. The larger, the more conservative the algorithm will be. Default: 1
-#' \item \code{subsample} subsample ratio of the training instances. Setting it to 0.5 means that xgboost randomly collects half of the data instances to grow trees, which prevents overfitting. It also makes computation shorter (because there is less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1
-#' \item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
-#' \item \code{num_parallel_tree} Experimental parameter. The number of trees to grow per round. Useful for testing Random Forest through xgboost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{nrounds = 1} accordingly). Default: 1
-#' \item \code{monotone_constraints} A numerical vector consisting of \code{1}, \code{0} and \code{-1}, with its length equal to the number of features in the training data. \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.
-#' \item \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions. Each item of the list represents one permitted interaction where specified features are allowed to interact with each other. Feature index values should start from \code{0} (\code{0} references the first column). Leave the argument unspecified for no interaction constraints.
-#' }
-#'
-#' 2.2. Parameters for Linear Booster
-#'
-#' \itemize{
-#' \item \code{lambda} L2 regularization term on weights. Default: 0
-#' \item \code{lambda_bias} L2 regularization term on bias. Default: 0
-#' \item \code{alpha} L1 regularization term on weights. (There is no L1 regularization on bias because it is not important.) Default: 0
-#' }
-#'
-#' 3. Task Parameters
-#'
-#' \itemize{
-#' \item \code{objective} specify the learning task and the corresponding learning objective; users can pass a self-defined function to it. The default objective options are below:
-#' \itemize{
-#' \item \code{reg:squarederror} Regression with squared loss (Default).
-#' \item \code{reg:logistic} logistic regression.
-#' \item \code{binary:logistic} logistic regression for binary classification. Output probability.
-#' \item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
-#' \item \code{num_class} set the number of classes. To use only with multiclass objectives.
-#' \item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class - 1}.
-#' \item \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be further reshaped to an ndata-by-nclass matrix. The result contains predicted probabilities of each data point belonging to each class.
-#' \item \code{rank:pairwise} set xgboost to do a ranking task by minimizing the pairwise loss.
-#' }
-#' \item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
-#' \item \code{eval_metric} evaluation metrics for validation data. Users can pass a self-defined function to it. Default: the metric will be assigned according to the objective (rmse for regression, error for classification, mean average precision for ranking). The full list is provided in the Details section.
-#' }
-#'
-#' @param data training dataset. \code{xgb.train} accepts only an \code{xgb.DMatrix} as the input.
-#' \code{xgboost}, in addition, also accepts \code{matrix}, \code{dgCMatrix}, or the name of a local data file.
-#' @param nrounds max number of boosting iterations.
-#' @param watchlist named list of xgb.DMatrix datasets to use for evaluating model performance.
-#' Metrics specified in either \code{eval_metric} or \code{feval} will be computed for each
-#' of these datasets during each boosting iteration, and stored in the end as a field named
-#' \code{evaluation_log} in the resulting object. When either \code{verbose>=1} or the
-#' \code{\link{cb.print.evaluation}} callback is engaged, the performance results are continuously
-#' printed out during the training.
-#' E.g., specifying \code{watchlist=list(validation1=mat1, validation2=mat2)} allows tracking
-#' the performance of each round's model on mat1 and mat2.
-#' @param obj customized objective function. Returns gradient and second order
-#' gradient with given prediction and dtrain.
-#' @param feval customized evaluation function. Returns
-#' \code{list(metric='metric-name', value='metric-value')} with given
-#' prediction and dtrain.
-#' @param verbose If 0, xgboost will stay silent. If 1, it will print information about performance.
-#' If 2, some additional information will be printed out.
-#' Note that setting \code{verbose > 0} automatically engages the
-#' \code{cb.print.evaluation(period=1)} callback function.
-#' @param print_every_n Print evaluation messages at every n-th iteration when \code{verbose>0}.
-#' Default is 1 which means all messages are printed. This parameter is passed to the
-#' \code{\link{cb.print.evaluation}} callback.
-#' @param early_stopping_rounds If \code{NULL}, the early stopping function is not triggered.
-#' If set to an integer \code{k}, training with a validation set will stop if the performance
-#' doesn't improve for \code{k} rounds.
-#' Setting this parameter engages the \code{\link{cb.early.stop}} callback.
-#' @param maximize If \code{feval} and \code{early_stopping_rounds} are set,
-#' then this parameter must be set as well.
-#' When it is \code{TRUE}, it means the larger the evaluation score the better.
-#' This parameter is passed to the \code{\link{cb.early.stop}} callback.
-#' @param save_period when it is non-NULL, the model is saved to disk after every \code{save_period} rounds;
-#' 0 means save at the end. The saving is handled by the \code{\link{cb.save.model}} callback.
-#' @param save_name the name or path for the periodically saved model file.
-#' @param xgb_model a previously built model to continue the training from.
-#' Could be either an object of class \code{xgb.Booster}, or its raw data, or the name of a
-#' file with a previously saved model.
-#' @param callbacks a list of callback functions to perform various tasks during boosting.
-#' See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
-#' parameters' values. The user can provide either existing or their own callback methods in order
-#' to customize the training process.
-#' @param ... other parameters to pass to \code{params}.
-#' @param label vector of response values. Should not be provided when data is
-#' a local data file name or an \code{xgb.DMatrix}.
-#' @param missing by default is set to NA, which means that NA values should be considered as 'missing'
-#' by the algorithm. Sometimes, 0 or another extreme value might be used to represent missing values.
-#' This parameter is only used when input is a dense matrix.
-#' @param weight a vector indicating the weight for each row of the input.
-#'
-#' @details
-#' These are the training functions for \code{xgboost}.
-#'
-#' The \code{xgb.train} interface supports advanced features such as \code{watchlist},
-#' customized objective and evaluation metric functions, and is therefore more flexible
-#' than the \code{xgboost} interface.
-#'
-#' Parallelization is automatically enabled if \code{OpenMP} is present.
-#' The number of threads can also be manually specified via the \code{nthread} parameter.
-#'
-#' The evaluation metric is chosen automatically by xgboost (according to the objective)
-#' when the \code{eval_metric} parameter is not provided.
-#' The user may set one or several \code{eval_metric} parameters.
-#' Note that when using a customized metric, only this single metric can be used.
-#' The following is the list of built-in metrics for which xgboost provides optimized implementations:
-#' \itemize{
-#' \item \code{rmse} root mean square error. \url{http://en.wikipedia.org/wiki/Root_mean_square_error}
-#' \item \code{logloss} negative log-likelihood. \url{http://en.wikipedia.org/wiki/Log-likelihood}
-#' \item \code{mlogloss} multiclass logloss. \url{http://wiki.fast.ai/index.php/Log_Loss}
-#' \item \code{error} Binary classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
-#' By default, it uses the 0.5 threshold for predicted values to define negative and positive instances.
-#' A different threshold \code{t} could be specified as "error@t".
-#' \item \code{merror} Multiclass classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
-#' \item \code{auc} Area under the curve. \url{http://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_curve} for ranking evaluation.
-#' \item \code{aucpr} Area under the PR curve. \url{https://en.wikipedia.org/wiki/Precision_and_recall} for ranking evaluation.
-#' \item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{http://en.wikipedia.org/wiki/NDCG}
-#' }
-#'
-#' The following callbacks are automatically created when certain parameters are set:
-#' \itemize{
-#' \item \code{cb.print.evaluation} is turned on when \code{verbose > 0};
-#' and the \code{print_every_n} parameter is passed to it.
-#' \item \code{cb.evaluation.log} is on when \code{watchlist} is present.
-#' \item \code{cb.early.stop}: when \code{early_stopping_rounds} is set.
-#' \item \code{cb.save.model}: when \code{save_period > 0} is set.
-#' }
-#'
-#' @return
-#' An object of class \code{xgb.Booster} with the following elements:
-#' \itemize{
-#' \item \code{handle} a handle (pointer) to the xgboost model in memory.
-#' \item \code{raw} a cached memory dump of the xgboost model saved as R's \code{raw} type.
-#' \item \code{niter} number of boosting iterations.
-#' \item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
-#' first column corresponding to iteration number and the rest corresponding to evaluation
-#' metrics' values. It is created by the \code{\link{cb.evaluation.log}} callback.
-#' \item \code{call} the function call.
-#' \item \code{params} parameters that were passed to the xgboost library. Note that it does not
-#' capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
-#' \item \code{callbacks} callback functions that were either automatically assigned or
-#' explicitly passed.
-#' \item \code{best_iteration} iteration number with the best evaluation metric value
-#' (only available with early stopping).
-#' \item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
-#' which could further be used in the \code{predict} method
-#' (only available with early stopping).
-#' \item \code{best_score} the best evaluation metric value during early stopping
-#' (only available with early stopping).
-#' \item \code{feature_names} names of the training dataset features
-#' (only when column names were defined in training data).
-#' \item \code{nfeatures} number of features in training data.
-#' } -#' -#' @seealso -#' \code{\link{callbacks}}, -#' \code{\link{predict.xgb.Booster}}, -#' \code{\link{xgb.cv}} -#' -#' @references -#' -#' Tianqi Chen and Carlos Guestrin, "XGBoost: A Scalable Tree Boosting System", -#' 22nd SIGKDD Conference on Knowledge Discovery and Data Mining, 2016, \url{https://arxiv.org/abs/1603.02754} -#' -#' @examples -#' data(agaricus.train, package='xgboost') -#' data(agaricus.test, package='xgboost') -#' -#' dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label) -#' dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label) -#' watchlist <- list(train = dtrain, eval = dtest) -#' -#' ## A simple xgb.train example: -#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2, -#' objective = "binary:logistic", eval_metric = "auc") -#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist) -#' -#' -#' ## An xgb.train example where custom objective and evaluation metric are used: -#' logregobj <- function(preds, dtrain) { -#' labels <- getinfo(dtrain, "label") -#' preds <- 1/(1 + exp(-preds)) -#' grad <- preds - labels -#' hess <- preds * (1 - preds) -#' return(list(grad = grad, hess = hess)) -#' } -#' evalerror <- function(preds, dtrain) { -#' labels <- getinfo(dtrain, "label") -#' err <- as.numeric(sum(labels != (preds > 0)))/length(labels) -#' return(list(metric = "error", value = err)) -#' } -#' -#' # These functions could be used by passing them either: -#' # as 'objective' and 'eval_metric' parameters in the params list: -#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2, -#' objective = logregobj, eval_metric = evalerror) -#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist) -#' -#' # or through the ... arguments: -#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2) -#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, -#' objective = logregobj, eval_metric = evalerror) -#' -#' # or as dedicated 'obj' and 'feval' parameters of xgb.train: -#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, -#' obj = logregobj, feval = evalerror) -#' -#' -#' ## An xgb.train example of using variable learning rates at each iteration: -#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2, -#' objective = "binary:logistic", eval_metric = "auc") -#' my_etas <- list(eta = c(0.5, 0.1)) -#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, -#' callbacks = list(cb.reset.parameters(my_etas))) -#' -#' ## Early stopping: -#' bst <- xgb.train(param, dtrain, nrounds = 25, watchlist, -#' early_stopping_rounds = 3) -#' -#' ## An 'xgboost' interface example: -#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, -#' max_depth = 2, eta = 1, nthread = 2, nrounds = 2, -#' objective = "binary:logistic") -#' pred <- predict(bst, agaricus.test$data) -#' -#' @rdname xgb.train -#' @export -xgb.train <- function(params = list(), data, nrounds, watchlist = list(), - obj = NULL, feval = NULL, verbose = 1, print_every_n = 1L, - early_stopping_rounds = NULL, maximize = NULL, - save_period = NULL, save_name = "xgboost.model", - xgb_model = NULL, callbacks = list(), ...) { - - check.deprecation(...) - - params <- check.booster.params(params, ...) 
-
-  check.custom.obj()
-  check.custom.eval()
-
-  # data & watchlist checks
-  dtrain <- data
-  if (!inherits(dtrain, "xgb.DMatrix"))
-    stop("second argument dtrain must be xgb.DMatrix")
-  if (length(watchlist) > 0) {
-    if (typeof(watchlist) != "list" ||
-        !all(vapply(watchlist, inherits, logical(1), what = 'xgb.DMatrix')))
-      stop("watchlist must be a list of xgb.DMatrix elements")
-    evnames <- names(watchlist)
-    if (is.null(evnames) || any(evnames == ""))
-      stop("each element of the watchlist must have a name tag")
-  }
-
-  # evaluation printing callback
-  params <- c(params)
-  print_every_n <- max(as.integer(print_every_n), 1L)
-  if (!has.callbacks(callbacks, 'cb.print.evaluation') &&
-      verbose) {
-    callbacks <- add.cb(callbacks, cb.print.evaluation(print_every_n))
-  }
-  # evaluation log callback: it is automatically enabled when the watchlist is provided
-  evaluation_log <- list()
-  if (!has.callbacks(callbacks, 'cb.evaluation.log') &&
-      length(watchlist) > 0) {
-    callbacks <- add.cb(callbacks, cb.evaluation.log())
-  }
-  # Model saving callback
-  if (!is.null(save_period) &&
-      !has.callbacks(callbacks, 'cb.save.model')) {
-    callbacks <- add.cb(callbacks, cb.save.model(save_period, save_name))
-  }
-  # Early stopping callback
-  stop_condition <- FALSE
-  if (!is.null(early_stopping_rounds) &&
-      !has.callbacks(callbacks, 'cb.early.stop')) {
-    callbacks <- add.cb(callbacks, cb.early.stop(early_stopping_rounds,
-                                                 maximize = maximize, verbose = verbose))
-  }
-
-  # Sort the callbacks into categories
-  cb <- categorize.callbacks(callbacks)
-  params['validate_parameters'] <- TRUE
-  if (!is.null(params[['seed']])) {
-    warning("xgb.train: `seed` is ignored in R package. Use `set.seed()` instead.")
-  }
-
-  # The tree updating process would need slightly different handling
-  is_update <- NVL(params[['process_type']], '.') == 'update'
-
-  # Construct a booster (either a new one or load from xgb_model)
-  handle <- xgb.Booster.handle(params, append(watchlist, dtrain), xgb_model)
-  bst <- xgb.handleToBooster(handle)
-
-  # extract parameters that can affect the relationship between the number of trees and the number of iterations
-  num_class <- max(as.numeric(NVL(params[['num_class']], 1)), 1)
-  num_parallel_tree <- max(as.numeric(NVL(params[['num_parallel_tree']], 1)), 1)
-
-  # When 'xgb_model' was set, find out how many boosting iterations it has
-  niter_init <- 0
-  if (!is.null(xgb_model)) {
-    niter_init <- as.numeric(xgb.attr(bst, 'niter')) + 1
-    if (length(niter_init) == 0) {
-      niter_init <- xgb.ntree(bst) %/% (num_parallel_tree * num_class)
-    }
-  }
-  if (is_update && nrounds > niter_init)
-    stop("nrounds cannot be larger than ", niter_init, " (nrounds of xgb_model)")
-
-  # TODO: distributed code
-  rank <- 0
-
-  niter_skip <- ifelse(is_update, 0, niter_init)
-  begin_iteration <- niter_skip + 1
-  end_iteration <- niter_skip + nrounds
-
-  # the main loop for boosting iterations
-  for (iteration in begin_iteration:end_iteration) {
-
-    for (f in cb$pre_iter) f()
-
-    xgb.iter.update(bst$handle, dtrain, iteration - 1, obj)
-
-    bst_evaluation <- numeric(0)
-    if (length(watchlist) > 0)
-      bst_evaluation <- xgb.iter.eval(bst$handle, watchlist, iteration - 1, feval)
-
-    xgb.attr(bst$handle, 'niter') <- iteration - 1
-
-    for (f in cb$post_iter) f()
-
-    if (stop_condition) break
-  }
-  for (f in cb$finalize) f(finalize = TRUE)
-
-  bst <- xgb.Booster.complete(bst, saveraw = TRUE)
-
-  # store the total number of boosting iterations
-  bst$niter <- end_iteration
-
-  # store the evaluation results
-  if (length(evaluation_log) > 0 &&
-      nrow(evaluation_log) > 0) {
nrow(evaluation_log) > 0) { - # include the previous compatible history when available - if (inherits(xgb_model, 'xgb.Booster') && - !is_update && - !is.null(xgb_model$evaluation_log) && - isTRUE(all.equal(colnames(evaluation_log), - colnames(xgb_model$evaluation_log)))) { - evaluation_log <- rbindlist(list(xgb_model$evaluation_log, evaluation_log)) - } - bst$evaluation_log <- evaluation_log - } - - bst$call <- match.call() - bst$params <- params - bst$callbacks <- callbacks - if (!is.null(colnames(dtrain))) - bst$feature_names <- colnames(dtrain) - bst$nfeatures <- ncol(dtrain) - - return(bst) -} diff --git a/ml-xgboost/R-package/R/xgb.unserialize.R b/ml-xgboost/R-package/R/xgb.unserialize.R deleted file mode 100644 index 1a62e4c..0000000 --- a/ml-xgboost/R-package/R/xgb.unserialize.R +++ /dev/null @@ -1,12 +0,0 @@ -#' Load the instance back from \code{\link{xgb.serialize}} -#' -#' @param buffer the buffer containing booster instance saved by \code{\link{xgb.serialize}} -#' -#' @export -xgb.unserialize <- function(buffer) { - cachelist <- list() - handle <- .Call(XGBoosterCreate_R, cachelist) - .Call(XGBoosterUnserializeFromBuffer_R, handle, buffer) - class(handle) <- "xgb.Booster.handle" - return (handle) -} diff --git a/ml-xgboost/R-package/R/xgboost.R b/ml-xgboost/R-package/R/xgboost.R deleted file mode 100644 index 2fddfa4..0000000 --- a/ml-xgboost/R-package/R/xgboost.R +++ /dev/null @@ -1,113 +0,0 @@ -# Simple interface for training an xgboost model that wraps \code{xgb.train}. -# Its documentation is combined with xgb.train. -# -#' @rdname xgb.train -#' @export -xgboost <- function(data = NULL, label = NULL, missing = NA, weight = NULL, - params = list(), nrounds, - verbose = 1, print_every_n = 1L, - early_stopping_rounds = NULL, maximize = NULL, - save_period = NULL, save_name = "xgboost.model", - xgb_model = NULL, callbacks = list(), ...) { - - dtrain <- xgb.get.DMatrix(data, label, missing, weight) - - watchlist <- list(train = dtrain) - - bst <- xgb.train(params, dtrain, nrounds, watchlist, verbose = verbose, print_every_n = print_every_n, - early_stopping_rounds = early_stopping_rounds, maximize = maximize, - save_period = save_period, save_name = save_name, - xgb_model = xgb_model, callbacks = callbacks, ...) - return (bst) -} - -#' Training part from Mushroom Data Set -#' -#' This data set is originally from the Mushroom data set, -#' UCI Machine Learning Repository. -#' -#' This data set includes the following fields: -#' -#' \itemize{ -#' \item \code{label} the label for each record -#' \item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns. -#' } -#' -#' @references -#' https://archive.ics.uci.edu/ml/datasets/Mushroom -#' -#' Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository -#' [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, -#' School of Information and Computer Science. -#' -#' @docType data -#' @keywords datasets -#' @name agaricus.train -#' @usage data(agaricus.train) -#' @format A list containing a label vector, and a dgCMatrix object with 6513 -#' rows and 127 variables -NULL - -#' Test part from Mushroom Data Set -#' -#' This data set is originally from the Mushroom data set, -#' UCI Machine Learning Repository. -#' -#' This data set includes the following fields: -#' -#' \itemize{ -#' \item \code{label} the label for each record -#' \item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns. -#' } -#' -#' @references -#' https://archive.ics.uci.edu/ml/datasets/Mushroom -#' -#' Bache, K. 
& Lichman, M. (2013). UCI Machine Learning Repository
-#' [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
-#' School of Information and Computer Science.
-#'
-#' @docType data
-#' @keywords datasets
-#' @name agaricus.test
-#' @usage data(agaricus.test)
-#' @format A list containing a label vector, and a dgCMatrix object with 1611
-#' rows and 126 variables
-NULL
-
-# Various imports
-#' @importClassesFrom Matrix dgCMatrix dgeMatrix
-#' @importFrom Matrix colSums
-#' @importFrom Matrix sparse.model.matrix
-#' @importFrom Matrix sparseVector
-#' @importFrom Matrix sparseMatrix
-#' @importFrom Matrix t
-#' @importFrom data.table data.table
-#' @importFrom data.table is.data.table
-#' @importFrom data.table as.data.table
-#' @importFrom data.table :=
-#' @importFrom data.table rbindlist
-#' @importFrom data.table setkey
-#' @importFrom data.table setkeyv
-#' @importFrom data.table setnames
-#' @importFrom magrittr %>%
-#' @importFrom stringi stri_detect_regex
-#' @importFrom stringi stri_match_first_regex
-#' @importFrom stringi stri_replace_first_regex
-#' @importFrom stringi stri_replace_all_regex
-#' @importFrom stringi stri_split_regex
-#' @importFrom utils object.size str tail
-#' @importFrom stats predict
-#' @importFrom stats median
-#' @importFrom utils head
-#' @importFrom graphics barplot
-#' @importFrom graphics lines
-#' @importFrom graphics points
-#' @importFrom graphics grid
-#' @importFrom graphics par
-#' @importFrom graphics title
-#' @importFrom grDevices rgb
-#'
-#' @import methods
-#' @useDynLib xgboost, .registration = TRUE
-NULL
diff --git a/ml-xgboost/R-package/README.md b/ml-xgboost/R-package/README.md
deleted file mode 100644
index c548731..0000000
--- a/ml-xgboost/R-package/README.md
+++ /dev/null
@@ -1,33 +0,0 @@
-XGBoost R Package for Scalable GBM
-==================================
-
-[![CRAN Status Badge](http://www.r-pkg.org/badges/version/xgboost)](https://cran.r-project.org/web/packages/xgboost)
-[![CRAN Downloads](http://cranlogs.r-pkg.org/badges/xgboost)](https://cran.rstudio.com/web/packages/xgboost/index.html)
-[![Documentation Status](https://readthedocs.org/projects/xgboost/badge/?version=latest)](http://xgboost.readthedocs.org/en/latest/R-package/index.html)
-
-Resources
----------
-* [XGBoost R Package Online Documentation](http://xgboost.readthedocs.org/en/latest/R-package/index.html)
-  - Check this out for detailed documents, examples and tutorials.
-
-Installation
-------------
-
-We are [on CRAN](https://cran.r-project.org/web/packages/xgboost/index.html) now. For the stable, pre-compiled version (for Windows and OS X), please install from CRAN:
-
-```r
-install.packages('xgboost')
-```
-
-For more detailed installation instructions, please see [here](http://xgboost.readthedocs.org/en/latest/build.html#r-package-installation).
-
-Examples
---------
-
-* Please visit the [walk-through examples](demo).
-* See also the [example scripts](../demo/kaggle-higgs) for the Kaggle Higgs Challenge, including a [speedtest script](../demo/kaggle-higgs/speedtest.R) on that dataset, and the [Otto challenge](../demo/kaggle-otto) examples, including an [RMarkdown document](../demo/kaggle-otto/understandingXGBoostModel.Rmd).
-
-Development
------------
-
-* See the [R Package section](https://xgboost.readthedocs.io/en/latest/contribute.html#r-package) of the contributors guide.
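Taken together, the R sources deleted above form one small pipeline: `xgboost()` wraps the input into an `xgb.DMatrix` and delegates to `xgb.train()`, while `xgb.unserialize()` restores a booster handle from the raw vector produced by `xgb.serialize()`. The sketch below is a minimal illustration of that round trip, assuming the pre-removal API exactly as documented in the deleted files; the final wrapping step uses `xgb.handleToBooster()`, the internal (non-exported) helper that the deleted `xgb.train()` body calls, so it is accessed with `:::` here.

```r
library(xgboost)

# Fit a small model on the bundled mushroom data, as in the deleted examples.
data(agaricus.train, package = "xgboost")
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
               max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
               objective = "binary:logistic")

# Round-trip the booster through a raw buffer.
buf <- xgb.serialize(bst)       # raw vector holding the booster state
handle <- xgb.unserialize(buf)  # returns an "xgb.Booster.handle", per the body above

# A handle is not a full booster; wrap it back before predicting.
# Assumption: xgb.handleToBooster() is internal, hence the ":::" access.
bst2 <- xgboost:::xgb.handleToBooster(handle)
pred <- predict(bst2, agaricus.train$data)
```

As the deleted `xgb.unserialize()` body shows, the function returns a bare handle rather than a complete `xgb.Booster` object, which is why the extra wrapping step is needed before `predict()` can be called.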
diff --git a/ml-xgboost/R-package/cleanup b/ml-xgboost/R-package/cleanup deleted file mode 100644 index eb86699..0000000 --- a/ml-xgboost/R-package/cleanup +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh - -rm -f src/Makevars -rm -f CMakeLists.txt diff --git a/ml-xgboost/R-package/configure b/ml-xgboost/R-package/configure deleted file mode 100644 index 8dab660..0000000 --- a/ml-xgboost/R-package/configure +++ /dev/null @@ -1,3891 +0,0 @@ -#! /bin/sh -# Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for xgboost 0.6-3. -# -# -# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. -# -# -# This configure script is free software; the Free Software Foundation -# gives unlimited permission to copy, distribute and modify it. -## -------------------- ## -## M4sh Initialization. ## -## -------------------- ## - -# Be more Bourne compatible -DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which - # is contrary to our usage. Disable this feature. - alias -g '${1+"$@"}'='"$@"' - setopt NO_GLOB_SUBST -else - case `(set -o) 2>/dev/null` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi - - -as_nl=' -' -export as_nl -# Printing a long string crashes Solaris 7 /usr/bin/printf. -as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. -if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='printf %s\n' - as_echo_n='printf %s' -else - if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then - as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' - as_echo_n='/usr/ucb/echo -n' - else - as_echo_body='eval expr "X$1" : "X\\(.*\\)"' - as_echo_n_body='eval - arg=$1; - case $arg in #( - *"$as_nl"*) - expr "X$arg" : "X\\(.*\\)$as_nl"; - arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; - esac; - expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" - ' - export as_echo_n_body - as_echo_n='sh -c $as_echo_n_body as_echo' - fi - export as_echo_body - as_echo='sh -c $as_echo_body as_echo' -fi - -# The user is always right. -if test "${PATH_SEPARATOR+set}" != set; then - PATH_SEPARATOR=: - (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { - (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || - PATH_SEPARATOR=';' - } -fi - - -# IFS -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent editors from complaining about space-tab. -# (If _AS_PATH_WALK were called with IFS unset, it would disable word -# splitting by setting IFS to empty value.) -IFS=" "" $as_nl" - -# Find who we are. Look in the path if we contain no directory separator. -as_myself= -case $0 in #(( - *[\\/]* ) as_myself=$0 ;; - *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break - done -IFS=$as_save_IFS - - ;; -esac -# We did not find ourselves, most probably we were run as `sh COMMAND' -# in which case we are not to be found in the path. -if test "x$as_myself" = x; then - as_myself=$0 -fi -if test ! -f "$as_myself"; then - $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 - exit 1 -fi - -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. -for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : -done -PS1='$ ' -PS2='> ' -PS4='+ ' - -# NLS nuisances. -LC_ALL=C -export LC_ALL -LANGUAGE=C -export LANGUAGE - -# CDPATH. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - -# Use a proper internal environment variable to ensure we don't fall - # into an infinite loop, continuously re-executing ourselves. - if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then - _as_can_reexec=no; export _as_can_reexec; - # We cannot yet assume a decent shell, so we have to provide a -# neutralization value for shells without unset; and this also -# works around shells that cannot unset nonexistent variables. -# Preserve -v and -x to the replacement shell. -BASH_ENV=/dev/null -ENV=/dev/null -(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV -case $- in # (((( - *v*x* | *x*v* ) as_opts=-vx ;; - *v* ) as_opts=-v ;; - *x* ) as_opts=-x ;; - * ) as_opts= ;; -esac -exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} -# Admittedly, this is quite paranoid, since all the known shells bail -# out after a failed `exec'. -$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 -as_fn_exit 255 - fi - # We don't want this to propagate to other subprocesses. - { _as_can_reexec=; unset _as_can_reexec;} -if test "x$CONFIG_SHELL" = x; then - as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which - # is contrary to our usage. Disable this feature. - alias -g '\${1+\"\$@\"}'='\"\$@\"' - setopt NO_GLOB_SUBST -else - case \`(set -o) 2>/dev/null\` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi -" - as_required="as_fn_return () { (exit \$1); } -as_fn_success () { as_fn_return 0; } -as_fn_failure () { as_fn_return 1; } -as_fn_ret_success () { return 0; } -as_fn_ret_failure () { return 1; } - -exitcode=0 -as_fn_success || { exitcode=1; echo as_fn_success failed.; } -as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } -as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } -as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } -if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : - -else - exitcode=1; echo positional parameters were not saved. 
-fi -test x\$exitcode = x0 || exit 1 -test -x / || exit 1" - as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO - as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO - eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && - test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1" - if (eval "$as_required") 2>/dev/null; then : - as_have_required=yes -else - as_have_required=no -fi - if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : - -else - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -as_found=false -for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - as_found=: - case $as_dir in #( - /*) - for as_base in sh bash ksh sh5; do - # Try only shells that exist, to save several forks. - as_shell=$as_dir/$as_base - if { test -f "$as_shell" || test -f "$as_shell.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : - CONFIG_SHELL=$as_shell as_have_required=yes - if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : - break 2 -fi -fi - done;; - esac - as_found=false -done -$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : - CONFIG_SHELL=$SHELL as_have_required=yes -fi; } -IFS=$as_save_IFS - - - if test "x$CONFIG_SHELL" != x; then : - export CONFIG_SHELL - # We cannot yet assume a decent shell, so we have to provide a -# neutralization value for shells without unset; and this also -# works around shells that cannot unset nonexistent variables. -# Preserve -v and -x to the replacement shell. -BASH_ENV=/dev/null -ENV=/dev/null -(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV -case $- in # (((( - *v*x* | *x*v* ) as_opts=-vx ;; - *v* ) as_opts=-v ;; - *x* ) as_opts=-x ;; - * ) as_opts= ;; -esac -exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} -# Admittedly, this is quite paranoid, since all the known shells bail -# out after a failed `exec'. -$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 -exit 255 -fi - - if test x$as_have_required = xno; then : - $as_echo "$0: This script requires a shell more modern than all" - $as_echo "$0: the shells that I found on your system." - if test x${ZSH_VERSION+set} = xset ; then - $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" - $as_echo "$0: be upgraded to zsh 4.3.4 or later." - else - $as_echo "$0: Please tell bug-autoconf@gnu.org about your system, -$0: including any error possibly output before this -$0: message. Then install a modern shell, or manually run -$0: the script under such a shell if you do have one." - fi - exit 1 -fi -fi -fi -SHELL=${CONFIG_SHELL-/bin/sh} -export SHELL -# Unset more variables known to interfere with behavior of common tools. -CLICOLOR_FORCE= GREP_OPTIONS= -unset CLICOLOR_FORCE GREP_OPTIONS - -## --------------------- ## -## M4sh Shell Functions. ## -## --------------------- ## -# as_fn_unset VAR -# --------------- -# Portably unset VAR. -as_fn_unset () -{ - { eval $1=; unset $1;} -} -as_unset=as_fn_unset - -# as_fn_set_status STATUS -# ----------------------- -# Set $? to STATUS, without forking. 
-as_fn_set_status () -{ - return $1 -} # as_fn_set_status - -# as_fn_exit STATUS -# ----------------- -# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. -as_fn_exit () -{ - set +e - as_fn_set_status $1 - exit $1 -} # as_fn_exit - -# as_fn_mkdir_p -# ------------- -# Create "$as_dir" as a directory, including parents if necessary. -as_fn_mkdir_p () -{ - - case $as_dir in #( - -*) as_dir=./$as_dir;; - esac - test -d "$as_dir" || eval $as_mkdir_p || { - as_dirs= - while :; do - case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( - *) as_qdir=$as_dir;; - esac - as_dirs="'$as_qdir' $as_dirs" - as_dir=`$as_dirname -- "$as_dir" || -$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_dir" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - test -d "$as_dir" && break - done - test -z "$as_dirs" || eval "mkdir $as_dirs" - } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" - - -} # as_fn_mkdir_p - -# as_fn_executable_p FILE -# ----------------------- -# Test if FILE is an executable regular file. -as_fn_executable_p () -{ - test -f "$1" && test -x "$1" -} # as_fn_executable_p -# as_fn_append VAR VALUE -# ---------------------- -# Append the text in VALUE to the end of the definition contained in VAR. Take -# advantage of any shell optimizations that allow amortized linear growth over -# repeated appends, instead of the typical quadratic growth present in naive -# implementations. -if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : - eval 'as_fn_append () - { - eval $1+=\$2 - }' -else - as_fn_append () - { - eval $1=\$$1\$2 - } -fi # as_fn_append - -# as_fn_arith ARG... -# ------------------ -# Perform arithmetic evaluation on the ARGs, and store the result in the -# global $as_val. Take advantage of shells that can avoid forks. The arguments -# must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : - eval 'as_fn_arith () - { - as_val=$(( $* )) - }' -else - as_fn_arith () - { - as_val=`expr "$@" || test $? -eq 1` - } -fi # as_fn_arith - - -# as_fn_error STATUS ERROR [LINENO LOG_FD] -# ---------------------------------------- -# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are -# provided, also output the error to LOG_FD, referencing LINENO. Then exit the -# script with STATUS, using 1 if that was 0. -as_fn_error () -{ - as_status=$1; test $as_status -eq 0 && as_status=1 - if test "$4"; then - as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 - fi - $as_echo "$as_me: error: $2" >&2 - as_fn_exit $as_status -} # as_fn_error - -if expr a : '\(a\)' >/dev/null 2>&1 && - test "X`expr 00001 : '.*\(...\)'`" = X001; then - as_expr=expr -else - as_expr=false -fi - -if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then - as_basename=basename -else - as_basename=false -fi - -if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then - as_dirname=dirname -else - as_dirname=false -fi - -as_me=`$as_basename -- "$0" || -$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ - X"$0" : 'X\(//\)$' \| \ - X"$0" : 'X\(/\)' \| . 
2>/dev/null ||
-$as_echo X/"$0" |
-    sed '/^.*\/\([^/][^/]*\)\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\/\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\/\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-
-# Avoid depending upon Character Ranges.
-as_cr_letters='abcdefghijklmnopqrstuvwxyz'
-as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
-as_cr_Letters=$as_cr_letters$as_cr_LETTERS
-as_cr_digits='0123456789'
-as_cr_alnum=$as_cr_Letters$as_cr_digits
-
-
-  as_lineno_1=$LINENO as_lineno_1a=$LINENO
-  as_lineno_2=$LINENO as_lineno_2a=$LINENO
-  eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" &&
-  test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || {
-  # Blame Lee E. McMahon (1931-1989) for sed's syntax.  :-)
-  sed -n '
-    p
-    /[$]LINENO/=
-  ' <$as_myself |
-    sed '
-      s/[$]LINENO.*/&-/
-      t lineno
-      b
-      :lineno
-      N
-      :loop
-      s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
-      t loop
-      s/-\n.*//
-    ' >$as_me.lineno &&
-  chmod +x "$as_me.lineno" ||
-    { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
-
-  # If we had to re-execute with $CONFIG_SHELL, we're ensured to have
-  # already done that, so ensure we don't try to do so again and fall
-  # in an infinite loop.  This has already happened in practice.
-  _as_can_reexec=no; export _as_can_reexec
-  # Don't try to exec as it changes $[0], causing all sort of problems
-  # (the dirname of $[0] is not the place where we might find the
-  # original and so on.  Autoconf is especially sensitive to this).
-  . "./$as_me.lineno"
-  # Exit status is that of the last command.
-  exit
-}
-
-ECHO_C= ECHO_N= ECHO_T=
-case `echo -n x` in #(((((
--n*)
-  case `echo 'xy\c'` in
-  *c*)	   ECHO_T='	';;	# ECHO_T is single tab character.
-  xy)  ECHO_C='\c';;
-  *)   echo `echo ksh88 bug on AIX 6.1` > /dev/null
-       ECHO_T='	';;
-  esac;;
-*)
-  ECHO_N='-n';;
-esac
-
-rm -f conf$$ conf$$.exe conf$$.file
-if test -d conf$$.dir; then
-  rm -f conf$$.dir/conf$$.file
-else
-  rm -f conf$$.dir
-  mkdir conf$$.dir 2>/dev/null
-fi
-if (echo >conf$$.file) 2>/dev/null; then
-  if ln -s conf$$.file conf$$ 2>/dev/null; then
-    as_ln_s='ln -s'
-    # ... but there are two gotchas:
-    # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
-    # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
-    # In both cases, we have to default to `cp -pR'.
-    ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
-      as_ln_s='cp -pR'
-  elif ln conf$$.file conf$$ 2>/dev/null; then
-    as_ln_s=ln
-  else
-    as_ln_s='cp -pR'
-  fi
-else
-  as_ln_s='cp -pR'
-fi
-rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
-rmdir conf$$.dir 2>/dev/null
-
-if mkdir -p . 2>/dev/null; then
-  as_mkdir_p='mkdir -p "$as_dir"'
-else
-  test -d ./-p && rmdir ./-p
-  as_mkdir_p=false
-fi
-
-as_test_x='test -x'
-as_executable_p=as_fn_executable_p
-
-# Sed expression to map a string onto a valid CPP name.
-as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
-
-# Sed expression to map a string onto a valid variable name.
-as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
-
-
-test -n "$DJDIR" || exec 7<&0 </dev/null 6>&1
-
-# Name of the host.
-# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status,
-# so uname gets run too.
-ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
-
-#
-# Initializations.
-#
-ac_default_prefix=/usr/local
-ac_clean_files=
-ac_config_libobj_dir=.
-LIBOBJS=
-cross_compiling=no
-subdirs=
-MFLAGS=
-MAKEFLAGS=
-
-# Identity of this package.
-PACKAGE_NAME='xgboost' -PACKAGE_TARNAME='xgboost' -PACKAGE_VERSION='0.6-3' -PACKAGE_STRING='xgboost 0.6-3' -PACKAGE_BUGREPORT='' -PACKAGE_URL='' - -ac_subst_vars='LTLIBOBJS -LIBOBJS -BACKTRACE_LIB -ENDIAN_FLAG -OPENMP_LIB -OPENMP_CXXFLAGS -OBJEXT -EXEEXT -ac_ct_CC -CPPFLAGS -LDFLAGS -CFLAGS -CC -target_alias -host_alias -build_alias -LIBS -ECHO_T -ECHO_N -ECHO_C -DEFS -mandir -localedir -libdir -psdir -pdfdir -dvidir -htmldir -infodir -docdir -oldincludedir -includedir -localstatedir -sharedstatedir -sysconfdir -datadir -datarootdir -libexecdir -sbindir -bindir -program_transform_name -prefix -exec_prefix -PACKAGE_URL -PACKAGE_BUGREPORT -PACKAGE_STRING -PACKAGE_VERSION -PACKAGE_TARNAME -PACKAGE_NAME -PATH_SEPARATOR -SHELL' -ac_subst_files='' -ac_user_opts=' -enable_option_checking -' - ac_precious_vars='build_alias -host_alias -target_alias -CC -CFLAGS -LDFLAGS -LIBS -CPPFLAGS' - - -# Initialize some variables set by options. -ac_init_help= -ac_init_version=false -ac_unrecognized_opts= -ac_unrecognized_sep= -# The variables have the same names as the options, with -# dashes changed to underlines. -cache_file=/dev/null -exec_prefix=NONE -no_create= -no_recursion= -prefix=NONE -program_prefix=NONE -program_suffix=NONE -program_transform_name=s,x,x, -silent= -site= -srcdir= -verbose= -x_includes=NONE -x_libraries=NONE - -# Installation directory options. -# These are left unexpanded so users can "make install exec_prefix=/foo" -# and all the variables that are supposed to be based on exec_prefix -# by default will actually change. -# Use braces instead of parens because sh, perl, etc. also accept them. -# (The list follows the same order as the GNU Coding Standards.) -bindir='${exec_prefix}/bin' -sbindir='${exec_prefix}/sbin' -libexecdir='${exec_prefix}/libexec' -datarootdir='${prefix}/share' -datadir='${datarootdir}' -sysconfdir='${prefix}/etc' -sharedstatedir='${prefix}/com' -localstatedir='${prefix}/var' -includedir='${prefix}/include' -oldincludedir='/usr/include' -docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' -infodir='${datarootdir}/info' -htmldir='${docdir}' -dvidir='${docdir}' -pdfdir='${docdir}' -psdir='${docdir}' -libdir='${exec_prefix}/lib' -localedir='${datarootdir}/locale' -mandir='${datarootdir}/man' - -ac_prev= -ac_dashdash= -for ac_option -do - # If the previous option needs an argument, assign it. - if test -n "$ac_prev"; then - eval $ac_prev=\$ac_option - ac_prev= - continue - fi - - case $ac_option in - *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; - *=) ac_optarg= ;; - *) ac_optarg=yes ;; - esac - - # Accept the important Cygnus configure options, so we can diagnose typos. 
- - case $ac_dashdash$ac_option in - --) - ac_dashdash=yes ;; - - -bindir | --bindir | --bindi | --bind | --bin | --bi) - ac_prev=bindir ;; - -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) - bindir=$ac_optarg ;; - - -build | --build | --buil | --bui | --bu) - ac_prev=build_alias ;; - -build=* | --build=* | --buil=* | --bui=* | --bu=*) - build_alias=$ac_optarg ;; - - -cache-file | --cache-file | --cache-fil | --cache-fi \ - | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) - ac_prev=cache_file ;; - -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ - | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) - cache_file=$ac_optarg ;; - - --config-cache | -C) - cache_file=config.cache ;; - - -datadir | --datadir | --datadi | --datad) - ac_prev=datadir ;; - -datadir=* | --datadir=* | --datadi=* | --datad=*) - datadir=$ac_optarg ;; - - -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ - | --dataroo | --dataro | --datar) - ac_prev=datarootdir ;; - -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ - | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) - datarootdir=$ac_optarg ;; - - -disable-* | --disable-*) - ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid feature name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"enable_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval enable_$ac_useropt=no ;; - - -docdir | --docdir | --docdi | --doc | --do) - ac_prev=docdir ;; - -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) - docdir=$ac_optarg ;; - - -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) - ac_prev=dvidir ;; - -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) - dvidir=$ac_optarg ;; - - -enable-* | --enable-*) - ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid feature name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"enable_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval enable_$ac_useropt=\$ac_optarg ;; - - -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ - | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ - | --exec | --exe | --ex) - ac_prev=exec_prefix ;; - -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ - | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ - | --exec=* | --exe=* | --ex=*) - exec_prefix=$ac_optarg ;; - - -gas | --gas | --ga | --g) - # Obsolete; use --with-gas. 
- with_gas=yes ;; - - -help | --help | --hel | --he | -h) - ac_init_help=long ;; - -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) - ac_init_help=recursive ;; - -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) - ac_init_help=short ;; - - -host | --host | --hos | --ho) - ac_prev=host_alias ;; - -host=* | --host=* | --hos=* | --ho=*) - host_alias=$ac_optarg ;; - - -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) - ac_prev=htmldir ;; - -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ - | --ht=*) - htmldir=$ac_optarg ;; - - -includedir | --includedir | --includedi | --included | --include \ - | --includ | --inclu | --incl | --inc) - ac_prev=includedir ;; - -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ - | --includ=* | --inclu=* | --incl=* | --inc=*) - includedir=$ac_optarg ;; - - -infodir | --infodir | --infodi | --infod | --info | --inf) - ac_prev=infodir ;; - -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) - infodir=$ac_optarg ;; - - -libdir | --libdir | --libdi | --libd) - ac_prev=libdir ;; - -libdir=* | --libdir=* | --libdi=* | --libd=*) - libdir=$ac_optarg ;; - - -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ - | --libexe | --libex | --libe) - ac_prev=libexecdir ;; - -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ - | --libexe=* | --libex=* | --libe=*) - libexecdir=$ac_optarg ;; - - -localedir | --localedir | --localedi | --localed | --locale) - ac_prev=localedir ;; - -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) - localedir=$ac_optarg ;; - - -localstatedir | --localstatedir | --localstatedi | --localstated \ - | --localstate | --localstat | --localsta | --localst | --locals) - ac_prev=localstatedir ;; - -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ - | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) - localstatedir=$ac_optarg ;; - - -mandir | --mandir | --mandi | --mand | --man | --ma | --m) - ac_prev=mandir ;; - -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) - mandir=$ac_optarg ;; - - -nfp | --nfp | --nf) - # Obsolete; use --without-fp. 
- with_fp=no ;; - - -no-create | --no-create | --no-creat | --no-crea | --no-cre \ - | --no-cr | --no-c | -n) - no_create=yes ;; - - -no-recursion | --no-recursion | --no-recursio | --no-recursi \ - | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) - no_recursion=yes ;; - - -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ - | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ - | --oldin | --oldi | --old | --ol | --o) - ac_prev=oldincludedir ;; - -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ - | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ - | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) - oldincludedir=$ac_optarg ;; - - -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) - ac_prev=prefix ;; - -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) - prefix=$ac_optarg ;; - - -program-prefix | --program-prefix | --program-prefi | --program-pref \ - | --program-pre | --program-pr | --program-p) - ac_prev=program_prefix ;; - -program-prefix=* | --program-prefix=* | --program-prefi=* \ - | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) - program_prefix=$ac_optarg ;; - - -program-suffix | --program-suffix | --program-suffi | --program-suff \ - | --program-suf | --program-su | --program-s) - ac_prev=program_suffix ;; - -program-suffix=* | --program-suffix=* | --program-suffi=* \ - | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) - program_suffix=$ac_optarg ;; - - -program-transform-name | --program-transform-name \ - | --program-transform-nam | --program-transform-na \ - | --program-transform-n | --program-transform- \ - | --program-transform | --program-transfor \ - | --program-transfo | --program-transf \ - | --program-trans | --program-tran \ - | --progr-tra | --program-tr | --program-t) - ac_prev=program_transform_name ;; - -program-transform-name=* | --program-transform-name=* \ - | --program-transform-nam=* | --program-transform-na=* \ - | --program-transform-n=* | --program-transform-=* \ - | --program-transform=* | --program-transfor=* \ - | --program-transfo=* | --program-transf=* \ - | --program-trans=* | --program-tran=* \ - | --progr-tra=* | --program-tr=* | --program-t=*) - program_transform_name=$ac_optarg ;; - - -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) - ac_prev=pdfdir ;; - -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) - pdfdir=$ac_optarg ;; - - -psdir | --psdir | --psdi | --psd | --ps) - ac_prev=psdir ;; - -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) - psdir=$ac_optarg ;; - - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil) - silent=yes ;; - - -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) - ac_prev=sbindir ;; - -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ - | --sbi=* | --sb=*) - sbindir=$ac_optarg ;; - - -sharedstatedir | --sharedstatedir | --sharedstatedi \ - | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ - | --sharedst | --shareds | --shared | --share | --shar \ - | --sha | --sh) - ac_prev=sharedstatedir ;; - -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ - | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ - | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ - | --sha=* | --sh=*) - sharedstatedir=$ac_optarg ;; - - -site | --site | --sit) - ac_prev=site ;; - -site=* | --site=* | --sit=*) - 
site=$ac_optarg ;; - - -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) - ac_prev=srcdir ;; - -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) - srcdir=$ac_optarg ;; - - -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ - | --syscon | --sysco | --sysc | --sys | --sy) - ac_prev=sysconfdir ;; - -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ - | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) - sysconfdir=$ac_optarg ;; - - -target | --target | --targe | --targ | --tar | --ta | --t) - ac_prev=target_alias ;; - -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) - target_alias=$ac_optarg ;; - - -v | -verbose | --verbose | --verbos | --verbo | --verb) - verbose=yes ;; - - -version | --version | --versio | --versi | --vers | -V) - ac_init_version=: ;; - - -with-* | --with-*) - ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid package name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"with_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval with_$ac_useropt=\$ac_optarg ;; - - -without-* | --without-*) - ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid package name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"with_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval with_$ac_useropt=no ;; - - --x) - # Obsolete; use --with-x. - with_x=yes ;; - - -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ - | --x-incl | --x-inc | --x-in | --x-i) - ac_prev=x_includes ;; - -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ - | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) - x_includes=$ac_optarg ;; - - -x-libraries | --x-libraries | --x-librarie | --x-librari \ - | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) - ac_prev=x_libraries ;; - -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ - | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) - x_libraries=$ac_optarg ;; - - -*) as_fn_error $? "unrecognized option: \`$ac_option' -Try \`$0 --help' for more information" - ;; - - *=*) - ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` - # Reject names that are not valid shell variable names. - case $ac_envvar in #( - '' | [0-9]* | *[!_$as_cr_alnum]* ) - as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; - esac - eval $ac_envvar=\$ac_optarg - export $ac_envvar ;; - - *) - # FIXME: should be removed in autoconf 3.0. - $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 - expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && - $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 - : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" - ;; - - esac -done - -if test -n "$ac_prev"; then - ac_option=--`echo $ac_prev | sed 's/_/-/g'` - as_fn_error $? 
"missing argument to $ac_option" -fi - -if test -n "$ac_unrecognized_opts"; then - case $enable_option_checking in - no) ;; - fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; - *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; - esac -fi - -# Check all directory arguments for consistency. -for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ - datadir sysconfdir sharedstatedir localstatedir includedir \ - oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ - libdir localedir mandir -do - eval ac_val=\$$ac_var - # Remove trailing slashes. - case $ac_val in - */ ) - ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` - eval $ac_var=\$ac_val;; - esac - # Be sure to have absolute directory names. - case $ac_val in - [\\/$]* | ?:[\\/]* ) continue;; - NONE | '' ) case $ac_var in *prefix ) continue;; esac;; - esac - as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" -done - -# There might be people who depend on the old broken behavior: `$host' -# used to hold the argument of --host etc. -# FIXME: To remove some day. -build=$build_alias -host=$host_alias -target=$target_alias - -# FIXME: To remove some day. -if test "x$host_alias" != x; then - if test "x$build_alias" = x; then - cross_compiling=maybe - elif test "x$build_alias" != "x$host_alias"; then - cross_compiling=yes - fi -fi - -ac_tool_prefix= -test -n "$host_alias" && ac_tool_prefix=$host_alias- - -test "$silent" = yes && exec 6>/dev/null - - -ac_pwd=`pwd` && test -n "$ac_pwd" && -ac_ls_di=`ls -di .` && -ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || - as_fn_error $? "working directory cannot be determined" -test "X$ac_ls_di" = "X$ac_pwd_ls_di" || - as_fn_error $? "pwd does not report name of working directory" - - -# Find the source files, if location was not specified. -if test -z "$srcdir"; then - ac_srcdir_defaulted=yes - # Try the directory containing this script, then the parent directory. - ac_confdir=`$as_dirname -- "$as_myself" || -$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_myself" : 'X\(//\)[^/]' \| \ - X"$as_myself" : 'X\(//\)$' \| \ - X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_myself" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - srcdir=$ac_confdir - if test ! -r "$srcdir/$ac_unique_file"; then - srcdir=.. - fi -else - ac_srcdir_defaulted=no -fi -if test ! -r "$srcdir/$ac_unique_file"; then - test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." - as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" -fi -ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" -ac_abs_confdir=`( - cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" - pwd)` -# When building in place, set srcdir=. -if test "$ac_abs_confdir" = "$ac_pwd"; then - srcdir=. -fi -# Remove unnecessary trailing slashes from srcdir. -# Double slashes in file names in object file debugging info -# mess up M-x gdb in Emacs. -case $srcdir in -*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; -esac -for ac_var in $ac_precious_vars; do - eval ac_env_${ac_var}_set=\${${ac_var}+set} - eval ac_env_${ac_var}_value=\$${ac_var} - eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} - eval ac_cv_env_${ac_var}_value=\$${ac_var} -done - -# -# Report the --help message. 
-# -if test "$ac_init_help" = "long"; then - # Omit some internal or obsolete options to make the list less imposing. - # This message is too long to be a string in the A/UX 3.1 sh. - cat <<_ACEOF -\`configure' configures xgboost 0.6-3 to adapt to many kinds of systems. - -Usage: $0 [OPTION]... [VAR=VALUE]... - -To assign environment variables (e.g., CC, CFLAGS...), specify them as -VAR=VALUE. See below for descriptions of some of the useful variables. - -Defaults for the options are specified in brackets. - -Configuration: - -h, --help display this help and exit - --help=short display options specific to this package - --help=recursive display the short help of all the included packages - -V, --version display version information and exit - -q, --quiet, --silent do not print \`checking ...' messages - --cache-file=FILE cache test results in FILE [disabled] - -C, --config-cache alias for \`--cache-file=config.cache' - -n, --no-create do not create output files - --srcdir=DIR find the sources in DIR [configure dir or \`..'] - -Installation directories: - --prefix=PREFIX install architecture-independent files in PREFIX - [$ac_default_prefix] - --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX - [PREFIX] - -By default, \`make install' will install all the files in -\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify -an installation prefix other than \`$ac_default_prefix' using \`--prefix', -for instance \`--prefix=\$HOME'. - -For better control, use the options below. - -Fine tuning of the installation directories: - --bindir=DIR user executables [EPREFIX/bin] - --sbindir=DIR system admin executables [EPREFIX/sbin] - --libexecdir=DIR program executables [EPREFIX/libexec] - --sysconfdir=DIR read-only single-machine data [PREFIX/etc] - --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] - --localstatedir=DIR modifiable single-machine data [PREFIX/var] - --libdir=DIR object code libraries [EPREFIX/lib] - --includedir=DIR C header files [PREFIX/include] - --oldincludedir=DIR C header files for non-gcc [/usr/include] - --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] - --datadir=DIR read-only architecture-independent data [DATAROOTDIR] - --infodir=DIR info documentation [DATAROOTDIR/info] - --localedir=DIR locale-dependent data [DATAROOTDIR/locale] - --mandir=DIR man documentation [DATAROOTDIR/man] - --docdir=DIR documentation root [DATAROOTDIR/doc/xgboost] - --htmldir=DIR html documentation [DOCDIR] - --dvidir=DIR dvi documentation [DOCDIR] - --pdfdir=DIR pdf documentation [DOCDIR] - --psdir=DIR ps documentation [DOCDIR] -_ACEOF - - cat <<\_ACEOF -_ACEOF -fi - -if test -n "$ac_init_help"; then - case $ac_init_help in - short | recursive ) echo "Configuration of xgboost 0.6-3:";; - esac - cat <<\_ACEOF - -Some influential environment variables: - CC C compiler command - CFLAGS C compiler flags - LDFLAGS linker flags, e.g. -L if you have libraries in a - nonstandard directory - LIBS libraries to pass to the linker, e.g. -l - CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I if - you have headers in a nonstandard directory - -Use these variables to override the choices made by `configure' or to help -it to find libraries and programs with nonstandard names/locations. - -Report bugs to the package provider. -_ACEOF -ac_status=$? -fi - -if test "$ac_init_help" = "recursive"; then - # If there are subdirs, report their specific --help. 
- for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue - test -d "$ac_dir" || - { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || - continue - ac_builddir=. - -case "$ac_dir" in -.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; -*) - ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` - # A ".." for each directory in $ac_dir_suffix. - ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` - case $ac_top_builddir_sub in - "") ac_top_builddir_sub=. ac_top_build_prefix= ;; - *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; - esac ;; -esac -ac_abs_top_builddir=$ac_pwd -ac_abs_builddir=$ac_pwd$ac_dir_suffix -# for backward compatibility: -ac_top_builddir=$ac_top_build_prefix - -case $srcdir in - .) # We are building in place. - ac_srcdir=. - ac_top_srcdir=$ac_top_builddir_sub - ac_abs_top_srcdir=$ac_pwd ;; - [\\/]* | ?:[\\/]* ) # Absolute name. - ac_srcdir=$srcdir$ac_dir_suffix; - ac_top_srcdir=$srcdir - ac_abs_top_srcdir=$srcdir ;; - *) # Relative name. - ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix - ac_top_srcdir=$ac_top_build_prefix$srcdir - ac_abs_top_srcdir=$ac_pwd/$srcdir ;; -esac -ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix - - cd "$ac_dir" || { ac_status=$?; continue; } - # Check for guested configure. - if test -f "$ac_srcdir/configure.gnu"; then - echo && - $SHELL "$ac_srcdir/configure.gnu" --help=recursive - elif test -f "$ac_srcdir/configure"; then - echo && - $SHELL "$ac_srcdir/configure" --help=recursive - else - $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 - fi || ac_status=$? - cd "$ac_pwd" || { ac_status=$?; break; } - done -fi - -test -n "$ac_init_help" && exit $ac_status -if $ac_init_version; then - cat <<\_ACEOF -xgboost configure 0.6-3 -generated by GNU Autoconf 2.69 - -Copyright (C) 2012 Free Software Foundation, Inc. -This configure script is free software; the Free Software Foundation -gives unlimited permission to copy, distribute and modify it. -_ACEOF - exit -fi - -## ------------------------ ## -## Autoconf initialization. ## -## ------------------------ ## - -# ac_fn_c_try_compile LINENO -# -------------------------- -# Try to compile conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_compile () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext - if { { ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compile") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest.$ac_objext; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_compile - -# ac_fn_c_try_link LINENO -# ----------------------- -# Try to link conftest.$ac_ext, and return whether this succeeded. 
-ac_fn_c_try_link () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext conftest$ac_exeext - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && { - test "$cross_compiling" = yes || - test -x conftest$ac_exeext - }; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information - # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would - # interfere with the next link command; also delete a directory that is - # left behind by Apple's compiler. We do this before executing the actions. - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_link - -# ac_fn_c_try_run LINENO -# ---------------------- -# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes -# that executables *can* be run. -ac_fn_c_try_run () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' - { { case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; }; then : - ac_retval=0 -else - $as_echo "$as_me: program exited with status $ac_status" >&5 - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=$ac_status -fi - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_run -cat >config.log <<_ACEOF -This file contains any messages produced by compilers while -running configure, to aid debugging if configure makes a mistake. - -It was created by xgboost $as_me 0.6-3, which was -generated by GNU Autoconf 2.69. Invocation command line was - - $ $0 $@ - -_ACEOF -exec 5>>config.log -{ -cat <<_ASUNAME -## --------- ## -## Platform. 
## -## --------- ## - -hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` -uname -m = `(uname -m) 2>/dev/null || echo unknown` -uname -r = `(uname -r) 2>/dev/null || echo unknown` -uname -s = `(uname -s) 2>/dev/null || echo unknown` -uname -v = `(uname -v) 2>/dev/null || echo unknown` - -/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` -/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` - -/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` -/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` -/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` -/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` -/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` -/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` -/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` - -_ASUNAME - -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - $as_echo "PATH: $as_dir" - done -IFS=$as_save_IFS - -} >&5 - -cat >&5 <<_ACEOF - - -## ----------- ## -## Core tests. ## -## ----------- ## - -_ACEOF - - -# Keep a trace of the command line. -# Strip out --no-create and --no-recursion so they do not pile up. -# Strip out --silent because we don't want to record it for future runs. -# Also quote any args containing shell meta-characters. -# Make two passes to allow for proper duplicate-argument suppression. -ac_configure_args= -ac_configure_args0= -ac_configure_args1= -ac_must_keep_next=false -for ac_pass in 1 2 -do - for ac_arg - do - case $ac_arg in - -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil) - continue ;; - *\'*) - ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; - esac - case $ac_pass in - 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; - 2) - as_fn_append ac_configure_args1 " '$ac_arg'" - if test $ac_must_keep_next = true; then - ac_must_keep_next=false # Got value, back to normal. - else - case $ac_arg in - *=* | --config-cache | -C | -disable-* | --disable-* \ - | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ - | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ - | -with-* | --with-* | -without-* | --without-* | --x) - case "$ac_configure_args0 " in - "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; - esac - ;; - -* ) ac_must_keep_next=true ;; - esac - fi - as_fn_append ac_configure_args " '$ac_arg'" - ;; - esac - done -done -{ ac_configure_args0=; unset ac_configure_args0;} -{ ac_configure_args1=; unset ac_configure_args1;} - -# When interrupted or exit'd, cleanup temporary files, and complete -# config.log. We remove comments because anyway the quotes in there -# would cause problems or look ugly. -# WARNING: Use '\'' to represent an apostrophe within the trap. -# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. -trap 'exit_status=$? - # Save into config.log some information that might help in debugging. - { - echo - - $as_echo "## ---------------- ## -## Cache variables. 
## -## ---------------- ##" - echo - # The following way of writing the cache mishandles newlines in values, -( - for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do - eval ac_val=\$$ac_var - case $ac_val in #( - *${as_nl}*) - case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 -$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; - esac - case $ac_var in #( - _ | IFS | as_nl) ;; #( - BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( - *) { eval $ac_var=; unset $ac_var;} ;; - esac ;; - esac - done - (set) 2>&1 | - case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( - *${as_nl}ac_space=\ *) - sed -n \ - "s/'\''/'\''\\\\'\'''\''/g; - s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" - ;; #( - *) - sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" - ;; - esac | - sort -) - echo - - $as_echo "## ----------------- ## -## Output variables. ## -## ----------------- ##" - echo - for ac_var in $ac_subst_vars - do - eval ac_val=\$$ac_var - case $ac_val in - *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; - esac - $as_echo "$ac_var='\''$ac_val'\''" - done | sort - echo - - if test -n "$ac_subst_files"; then - $as_echo "## ------------------- ## -## File substitutions. ## -## ------------------- ##" - echo - for ac_var in $ac_subst_files - do - eval ac_val=\$$ac_var - case $ac_val in - *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; - esac - $as_echo "$ac_var='\''$ac_val'\''" - done | sort - echo - fi - - if test -s confdefs.h; then - $as_echo "## ----------- ## -## confdefs.h. ## -## ----------- ##" - echo - cat confdefs.h - echo - fi - test "$ac_signal" != 0 && - $as_echo "$as_me: caught signal $ac_signal" - $as_echo "$as_me: exit $exit_status" - } >&5 - rm -f core *.core core.conftest.* && - rm -f -r conftest* confdefs* conf$$* $ac_clean_files && - exit $exit_status -' 0 -for ac_signal in 1 2 13 15; do - trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal -done -ac_signal=0 - -# confdefs.h avoids OS command line length limits that DEFS can exceed. -rm -f -r conftest* confdefs.h - -$as_echo "/* confdefs.h */" > confdefs.h - -# Predefined preprocessor variables. - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_NAME "$PACKAGE_NAME" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_TARNAME "$PACKAGE_TARNAME" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_VERSION "$PACKAGE_VERSION" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_STRING "$PACKAGE_STRING" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_URL "$PACKAGE_URL" -_ACEOF - - -# Let the site file select an alternate cache file if it wants to. -# Prefer an explicitly selected file to automatically selected ones. -ac_site_file1=NONE -ac_site_file2=NONE -if test -n "$CONFIG_SITE"; then - # We do not want a PATH search for config.site. 
- case $CONFIG_SITE in #(( - -*) ac_site_file1=./$CONFIG_SITE;; - */*) ac_site_file1=$CONFIG_SITE;; - *) ac_site_file1=./$CONFIG_SITE;; - esac -elif test "x$prefix" != xNONE; then - ac_site_file1=$prefix/share/config.site - ac_site_file2=$prefix/etc/config.site -else - ac_site_file1=$ac_default_prefix/share/config.site - ac_site_file2=$ac_default_prefix/etc/config.site -fi -for ac_site_file in "$ac_site_file1" "$ac_site_file2" -do - test "x$ac_site_file" = xNONE && continue - if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 -$as_echo "$as_me: loading site script $ac_site_file" >&6;} - sed 's/^/| /' "$ac_site_file" >&5 - . "$ac_site_file" \ - || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "failed to load site script $ac_site_file -See \`config.log' for more details" "$LINENO" 5; } - fi -done - -if test -r "$cache_file"; then - # Some versions of bash will fail to source /dev/null (special files - # actually), so we avoid doing that. DJGPP emulates it as a regular file. - if test /dev/null != "$cache_file" && test -f "$cache_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 -$as_echo "$as_me: loading cache $cache_file" >&6;} - case $cache_file in - [\\/]* | ?:[\\/]* ) . "$cache_file";; - *) . "./$cache_file";; - esac - fi -else - { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 -$as_echo "$as_me: creating cache $cache_file" >&6;} - >$cache_file -fi - -# Check that the precious variables saved in the cache have kept the same -# value. -ac_cache_corrupted=false -for ac_var in $ac_precious_vars; do - eval ac_old_set=\$ac_cv_env_${ac_var}_set - eval ac_new_set=\$ac_env_${ac_var}_set - eval ac_old_val=\$ac_cv_env_${ac_var}_value - eval ac_new_val=\$ac_env_${ac_var}_value - case $ac_old_set,$ac_new_set in - set,) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 -$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} - ac_cache_corrupted=: ;; - ,set) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 -$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} - ac_cache_corrupted=: ;; - ,);; - *) - if test "x$ac_old_val" != "x$ac_new_val"; then - # differences in whitespace do not lead to failure. - ac_old_val_w=`echo x $ac_old_val` - ac_new_val_w=`echo x $ac_new_val` - if test "$ac_old_val_w" != "$ac_new_val_w"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 -$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} - ac_cache_corrupted=: - else - { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 -$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} - eval $ac_var=\$ac_old_val - fi - { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 -$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 -$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} - fi;; - esac - # Pass precious variables to config.status. 
- if test "$ac_new_set" = set; then - case $ac_new_val in - *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; - *) ac_arg=$ac_var=$ac_new_val ;; - esac - case " $ac_configure_args " in - *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. - *) as_fn_append ac_configure_args " '$ac_arg'" ;; - esac - fi -done -if $ac_cache_corrupted; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 -$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} - as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 -fi -## -------------------- ## -## Main body of script. ## -## -------------------- ## - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - - -# Use this line to set CC variable to a C compiler -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. -set dummy ${ac_tool_prefix}gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_CC="${ac_tool_prefix}gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_CC"; then - ac_ct_CC=$CC - # Extract the first word of "gcc", so it can be a program name with args. -set dummy gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_CC"; then - ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_CC="gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_CC=$ac_cv_prog_ac_ct_CC -if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_CC" = x; then - CC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - CC=$ac_ct_CC - fi -else - CC="$ac_cv_prog_CC" -fi - -if test -z "$CC"; then - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. -set dummy ${ac_tool_prefix}cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_CC="${ac_tool_prefix}cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - fi -fi -if test -z "$CC"; then - # Extract the first word of "cc", so it can be a program name with args. -set dummy cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else - ac_prog_rejected=no -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then - ac_prog_rejected=yes - continue - fi - ac_cv_prog_CC="cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -if test $ac_prog_rejected = yes; then - # We found a bogon in the path, so make sure we never use it. - set dummy $ac_cv_prog_CC - shift - if test $# != 0; then - # We chose a different compiler from the bogus one. - # However, it has the same basename, so the bogon will be chosen - # first if we set CC to just the basename; use the full file name. 
- shift - ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" - fi -fi -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$CC"; then - if test -n "$ac_tool_prefix"; then - for ac_prog in cl.exe - do - # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_CC="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$CC" && break - done -fi -if test -z "$CC"; then - ac_ct_CC=$CC - for ac_prog in cl.exe -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_CC"; then - ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_CC="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_CC=$ac_cv_prog_ac_ct_CC -if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$ac_ct_CC" && break -done - - if test "x$ac_ct_CC" = x; then - CC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - CC=$ac_ct_CC - fi -fi - -fi - - -test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "no acceptable C compiler found in \$PATH -See \`config.log' for more details" "$LINENO" 5; } - -# Provide some information about the compiler. 
-$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 -set X $ac_compile -ac_compiler=$2 -for ac_option in --version -v -V -qversion; do - { { ac_try="$ac_compiler $ac_option >&5" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compiler $ac_option >&5") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - sed '10a\ -... rest of stderr output deleted ... - 10q' conftest.err >conftest.er1 - cat conftest.er1 >&5 - fi - rm -f conftest.er1 conftest.err - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } -done - -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -ac_clean_files_save=$ac_clean_files -ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" -# Try to create an executable without -o first, disregard a.out. -# It will help us diagnose broken compilers, and finding out an intuition -# of exeext. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 -$as_echo_n "checking whether the C compiler works... " >&6; } -ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` - -# The possible output files: -ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" - -ac_rmfiles= -for ac_file in $ac_files -do - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; - * ) ac_rmfiles="$ac_rmfiles $ac_file";; - esac -done -rm -f $ac_rmfiles - -if { { ac_try="$ac_link_default" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link_default") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. -# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' -# in a Makefile. We should not override ac_cv_exeext if it was cached, -# so that the user can short-circuit this test for compilers unknown to -# Autoconf. -for ac_file in $ac_files '' -do - test -f "$ac_file" || continue - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) - ;; - [ab].out ) - # We found the default executable, but exeext='' is most - # certainly right. - break;; - *.* ) - if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; - then :; else - ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` - fi - # We set ac_cv_exeext here because the later test for it is not - # safe: cross compilers may not add the suffix if given an `-o' - # argument, so we may need to know it at that point already. - # Even if this section looks crufty: it has the advantage of - # actually working. 
- break;; - * ) - break;; - esac -done -test "$ac_cv_exeext" = no && ac_cv_exeext= - -else - ac_file='' -fi -if test -z "$ac_file"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -$as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error 77 "C compiler cannot create executables -See \`config.log' for more details" "$LINENO" 5; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 -$as_echo_n "checking for C compiler default output file name... " >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 -$as_echo "$ac_file" >&6; } -ac_exeext=$ac_cv_exeext - -rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out -ac_clean_files=$ac_clean_files_save -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 -$as_echo_n "checking for suffix of executables... " >&6; } -if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - # If both `conftest.exe' and `conftest' are `present' (well, observable) -# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will -# work properly (i.e., refer to `conftest.exe'), while it won't with -# `rm'. -for ac_file in conftest.exe conftest conftest.*; do - test -f "$ac_file" || continue - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; - *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` - break;; - * ) break;; - esac -done -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot compute suffix of executables: cannot compile and link -See \`config.log' for more details" "$LINENO" 5; } -fi -rm -f conftest conftest$ac_cv_exeext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 -$as_echo "$ac_cv_exeext" >&6; } - -rm -f conftest.$ac_ext -EXEEXT=$ac_cv_exeext -ac_exeext=$EXEEXT -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <stdio.h> -int -main () -{ -FILE *f = fopen ("conftest.out", "w"); - return ferror (f) || fclose (f) != 0; - - ; - return 0; -} -_ACEOF -ac_clean_files="$ac_clean_files conftest.out" -# Check that the compiler produces executables we can run. If not, either -# the compiler is broken, or we cross compile. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 -$as_echo_n "checking whether we are cross compiling... " >&6; } -if test "$cross_compiling" != yes; then - { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$?
= $ac_status" >&5 - test $ac_status = 0; } - if { ac_try='./conftest$ac_cv_exeext' - { { case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; }; then - cross_compiling=no - else - if test "$cross_compiling" = maybe; then - cross_compiling=yes - else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot run C compiled programs. -If you meant to cross compile, use \`--host'. -See \`config.log' for more details" "$LINENO" 5; } - fi - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 -$as_echo "$cross_compiling" >&6; } - -rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out -ac_clean_files=$ac_clean_files_save -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 -$as_echo_n "checking for suffix of object files... " >&6; } -if ${ac_cv_objext+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -rm -f conftest.o conftest.obj -if { { ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compile") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - for ac_file in conftest.o conftest.obj conftest.*; do - test -f "$ac_file" || continue; - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; - *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` - break;; - esac -done -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot compute suffix of object files: cannot compile -See \`config.log' for more details" "$LINENO" 5; } -fi -rm -f conftest.$ac_cv_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 -$as_echo "$ac_cv_objext" >&6; } -OBJEXT=$ac_cv_objext -ac_objext=$OBJEXT -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 -$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } -if ${ac_cv_c_compiler_gnu+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ -#ifndef __GNUC__ - choke me -#endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_compiler_gnu=yes -else - ac_compiler_gnu=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -ac_cv_c_compiler_gnu=$ac_compiler_gnu - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 -$as_echo "$ac_cv_c_compiler_gnu" >&6; } -if test $ac_compiler_gnu = yes; then - GCC=yes -else - GCC= -fi -ac_test_CFLAGS=${CFLAGS+set} -ac_save_CFLAGS=$CFLAGS -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 -$as_echo_n "checking whether $CC accepts -g... 
" >&6; } -if ${ac_cv_prog_cc_g+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_save_c_werror_flag=$ac_c_werror_flag - ac_c_werror_flag=yes - ac_cv_prog_cc_g=no - CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_g=yes -else - CFLAGS="" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - -else - ac_c_werror_flag=$ac_save_c_werror_flag - CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_g=yes -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - ac_c_werror_flag=$ac_save_c_werror_flag -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 -$as_echo "$ac_cv_prog_cc_g" >&6; } -if test "$ac_test_CFLAGS" = set; then - CFLAGS=$ac_save_CFLAGS -elif test $ac_cv_prog_cc_g = yes; then - if test "$GCC" = yes; then - CFLAGS="-g -O2" - else - CFLAGS="-g" - fi -else - if test "$GCC" = yes; then - CFLAGS="-O2" - else - CFLAGS= - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 -$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } -if ${ac_cv_prog_cc_c89+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_cv_prog_cc_c89=no -ac_save_CC=$CC -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -struct stat; -/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ -struct buf { int x; }; -FILE * (*rcsopen) (struct buf *, struct stat *, int); -static char *e (p, i) - char **p; - int i; -{ - return p[i]; -} -static char *f (char * (*g) (char **, int), char **p, ...) -{ - char *s; - va_list v; - va_start (v,p); - s = g (p, va_arg (v,int)); - va_end (v); - return s; -} - -/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has - function prototypes and stuff, but not '\xHH' hex character constants. - These don't provoke an error unfortunately, instead are silently treated - as 'x'. The following induces an error, until -std is added to get - proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an - array size at least. It's necessary to write '\x00'==0 to get something - that's true only with -std. */ -int osf4_cc_array ['\x00' == 0 ? 1 : -1]; - -/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters - inside strings and character constants. */ -#define FOO(x) 'x' -int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; - -int test (int i, double x); -struct s1 {int (*f) (int a);}; -struct s2 {int (*f) (double a);}; -int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); -int argc; -char **argv; -int -main () -{ -return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; - ; - return 0; -} -_ACEOF -for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ - -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" -do - CC="$ac_save_CC $ac_arg" - if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_c89=$ac_arg -fi -rm -f core conftest.err conftest.$ac_objext - test "x$ac_cv_prog_cc_c89" != "xno" && break -done -rm -f conftest.$ac_ext -CC=$ac_save_CC - -fi -# AC_CACHE_VAL -case "x$ac_cv_prog_cc_c89" in - x) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 -$as_echo "none needed" >&6; } ;; - xno) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 -$as_echo "unsupported" >&6; } ;; - *) - CC="$CC $ac_cv_prog_cc_c89" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 -$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; -esac -if test "x$ac_cv_prog_cc_c89" != xno; then : - -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -### Check whether backtrace() is part of libc or the external lib libexecinfo -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking Backtrace lib" >&5 -$as_echo_n "checking Backtrace lib... " >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 -$as_echo "" >&6; } - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for backtrace in -lexecinfo" >&5 -$as_echo_n "checking for backtrace in -lexecinfo... " >&6; } -if ${ac_cv_lib_execinfo_backtrace+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lexecinfo $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char backtrace (); -int -main () -{ -return backtrace (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_execinfo_backtrace=yes -else - ac_cv_lib_execinfo_backtrace=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_execinfo_backtrace" >&5 -$as_echo "$ac_cv_lib_execinfo_backtrace" >&6; } -if test "x$ac_cv_lib_execinfo_backtrace" = xyes; then : - BACKTRACE_LIB=-lexecinfo -else - BACKTRACE_LIB='' -fi - - -### Endian detection -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking endian" >&5 -$as_echo_n "checking endian... " >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 -$as_echo "" >&6; } -if test "$cross_compiling" = yes; then : - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot run test program while cross compiling -See \`config.log' for more details" "$LINENO" 5; } -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -#include <stdint.h> -int -main () -{ -const uint16_t endianness = 256; return !!(*(const uint8_t *)&endianness); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - ENDIAN_FLAG="-DDMLC_CMAKE_LITTLE_ENDIAN=1" -else - ENDIAN_FLAG="-DDMLC_CMAKE_LITTLE_ENDIAN=0" -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext -fi - - -OPENMP_CXXFLAGS="" - -if test `uname -s` = "Linux" -then - OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CXXFLAGS)" -fi - -if test `uname -s` = "Darwin" -then - OPENMP_CXXFLAGS='-Xclang -fopenmp' - OPENMP_LIB='/usr/local/lib/libomp.dylib' - ac_pkg_openmp=no - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether OpenMP will work in a package" >&5 -$as_echo_n "checking whether OpenMP will work in a package... " >&6; } - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <omp.h> -int -main () -{ - return (omp_get_max_threads() <= 1); - ; - return 0; -} -_ACEOF - ${CC} -o conftest conftest.c /usr/local/lib/libomp.dylib -Xclang -fopenmp 2>/dev/null && ./conftest && ac_pkg_openmp=yes - { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${ac_pkg_openmp}" >&5 -$as_echo "${ac_pkg_openmp}" >&6; } - if test "${ac_pkg_openmp}" = no; then - OPENMP_CXXFLAGS='' - OPENMP_LIB='' - echo '*****************************************************************************************' - echo 'WARNING: OpenMP is unavailable on this Mac OSX system. Training speed may be suboptimal.' - echo ' To use all CPU cores for training jobs, you should install OpenMP by running\n' - echo ' brew install libomp' - echo '*****************************************************************************************' - fi -fi - - - - - -ac_config_files="$ac_config_files src/Makevars" - -cat >confcache <<\_ACEOF -# This file is a shell script that caches the results of configure -# tests run on this system so they can be shared between configure -# scripts and configure runs, see configure's option --config-cache. -# It is not useful on other systems. If it contains results you don't -# want to keep, you may remove or edit it. -# -# config.status only pays attention to the cache file if you give it -# the --recheck option to rerun configure. -# -# `ac_cv_env_foo' variables (set or unset) will be overridden when -# loading this file, other *unset* `ac_cv_foo' will be assigned the -# following values. - -_ACEOF - -# The following way of writing the cache mishandles newlines in values, -# but we know of no workaround that is simple, portable, and efficient. -# So, we kill variables containing newlines. -# Ultrix sh set writes to stderr and can't be redirected directly, -# and sets the high bit in the cache file unless we assign to the vars. -( - for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do - eval ac_val=\$$ac_var - case $ac_val in #( - *${as_nl}*) - case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 -$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; - esac - case $ac_var in #( - _ | IFS | as_nl) ;; #( - BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( - *) { eval $ac_var=; unset $ac_var;} ;; - esac ;; - esac - done - - (set) 2>&1 | - case $as_nl`(ac_space=' '; set) 2>&1` in #( - *${as_nl}ac_space=\ *) - # `set' does not quote correctly, so add quotes: double-quote - # substitution turns \\\\ into \\, and sed turns \\ into \.
- sed -n \ - "s/'/'\\\\''/g; - s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" - ;; #( - *) - # `set' quotes correctly as required by POSIX, so do not add quotes. - sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" - ;; - esac | - sort -) | - sed ' - /^ac_cv_env_/b end - t clear - :clear - s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ - t end - s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ - :end' >>confcache -if diff "$cache_file" confcache >/dev/null 2>&1; then :; else - if test -w "$cache_file"; then - if test "x$cache_file" != "x/dev/null"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 -$as_echo "$as_me: updating cache $cache_file" >&6;} - if test ! -f "$cache_file" || test -h "$cache_file"; then - cat confcache >"$cache_file" - else - case $cache_file in #( - */* | ?:*) - mv -f confcache "$cache_file"$$ && - mv -f "$cache_file"$$ "$cache_file" ;; #( - *) - mv -f confcache "$cache_file" ;; - esac - fi - fi - else - { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 -$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} - fi -fi -rm -f confcache - -test "x$prefix" = xNONE && prefix=$ac_default_prefix -# Let make expand exec_prefix. -test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' - -# Transform confdefs.h into DEFS. -# Protect against shell expansion while executing Makefile rules. -# Protect against Makefile macro expansion. -# -# If the first sed substitution is executed (which looks for macros that -# take arguments), then branch to the quote section. Otherwise, -# look for a macro that doesn't take arguments. -ac_script=' -:mline -/\\$/{ - N - s,\\\n,, - b mline -} -t clear -:clear -s/^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*([^)]*)\)[ ]*\(.*\)/-D\1=\2/g -t quote -s/^[ ]*#[ ]*define[ ][ ]*\([^ ][^ ]*\)[ ]*\(.*\)/-D\1=\2/g -t quote -b any -:quote -s/[ `~#$^&*(){}\\|;'\''"<>?]/\\&/g -s/\[/\\&/g -s/\]/\\&/g -s/\$/$$/g -H -:any -${ - g - s/^\n// - s/\n/ /g - p -} -' -DEFS=`sed -n "$ac_script" confdefs.h` - - -ac_libobjs= -ac_ltlibobjs= -U= -for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue - # 1. Remove the extension, and $U if already installed. - ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' - ac_i=`$as_echo "$ac_i" | sed "$ac_script"` - # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR - # will be set to the directory where LIBOBJS objects are built. - as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" - as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' -done -LIBOBJS=$ac_libobjs - -LTLIBOBJS=$ac_ltlibobjs - - - -: "${CONFIG_STATUS=./config.status}" -ac_write_fail=0 -ac_clean_files_save=$ac_clean_files -ac_clean_files="$ac_clean_files $CONFIG_STATUS" -{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 -$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} -as_write_fail=0 -cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 -#! $SHELL -# Generated by $as_me. -# Run this file to recreate the current configuration. -# Compiler output produced by configure, useful for debugging -# configure, is in config.log if it exists. - -debug=false -ac_cs_recheck=false -ac_cs_silent=false - -SHELL=\${CONFIG_SHELL-$SHELL} -export SHELL -_ASEOF -cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 -## -------------------- ## -## M4sh Initialization. 
## -## -------------------- ## - -# Be more Bourne compatible -DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which - # is contrary to our usage. Disable this feature. - alias -g '${1+"$@"}'='"$@"' - setopt NO_GLOB_SUBST -else - case `(set -o) 2>/dev/null` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi - - -as_nl=' -' -export as_nl -# Printing a long string crashes Solaris 7 /usr/bin/printf. -as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. -if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='printf %s\n' - as_echo_n='printf %s' -else - if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then - as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' - as_echo_n='/usr/ucb/echo -n' - else - as_echo_body='eval expr "X$1" : "X\\(.*\\)"' - as_echo_n_body='eval - arg=$1; - case $arg in #( - *"$as_nl"*) - expr "X$arg" : "X\\(.*\\)$as_nl"; - arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; - esac; - expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" - ' - export as_echo_n_body - as_echo_n='sh -c $as_echo_n_body as_echo' - fi - export as_echo_body - as_echo='sh -c $as_echo_body as_echo' -fi - -# The user is always right. -if test "${PATH_SEPARATOR+set}" != set; then - PATH_SEPARATOR=: - (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { - (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || - PATH_SEPARATOR=';' - } -fi - - -# IFS -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent editors from complaining about space-tab. -# (If _AS_PATH_WALK were called with IFS unset, it would disable word -# splitting by setting IFS to empty value.) -IFS=" "" $as_nl" - -# Find who we are. Look in the path if we contain no directory separator. -as_myself= -case $0 in #(( - *[\\/]* ) as_myself=$0 ;; - *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break - done -IFS=$as_save_IFS - - ;; -esac -# We did not find ourselves, most probably we were run as `sh COMMAND' -# in which case we are not to be found in the path. -if test "x$as_myself" = x; then - as_myself=$0 -fi -if test ! -f "$as_myself"; then - $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 - exit 1 -fi - -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. -for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : -done -PS1='$ ' -PS2='> ' -PS4='+ ' - -# NLS nuisances. -LC_ALL=C -export LC_ALL -LANGUAGE=C -export LANGUAGE - -# CDPATH. 
-(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - - -# as_fn_error STATUS ERROR [LINENO LOG_FD] -# ---------------------------------------- -# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are -# provided, also output the error to LOG_FD, referencing LINENO. Then exit the -# script with STATUS, using 1 if that was 0. -as_fn_error () -{ - as_status=$1; test $as_status -eq 0 && as_status=1 - if test "$4"; then - as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 - fi - $as_echo "$as_me: error: $2" >&2 - as_fn_exit $as_status -} # as_fn_error - - -# as_fn_set_status STATUS -# ----------------------- -# Set $? to STATUS, without forking. -as_fn_set_status () -{ - return $1 -} # as_fn_set_status - -# as_fn_exit STATUS -# ----------------- -# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. -as_fn_exit () -{ - set +e - as_fn_set_status $1 - exit $1 -} # as_fn_exit - -# as_fn_unset VAR -# --------------- -# Portably unset VAR. -as_fn_unset () -{ - { eval $1=; unset $1;} -} -as_unset=as_fn_unset -# as_fn_append VAR VALUE -# ---------------------- -# Append the text in VALUE to the end of the definition contained in VAR. Take -# advantage of any shell optimizations that allow amortized linear growth over -# repeated appends, instead of the typical quadratic growth present in naive -# implementations. -if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : - eval 'as_fn_append () - { - eval $1+=\$2 - }' -else - as_fn_append () - { - eval $1=\$$1\$2 - } -fi # as_fn_append - -# as_fn_arith ARG... -# ------------------ -# Perform arithmetic evaluation on the ARGs, and store the result in the -# global $as_val. Take advantage of shells that can avoid forks. The arguments -# must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : - eval 'as_fn_arith () - { - as_val=$(( $* )) - }' -else - as_fn_arith () - { - as_val=`expr "$@" || test $? -eq 1` - } -fi # as_fn_arith - - -if expr a : '\(a\)' >/dev/null 2>&1 && - test "X`expr 00001 : '.*\(...\)'`" = X001; then - as_expr=expr -else - as_expr=false -fi - -if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then - as_basename=basename -else - as_basename=false -fi - -if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then - as_dirname=dirname -else - as_dirname=false -fi - -as_me=`$as_basename -- "$0" || -$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ - X"$0" : 'X\(//\)$' \| \ - X"$0" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X/"$0" | - sed '/^.*\/\([^/][^/]*\)\/*$/{ - s//\1/ - q - } - /^X\/\(\/\/\)$/{ - s//\1/ - q - } - /^X\/\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - -# Avoid depending upon Character Ranges. -as_cr_letters='abcdefghijklmnopqrstuvwxyz' -as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' -as_cr_Letters=$as_cr_letters$as_cr_LETTERS -as_cr_digits='0123456789' -as_cr_alnum=$as_cr_Letters$as_cr_digits - -ECHO_C= ECHO_N= ECHO_T= -case `echo -n x` in #((((( --n*) - case `echo 'xy\c'` in - *c*) ECHO_T=' ';; # ECHO_T is single tab character. - xy) ECHO_C='\c';; - *) echo `echo ksh88 bug on AIX 6.1` > /dev/null - ECHO_T=' ';; - esac;; -*) - ECHO_N='-n';; -esac - -rm -f conf$$ conf$$.exe conf$$.file -if test -d conf$$.dir; then - rm -f conf$$.dir/conf$$.file -else - rm -f conf$$.dir - mkdir conf$$.dir 2>/dev/null -fi -if (echo >conf$$.file) 2>/dev/null; then - if ln -s conf$$.file conf$$ 2>/dev/null; then - as_ln_s='ln -s' - # ... 
but there are two gotchas: - # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. - # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -pR'. - ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || - as_ln_s='cp -pR' - elif ln conf$$.file conf$$ 2>/dev/null; then - as_ln_s=ln - else - as_ln_s='cp -pR' - fi -else - as_ln_s='cp -pR' -fi -rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file -rmdir conf$$.dir 2>/dev/null - - -# as_fn_mkdir_p -# ------------- -# Create "$as_dir" as a directory, including parents if necessary. -as_fn_mkdir_p () -{ - - case $as_dir in #( - -*) as_dir=./$as_dir;; - esac - test -d "$as_dir" || eval $as_mkdir_p || { - as_dirs= - while :; do - case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( - *) as_qdir=$as_dir;; - esac - as_dirs="'$as_qdir' $as_dirs" - as_dir=`$as_dirname -- "$as_dir" || -$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_dir" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - test -d "$as_dir" && break - done - test -z "$as_dirs" || eval "mkdir $as_dirs" - } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" - - -} # as_fn_mkdir_p -if mkdir -p . 2>/dev/null; then - as_mkdir_p='mkdir -p "$as_dir"' -else - test -d ./-p && rmdir ./-p - as_mkdir_p=false -fi - - -# as_fn_executable_p FILE -# ----------------------- -# Test if FILE is an executable regular file. -as_fn_executable_p () -{ - test -f "$1" && test -x "$1" -} # as_fn_executable_p -as_test_x='test -x' -as_executable_p=as_fn_executable_p - -# Sed expression to map a string onto a valid CPP name. -as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" - -# Sed expression to map a string onto a valid variable name. -as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" - - -exec 6>&1 -## ----------------------------------- ## -## Main body of $CONFIG_STATUS script. ## -## ----------------------------------- ## -_ASEOF -test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# Save the log message, to keep $0 and so on meaningful, and to -# report actual input values of CONFIG_FILES etc. instead of their -# values after options handling. -ac_log=" -This file was extended by xgboost $as_me 0.6-3, which was -generated by GNU Autoconf 2.69. Invocation command line was - - CONFIG_FILES = $CONFIG_FILES - CONFIG_HEADERS = $CONFIG_HEADERS - CONFIG_LINKS = $CONFIG_LINKS - CONFIG_COMMANDS = $CONFIG_COMMANDS - $ $0 $@ - -on `(hostname || uname -n) 2>/dev/null | sed 1q` -" - -_ACEOF - -case $ac_config_files in *" -"*) set x $ac_config_files; shift; ac_config_files=$*;; -esac - - - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -# Files that config.status was made for. -config_files="$ac_config_files" - -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -ac_cs_usage="\ -\`$as_me' instantiates files and other configuration actions -from templates according to the current configuration. Unless the files -and actions are specified as TAGs, all are instantiated by default. - -Usage: $0 [OPTION]... [TAG]... 
- - -h, --help print this help, then exit - -V, --version print version number and configuration settings, then exit - --config print configuration, then exit - -q, --quiet, --silent - do not print progress messages - -d, --debug don't remove temporary files - --recheck update $as_me by reconfiguring in the same conditions - --file=FILE[:TEMPLATE] - instantiate the configuration file FILE - -Configuration files: -$config_files - -Report bugs to the package provider." - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" -ac_cs_version="\\ -xgboost config.status 0.6-3 -configured by $0, generated by GNU Autoconf 2.69, - with options \\"\$ac_cs_config\\" - -Copyright (C) 2012 Free Software Foundation, Inc. -This config.status script is free software; the Free Software Foundation -gives unlimited permission to copy, distribute and modify it." - -ac_pwd='$ac_pwd' -srcdir='$srcdir' -test -n "\$AWK" || AWK=awk -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# The default lists apply if the user does not specify any file. -ac_need_defaults=: -while test $# != 0 -do - case $1 in - --*=?*) - ac_option=`expr "X$1" : 'X\([^=]*\)='` - ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` - ac_shift=: - ;; - --*=) - ac_option=`expr "X$1" : 'X\([^=]*\)='` - ac_optarg= - ac_shift=: - ;; - *) - ac_option=$1 - ac_optarg=$2 - ac_shift=shift - ;; - esac - - case $ac_option in - # Handling of the options. - -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) - ac_cs_recheck=: ;; - --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) - $as_echo "$ac_cs_version"; exit ;; - --config | --confi | --conf | --con | --co | --c ) - $as_echo "$ac_cs_config"; exit ;; - --debug | --debu | --deb | --de | --d | -d ) - debug=: ;; - --file | --fil | --fi | --f ) - $ac_shift - case $ac_optarg in - *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; - '') as_fn_error $? "missing file argument" ;; - esac - as_fn_append CONFIG_FILES " '$ac_optarg'" - ac_need_defaults=false;; - --he | --h | --help | --hel | -h ) - $as_echo "$ac_cs_usage"; exit ;; - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil | --si | --s) - ac_cs_silent=: ;; - - # This is an error. - -*) as_fn_error $? "unrecognized option: \`$1' -Try \`$0 --help' for more information." ;; - - *) as_fn_append ac_config_targets " $1" - ac_need_defaults=false ;; - - esac - shift -done - -ac_configure_extra_args= - -if $ac_cs_silent; then - exec 6>/dev/null - ac_configure_extra_args="$ac_configure_extra_args --silent" -fi - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -if \$ac_cs_recheck; then - set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion - shift - \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 - CONFIG_SHELL='$SHELL' - export CONFIG_SHELL - exec "\$@" -fi - -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -exec 5>>config.log -{ - echo - sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX -## Running $as_me. ## -_ASBOX - $as_echo "$ac_log" -} >&5 - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 - -# Handling of arguments. -for ac_config_target in $ac_config_targets -do - case $ac_config_target in - "src/Makevars") CONFIG_FILES="$CONFIG_FILES src/Makevars" ;; - - *) as_fn_error $? 
"invalid argument: \`$ac_config_target'" "$LINENO" 5;; - esac -done - - -# If the user did not use the arguments to specify the items to instantiate, -# then the envvar interface is used. Set only those that are not. -# We use the long form for the default assignment because of an extremely -# bizarre bug on SunOS 4.1.3. -if $ac_need_defaults; then - test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files -fi - -# Have a temporary directory for convenience. Make it in the build tree -# simply because there is no reason against having it here, and in addition, -# creating and moving files from /tmp can sometimes cause problems. -# Hook for its removal unless debugging. -# Note that there is a small window in which the directory will not be cleaned: -# after its creation but before its name has been assigned to `$tmp'. -$debug || -{ - tmp= ac_tmp= - trap 'exit_status=$? - : "${ac_tmp:=$tmp}" - { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status -' 0 - trap 'as_fn_exit 1' 1 2 13 15 -} -# Create a (secure) tmp directory for tmp files. - -{ - tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && - test -d "$tmp" -} || -{ - tmp=./conf$$-$RANDOM - (umask 077 && mkdir "$tmp") -} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 -ac_tmp=$tmp - -# Set up the scripts for CONFIG_FILES section. -# No need to generate them if there are no CONFIG_FILES. -# This happens for instance with `./config.status config.h'. -if test -n "$CONFIG_FILES"; then - - -ac_cr=`echo X | tr X '\015'` -# On cygwin, bash can eat \r inside `` if the user requested igncr. -# But we know of no other shell where ac_cr would be empty at this -# point, so we can use a bashism as a fallback. -if test "x$ac_cr" = x; then - eval ac_cr=\$\'\\r\' -fi -ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` -if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then - ac_cs_awk_cr='\\r' -else - ac_cs_awk_cr=$ac_cr -fi - -echo 'BEGIN {' >"$ac_tmp/subs1.awk" && -_ACEOF - - -{ - echo "cat >conf$$subs.awk <<_ACEOF" && - echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && - echo "_ACEOF" -} >conf$$subs.sh || - as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 -ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` -ac_delim='%!_!# ' -for ac_last_try in false false false false false :; do - . ./conf$$subs.sh || - as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 - - ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` - if test $ac_delim_n = $ac_delim_num; then - break - elif $ac_last_try; then - as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 - else - ac_delim="$ac_delim!$ac_delim _$ac_delim!! 
" - fi -done -rm -f conf$$subs.sh - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && -_ACEOF -sed -n ' -h -s/^/S["/; s/!.*/"]=/ -p -g -s/^[^!]*!// -:repl -t repl -s/'"$ac_delim"'$// -t delim -:nl -h -s/\(.\{148\}\)..*/\1/ -t more1 -s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ -p -n -b repl -:more1 -s/["\\]/\\&/g; s/^/"/; s/$/"\\/ -p -g -s/.\{148\}// -t nl -:delim -h -s/\(.\{148\}\)..*/\1/ -t more2 -s/["\\]/\\&/g; s/^/"/; s/$/"/ -p -b -:more2 -s/["\\]/\\&/g; s/^/"/; s/$/"\\/ -p -g -s/.\{148\}// -t delim -' >$CONFIG_STATUS || ac_write_fail=1 -rm -f conf$$subs.awk -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -_ACAWK -cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && - for (key in S) S_is_set[key] = 1 - FS = "" - -} -{ - line = $ 0 - nfields = split(line, field, "@") - substed = 0 - len = length(field[1]) - for (i = 2; i < nfields; i++) { - key = field[i] - keylen = length(key) - if (S_is_set[key]) { - value = S[key] - line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) - len += length(value) + length(field[++i]) - substed = 1 - } else - len += 1 + keylen - } - - print line -} - -_ACAWK -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then - sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" -else - cat -fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ - || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 -_ACEOF - -# VPATH may cause trouble with some makes, so we remove sole $(srcdir), -# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and -# trailing colons and then remove the whole line if VPATH becomes empty -# (actually we leave an empty line to preserve line numbers). -if test "x$srcdir" = x.; then - ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ -h -s/// -s/^/:/ -s/[ ]*$/:/ -s/:\$(srcdir):/:/g -s/:\${srcdir}:/:/g -s/:@srcdir@:/:/g -s/^:*// -s/:*$// -x -s/\(=[ ]*\).*/\1/ -G -s/\n// -s/^[^=]*=[ ]*$// -}' -fi - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -fi # test -n "$CONFIG_FILES" - - -eval set X " :F $CONFIG_FILES " -shift -for ac_tag -do - case $ac_tag in - :[FHLC]) ac_mode=$ac_tag; continue;; - esac - case $ac_mode$ac_tag in - :[FHL]*:*);; - :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; - :[FH]-) ac_tag=-:-;; - :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; - esac - ac_save_IFS=$IFS - IFS=: - set x $ac_tag - IFS=$ac_save_IFS - shift - ac_file=$1 - shift - - case $ac_mode in - :L) ac_source=$1;; - :[FH]) - ac_file_inputs= - for ac_f - do - case $ac_f in - -) ac_f="$ac_tmp/stdin";; - *) # Look for the file first in the build tree, then in the source tree - # (if the path is not absolute). The absolute path cannot be DOS-style, - # because $ac_f cannot contain `:'. - test -f "$ac_f" || - case $ac_f in - [\\/$]*) false;; - *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; - esac || - as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; - esac - case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac - as_fn_append ac_file_inputs " '$ac_f'" - done - - # Let's still pretend it is `configure' which instantiates (i.e., don't - # use $as_me), people would be surprised to read: - # /* config.h. Generated by config.status. */ - configure_input='Generated from '` - $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' - `' by configure.' - if test x"$ac_file" != x-; then - configure_input="$ac_file. 
$configure_input" - { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 -$as_echo "$as_me: creating $ac_file" >&6;} - fi - # Neutralize special characters interpreted by sed in replacement strings. - case $configure_input in #( - *\&* | *\|* | *\\* ) - ac_sed_conf_input=`$as_echo "$configure_input" | - sed 's/[\\\\&|]/\\\\&/g'`;; #( - *) ac_sed_conf_input=$configure_input;; - esac - - case $ac_tag in - *:-:* | *:-) cat >"$ac_tmp/stdin" \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; - esac - ;; - esac - - ac_dir=`$as_dirname -- "$ac_file" || -$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$ac_file" : 'X\(//\)[^/]' \| \ - X"$ac_file" : 'X\(//\)$' \| \ - X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$ac_file" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - as_dir="$ac_dir"; as_fn_mkdir_p - ac_builddir=. - -case "$ac_dir" in -.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; -*) - ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` - # A ".." for each directory in $ac_dir_suffix. - ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` - case $ac_top_builddir_sub in - "") ac_top_builddir_sub=. ac_top_build_prefix= ;; - *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; - esac ;; -esac -ac_abs_top_builddir=$ac_pwd -ac_abs_builddir=$ac_pwd$ac_dir_suffix -# for backward compatibility: -ac_top_builddir=$ac_top_build_prefix - -case $srcdir in - .) # We are building in place. - ac_srcdir=. - ac_top_srcdir=$ac_top_builddir_sub - ac_abs_top_srcdir=$ac_pwd ;; - [\\/]* | ?:[\\/]* ) # Absolute name. - ac_srcdir=$srcdir$ac_dir_suffix; - ac_top_srcdir=$srcdir - ac_abs_top_srcdir=$srcdir ;; - *) # Relative name. - ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix - ac_top_srcdir=$ac_top_build_prefix$srcdir - ac_abs_top_srcdir=$ac_pwd/$srcdir ;; -esac -ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix - - - case $ac_mode in - :F) - # - # CONFIG_FILE - # - -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# If the template does not know about datarootdir, expand it. -# FIXME: This hack should be removed a few years after 2.60. -ac_datarootdir_hack=; ac_datarootdir_seen= -ac_sed_dataroot=' -/datarootdir/ { - p - q -} -/@datadir@/p -/@docdir@/p -/@infodir@/p -/@localedir@/p -/@mandir@/p' -case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in -*datarootdir*) ac_datarootdir_seen=yes;; -*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 -$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 - ac_datarootdir_hack=' - s&@datadir@&$datadir&g - s&@docdir@&$docdir&g - s&@infodir@&$infodir&g - s&@localedir@&$localedir&g - s&@mandir@&$mandir&g - s&\\\${datarootdir}&$datarootdir&g' ;; -esac -_ACEOF - -# Neutralize VPATH when `$srcdir' = `.'. -# Shell code in configure.ac might set extrasub. -# FIXME: do we really want to maintain this feature? 
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -ac_sed_extra="$ac_vpsub -$extrasub -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -:t -/@[a-zA-Z_][a-zA-Z_0-9]*@/!b -s|@configure_input@|$ac_sed_conf_input|;t t -s&@top_builddir@&$ac_top_builddir_sub&;t t -s&@top_build_prefix@&$ac_top_build_prefix&;t t -s&@srcdir@&$ac_srcdir&;t t -s&@abs_srcdir@&$ac_abs_srcdir&;t t -s&@top_srcdir@&$ac_top_srcdir&;t t -s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t -s&@builddir@&$ac_builddir&;t t -s&@abs_builddir@&$ac_abs_builddir&;t t -s&@abs_top_builddir@&$ac_abs_top_builddir&;t t -$ac_datarootdir_hack -" -eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ - >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - -test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && - { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && - { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ - "$ac_tmp/out"`; test -z "$ac_out"; } && - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' -which seems to be undefined. Please make sure it is defined" >&5 -$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' -which seems to be undefined. Please make sure it is defined" >&2;} - - rm -f "$ac_tmp/stdin" - case $ac_file in - -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; - *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; - esac \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - ;; - - - - esac - -done # for ac_tag - - -as_fn_exit 0 -_ACEOF -ac_clean_files=$ac_clean_files_save - -test $ac_write_fail = 0 || - as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 - - -# configure is writing to config.log, and then calls config.status. -# config.status does its own redirection, appending to config.log. -# Unfortunately, on DOS this fails, as config.log is still kept open -# by configure, so config.status won't be able to write to it; its -# output is simply discarded. So we exec the FD to /dev/null, -# effectively closing config.log, so it can be properly (re)opened and -# appended to by config.status. When coming back to configure, we -# need to make the FD available again. -if test "$no_create" != yes; then - ac_cs_success=: - ac_config_status_args= - test "$silent" = yes && - ac_config_status_args="$ac_config_status_args --quiet" - exec 5>/dev/null - $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false - exec 5>>config.log - # Use ||, not &&, to avoid exiting from the if with $? = 1, which - # would make configure fail if this is the last instruction. 
- $ac_cs_success || as_fn_exit 1
-fi
-if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
-$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
-fi
-
-
diff --git a/ml-xgboost/R-package/configure.ac b/ml-xgboost/R-package/configure.ac
deleted file mode 100644
index c683a94..0000000
--- a/ml-xgboost/R-package/configure.ac
+++ /dev/null
@@ -1,55 +0,0 @@
-### configure.ac -*- Autoconf -*-
-
-AC_PREREQ(2.62)
-
-AC_INIT([xgboost],[0.6-3],[],[xgboost],[])
-
-# Use this line to set CC variable to a C compiler
-AC_PROG_CC
-
-### Check whether backtrace() is part of libc or the external lib libexecinfo
-AC_MSG_CHECKING([Backtrace lib])
-AC_MSG_RESULT([])
-AC_CHECK_LIB([execinfo], [backtrace], [BACKTRACE_LIB=-lexecinfo], [BACKTRACE_LIB=''])
-
-### Endian detection
-AC_MSG_CHECKING([endian])
-AC_MSG_RESULT([])
-AC_RUN_IFELSE([AC_LANG_PROGRAM([[#include <stdint.h>]], [[const uint16_t endianness = 256; return !!(*(const uint8_t *)&endianness);]])],
- [ENDIAN_FLAG="-DDMLC_CMAKE_LITTLE_ENDIAN=1"],
- [ENDIAN_FLAG="-DDMLC_CMAKE_LITTLE_ENDIAN=0"])
-
-OPENMP_CXXFLAGS=""
-
-if test `uname -s` = "Linux"
-then
- OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CXXFLAGS)"
-fi
-
-if test `uname -s` = "Darwin"
-then
- OPENMP_CXXFLAGS='-Xclang -fopenmp'
- OPENMP_LIB='/usr/local/lib/libomp.dylib'
- ac_pkg_openmp=no
- AC_MSG_CHECKING([whether OpenMP will work in a package])
- AC_LANG_CONFTEST([AC_LANG_PROGRAM([[#include <omp.h>]], [[ return (omp_get_max_threads() <= 1); ]])])
- ${CC} -o conftest conftest.c /usr/local/lib/libomp.dylib -Xclang -fopenmp 2>/dev/null && ./conftest && ac_pkg_openmp=yes
- AC_MSG_RESULT([${ac_pkg_openmp}])
- if test "${ac_pkg_openmp}" = no; then
- OPENMP_CXXFLAGS=''
- OPENMP_LIB=''
- echo '*****************************************************************************************'
- echo 'WARNING: OpenMP is unavailable on this Mac OSX system. Training speed may be suboptimal.'
- echo ' To use all CPU cores for training jobs, you should install OpenMP by running'
- echo ' brew install libomp'
- echo '*****************************************************************************************'
- fi
-fi
-
-AC_SUBST(OPENMP_CXXFLAGS)
-AC_SUBST(OPENMP_LIB)
-AC_SUBST(ENDIAN_FLAG)
-AC_SUBST(BACKTRACE_LIB)
-AC_CONFIG_FILES([src/Makevars])
-AC_OUTPUT
-
diff --git a/ml-xgboost/R-package/configure.win b/ml-xgboost/R-package/configure.win
deleted file mode 100644
index e69de29..0000000
diff --git a/ml-xgboost/R-package/demo/00Index b/ml-xgboost/R-package/demo/00Index
deleted file mode 100644
index 5c949d0..0000000
--- a/ml-xgboost/R-package/demo/00Index
+++ /dev/null
@@ -1,15 +0,0 @@
-basic_walkthrough Basic feature walkthrough
-caret_wrapper Use xgboost to train in the caret library
-custom_objective Customize loss function and evaluation metric
-boost_from_prediction Boosting from existing prediction
-predict_first_ntree Predicting using first n trees
-generalized_linear_model Generalized Linear Model
-cross_validation Cross validation
-create_sparse_matrix Create Sparse Matrix
-predict_leaf_indices Predicting the corresponding leaves
-early_stopping Early stopping in training
-poisson_regression Poisson Regression on count data
-tweedie_regression Tweedie Regression
-gpu_accelerated GPU-accelerated tree building algorithms
-interaction_constraints Interaction constraints among features
-
diff --git a/ml-xgboost/R-package/demo/README.md b/ml-xgboost/R-package/demo/README.md
deleted file mode 100644
index e53afea..0000000
--- a/ml-xgboost/R-package/demo/README.md
+++ /dev/null
@@ -1,20 +0,0 @@
-XGBoost R Feature Walkthrough
-====
-* [Basic walkthrough of wrappers](basic_walkthrough.R)
-* [Train an xgboost model from the caret library](caret_wrapper.R)
-* [Customize loss function and evaluation metric](custom_objective.R)
-* [Boosting from existing prediction](boost_from_prediction.R)
-* [Predicting using first n trees](predict_first_ntree.R)
-* [Generalized Linear Model](generalized_linear_model.R)
-* [Cross validation](cross_validation.R)
-* [Create a sparse matrix from a dense one](create_sparse_matrix.R)
-* [Use GPU-accelerated tree building algorithms](gpu_accelerated.R)
-
-Benchmarks
-====
-* [Starter script for Kaggle Higgs Boson](../../demo/kaggle-higgs)
-
-Notes
-====
-* Contributions of examples and benchmarks are more than welcome!
-* If you would like to share how you use xgboost to solve your problem, send a pull request :)
diff --git a/ml-xgboost/R-package/demo/basic_walkthrough.R b/ml-xgboost/R-package/demo/basic_walkthrough.R
deleted file mode 100644
index bb6b850..0000000
--- a/ml-xgboost/R-package/demo/basic_walkthrough.R
+++ /dev/null
@@ -1,112 +0,0 @@
-require(xgboost)
-require(methods)
-
-# we load in the agaricus dataset
-# In this example, we are aiming to predict whether a mushroom is edible
-data(agaricus.train, package='xgboost')
-data(agaricus.test, package='xgboost')
-train <- agaricus.train
-test <- agaricus.test
-# the loaded data is stored as a sparseMatrix, and the label is a numeric vector in {0,1}
-class(train$label)
-class(train$data)
-
-#-------------Basic Training using XGBoost-----------------
-# this is the basic usage of xgboost: you can put a matrix in the data field
-# note: we are putting in a sparse matrix here, xgboost naturally handles sparse input
-# use a sparse matrix when your feature is sparse (e.g.
when you are using a one-hot encoding vector)
-print("Training xgboost with sparseMatrix")
-bst <- xgboost(data = train$data, label = train$label, max_depth = 2, eta = 1, nrounds = 2,
- nthread = 2, objective = "binary:logistic")
-# alternatively, you can put in a dense matrix, i.e. a basic R matrix
-print("Training xgboost with Matrix")
-bst <- xgboost(data = as.matrix(train$data), label = train$label, max_depth = 2, eta = 1, nrounds = 2,
- nthread = 2, objective = "binary:logistic")
-
-# you can also put in an xgb.DMatrix object, which stores label, data and other metadata needed for advanced features
-print("Training xgboost with xgb.DMatrix")
-dtrain <- xgb.DMatrix(data = train$data, label = train$label)
-bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, nthread = 2,
- objective = "binary:logistic")
-
-# Verbose = 0,1,2
-print("Train xgboost with verbose 0, no message")
-bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2,
- nthread = 2, objective = "binary:logistic", verbose = 0)
-print("Train xgboost with verbose 1, print evaluation metric")
-bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2,
- nthread = 2, objective = "binary:logistic", verbose = 1)
-print("Train xgboost with verbose 2, also print information about tree")
-bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2,
- nthread = 2, objective = "binary:logistic", verbose = 2)
-
-# you can also specify data as a file path to a LibSVM format input
-# since we do not have this file with us, the following line is just for illustration
-# bst <- xgboost(data = 'agaricus.train.svm', max_depth = 2, eta = 1, nrounds = 2,objective = "binary:logistic")
-
-#--------------------basic prediction using xgboost--------------
-# you can do prediction using the following line
-# you can put in Matrix, sparseMatrix, or xgb.DMatrix
-pred <- predict(bst, test$data)
-err <- mean(as.numeric(pred > 0.5) != test$label)
-print(paste("test-error=", err))
-
-#-------------------save and load models-------------------------
-# save model to binary local file
-xgb.save(bst, "xgboost.model")
-# load binary model to R
-bst2 <- xgb.load("xgboost.model")
-pred2 <- predict(bst2, test$data)
-# pred2 should be identical to pred
-print(paste("sum(abs(pred2-pred))=", sum(abs(pred2-pred))))
-
-# save model to R's raw vector
-raw = xgb.save.raw(bst)
-# load binary model to R
-bst3 <- xgb.load(raw)
-pred3 <- predict(bst3, test$data)
-# pred3 should be identical to pred
-print(paste("sum(abs(pred3-pred))=", sum(abs(pred3-pred))))
-
-#----------------Advanced features --------------
-# to use advanced features, we need to put data in xgb.DMatrix
-dtrain <- xgb.DMatrix(data = train$data, label=train$label)
-dtest <- xgb.DMatrix(data = test$data, label=test$label)
-#---------------Using watchlist----------------
-# watchlist is a list of xgb.DMatrix, each of them tagged with a name
-watchlist <- list(train=dtrain, test=dtest)
-# to train with a watchlist, use xgb.train, which contains more advanced features
-# the watchlist allows us to monitor the evaluation result on all data in the list
-print("Train xgboost using xgb.train with watchlist")
-bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nrounds=2, watchlist=watchlist,
- nthread = 2, objective = "binary:logistic")
-# we can change evaluation metrics, or use multiple evaluation metrics
-print("train xgboost using xgb.train with watchlist, watch logloss and error")
-bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nrounds=2, watchlist=watchlist,
- eval_metric =
"error", eval_metric = "logloss", - nthread = 2, objective = "binary:logistic") - -# xgb.DMatrix can also be saved using xgb.DMatrix.save -xgb.DMatrix.save(dtrain, "dtrain.buffer") -# to load it in, simply call xgb.DMatrix -dtrain2 <- xgb.DMatrix("dtrain.buffer") -bst <- xgb.train(data=dtrain2, max_depth=2, eta=1, nrounds=2, watchlist=watchlist, - nthread = 2, objective = "binary:logistic") -# information can be extracted from xgb.DMatrix using getinfo -label = getinfo(dtest, "label") -pred <- predict(bst, dtest) -err <- as.numeric(sum(as.integer(pred > 0.5) != label))/length(label) -print(paste("test-error=", err)) - -# You can dump the tree you learned using xgb.dump into a text file -dump_path = file.path(tempdir(), 'dump.raw.txt') -xgb.dump(bst, dump_path, with_stats = T) - -# Finally, you can check which features are the most important. -print("Most important features (look at column Gain):") -imp_matrix <- xgb.importance(feature_names = colnames(train$data), model = bst) -print(imp_matrix) - -# Feature importance bar plot by gain -print("Feature importance Plot : ") -print(xgb.plot.importance(importance_matrix = imp_matrix)) diff --git a/ml-xgboost/R-package/demo/boost_from_prediction.R b/ml-xgboost/R-package/demo/boost_from_prediction.R deleted file mode 100644 index 1765650..0000000 --- a/ml-xgboost/R-package/demo/boost_from_prediction.R +++ /dev/null @@ -1,26 +0,0 @@ -require(xgboost) -# load in the agaricus dataset -data(agaricus.train, package='xgboost') -data(agaricus.test, package='xgboost') -dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label) -dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label) - -watchlist <- list(eval = dtest, train = dtrain) -### -# advanced: start from a initial base prediction -# -print('start running example to start from a initial prediction') -# train xgboost for 1 round -param <- list(max_depth=2, eta=1, nthread = 2, silent=1, objective='binary:logistic') -bst <- xgb.train(param, dtrain, 1, watchlist) -# Note: we need the margin value instead of transformed prediction in set_base_margin -# do predict with output_margin=TRUE, will always give you margin values before logistic transformation -ptrain <- predict(bst, dtrain, outputmargin=TRUE) -ptest <- predict(bst, dtest, outputmargin=TRUE) -# set the base_margin property of dtrain and dtest -# base margin is the base prediction we will boost from -setinfo(dtrain, "base_margin", ptrain) -setinfo(dtest, "base_margin", ptest) - -print('this is result of boost from initial prediction') -bst <- xgb.train(params = param, data = dtrain, nrounds = 1, watchlist = watchlist) diff --git a/ml-xgboost/R-package/demo/caret_wrapper.R b/ml-xgboost/R-package/demo/caret_wrapper.R deleted file mode 100644 index 751b202..0000000 --- a/ml-xgboost/R-package/demo/caret_wrapper.R +++ /dev/null @@ -1,35 +0,0 @@ -# install development version of caret library that contains xgboost models -devtools::install_github("topepo/caret/pkg/caret") -require(caret) -require(xgboost) -require(data.table) -require(vcd) -require(e1071) - -# Load Arthritis dataset in memory. -data(Arthritis) -# Create a copy of the dataset with data.table package (data.table is 100% compliant with R dataframe but its syntax is a lot more consistent and its performance are really good). -df <- data.table(Arthritis, keep.rownames = F) - -# Let's add some new categorical features to see if it helps. Of course these feature are highly correlated to the Age feature. 
Usually this is not a good thing in ML, but tree algorithms (including boosted trees) are able to select the best features, even in the case of highly correlated features.
-# For the first feature we create groups of age by rounding the real age. Note that we transform it to a factor (categorical data) so the algorithm treats them as independent values.
-df[,AgeDiscret:= as.factor(round(Age/10,0))]
-
-# Here is an even stronger simplification of the real age with an arbitrary split at 30 years old. I chose this value arbitrarily. We will see later if simplifying the information based on arbitrary values is a good strategy (I am sure you already have an idea of how well it will work!).
-df[,AgeCat:= as.factor(ifelse(Age > 30, "Old", "Young"))]
-
-# We remove ID as there is nothing to learn from this feature (it will just add some noise as the dataset is small).
-df[,ID:=NULL]
-
-#-------------Basic Training using XGBoost in caret Library-----------------
-# Set up control parameters for caret::train
-# Here we use 10-fold cross-validation, repeating twice, and using random search for tuning hyper-parameters.
-fitControl <- trainControl(method = "repeatedcv", number = 10, repeats = 2, search = "random")
-# train an xgbTree model using caret::train
-model <- train(factor(Improved)~., data = df, method = "xgbTree", trControl = fitControl)
-
-# Instead of trees for our boosters, you can also fit a linear regression or logistic regression model using xgbLinear
-# model <- train(factor(Improved)~., data = df, method = "xgbLinear", trControl = fitControl)
-
-# See model results
-print(model)
diff --git a/ml-xgboost/R-package/demo/create_sparse_matrix.R b/ml-xgboost/R-package/demo/create_sparse_matrix.R
deleted file mode 100644
index 6069f33..0000000
--- a/ml-xgboost/R-package/demo/create_sparse_matrix.R
+++ /dev/null
@@ -1,89 +0,0 @@
-require(xgboost)
-require(Matrix)
-require(data.table)
-if (!require(vcd)) {
- install.packages('vcd') #Available on CRAN. Used for its dataset with categorical values.
- require(vcd)
-}
-# According to its documentation, Xgboost works only on numbers.
-# Sometimes the dataset we have to work on has categorical data.
-# A categorical variable is one which has a fixed number of values. For example, if for each observation a variable called "Colour" can have only "red", "blue" or "green" as its value, it is a categorical variable.
-#
-# In R, a categorical variable is called a factor.
-# Type ?factor in the console for more information.
-#
-# In this demo we will see how to transform a dense data frame with categorical variables to a sparse matrix before analyzing it in Xgboost.
-# The method we are going to see is usually called "one hot encoding".
-
-#load Arthritis dataset in memory.
-data(Arthritis)
-
-# create a copy of the dataset with the data.table package (data.table is 100% compatible with R data frames, but its syntax is a lot more consistent and its performance is really good).
-df <- data.table(Arthritis, keep.rownames = F)
-
-# Let's have a look at the data.table
-cat("Print the dataset\n")
-print(df)
-
-# 2 columns have factor type, one has ordinal type (an ordinal variable is a categorical variable with values which can be ordered, here: None > Some > Marked).
-cat("Structure of the dataset\n")
-str(df)
-
-# Let's add some new categorical features to see if it helps. Of course these features are highly correlated with the Age feature.
Usually this is not a good thing in ML, but tree algorithms (including boosted trees) are able to select the best features, even in the case of highly correlated features.
-
-# For the first feature we create groups of age by rounding the real age. Note that we transform it to a factor (categorical data) so the algorithm treats them as independent values.
-df[,AgeDiscret:= as.factor(round(Age/10,0))]
-
-# Here is an even stronger simplification of the real age with an arbitrary split at 30 years old. I chose this value arbitrarily. We will see later if simplifying the information based on arbitrary values is a good strategy (I am sure you already have an idea of how well it will work!).
-df[,AgeCat:= as.factor(ifelse(Age > 30, "Old", "Young"))]
-
-# We remove ID as there is nothing to learn from this feature (it will just add some noise as the dataset is small).
-df[,ID:=NULL]
-
-# List the different values for the column Treatment: Placebo, Treated.
-cat("Values of the categorical feature Treatment\n")
-print(levels(df[,Treatment]))
-
-# Next step, we will transform the categorical data to dummy variables.
-# This method is also called one hot encoding.
-# The purpose is to transform each value of each categorical feature into one binary feature.
-#
-# For example, the column Treatment will be replaced by two columns, Placebo and Treated. Each of them will be binary: an observation which had the value Placebo in the column Treatment before the transformation will have, after the transformation, the value 1 in the new column Placebo and the value 0 in the new column Treated.
-#
-# The formula Improved~.-1 used below means: transform all categorical features except the column Improved to binary values.
-# Column Improved is excluded because it will be our output column, the one we want to predict.
-sparse_matrix = sparse.model.matrix(Improved~.-1, data = df)
-
-cat("Encoding of the sparse Matrix\n")
-print(sparse_matrix)
-
-# Create the output vector (not sparse)
-# 1. Set, for all rows, field in Y column to 0;
-# 2. set Y to 1 when Improved == Marked;
-# 3. Return Y column
-output_vector = df[,Y:=0][Improved == "Marked",Y:=1][,Y]
-
-# Following is the same process as in the other demos
-cat("Learning...\n")
-bst <- xgboost(data = sparse_matrix, label = output_vector, max_depth = 9,
- eta = 1, nthread = 2, nrounds = 10, objective = "binary:logistic")
-
-importance <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst)
-print(importance)
-# According to the matrix below, the most important feature in this dataset to predict if the treatment will work is Age. The second most important feature is having received a placebo or not. Sex is third. Then we see our generated features (AgeDiscret). We can see that their contribution is very low (Gain column).
-
-# Do these results make sense?
-# Let's check some Chi2 between each of these features and the outcome.
-
-print(chisq.test(df$Age, df$Y))
-# The Pearson chi-squared statistic between Age and the illness disappearing is about 35.
-
-print(chisq.test(df$AgeDiscret, df$Y))
-# Our first simplification of Age gives a chi-squared statistic of about 8.
-
-print(chisq.test(df$AgeCat, df$Y))
-# The perfectly arbitrary split I did between young and old at 30 years old has a low chi-squared statistic of about 2. This is a result we might expect: maybe in my mind being over 30 is old (I am 32 and starting to feel old, which may explain that), but for the illness we are studying, the vulnerable age is different. Don't let your "gut" lower the quality of your model.
In "data science", there is science :-) - -# As you can see, in general destroying information by simplifying it won't improve your model. Chi2 just demonstrates that. But in more complex cases, creating a new feature based on existing one which makes link with the outcome more obvious may help the algorithm and improve the model. The case studied here is not enough complex to show that. Check Kaggle forum for some challenging datasets. -# However it's almost always worse when you add some arbitrary rules. -# Moreover, you can notice that even if we have added some not useful new features highly correlated with other features, the boosting tree algorithm have been able to choose the best one, which in this case is the Age. Linear model may not be that strong in these scenario. diff --git a/ml-xgboost/R-package/demo/cross_validation.R b/ml-xgboost/R-package/demo/cross_validation.R deleted file mode 100644 index d074552..0000000 --- a/ml-xgboost/R-package/demo/cross_validation.R +++ /dev/null @@ -1,51 +0,0 @@ -require(xgboost) -# load in the agaricus dataset -data(agaricus.train, package='xgboost') -data(agaricus.test, package='xgboost') -dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label) -dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label) - -nrounds <- 2 -param <- list(max_depth=2, eta=1, silent=1, nthread=2, objective='binary:logistic') - -cat('running cross validation\n') -# do cross validation, this will print result out as -# [iteration] metric_name:mean_value+std_value -# std_value is standard deviation of the metric -xgb.cv(param, dtrain, nrounds, nfold=5, metrics={'error'}) - -cat('running cross validation, disable standard deviation display\n') -# do cross validation, this will print result out as -# [iteration] metric_name:mean_value+std_value -# std_value is standard deviation of the metric -xgb.cv(param, dtrain, nrounds, nfold=5, - metrics='error', showsd = FALSE) - -### -# you can also do cross validation with cutomized loss function -# See custom_objective.R -## -print ('running cross validation, with cutomsized loss function') - -logregobj <- function(preds, dtrain) { - labels <- getinfo(dtrain, "label") - preds <- 1/(1 + exp(-preds)) - grad <- preds - labels - hess <- preds * (1 - preds) - return(list(grad = grad, hess = hess)) -} -evalerror <- function(preds, dtrain) { - labels <- getinfo(dtrain, "label") - err <- as.numeric(sum(labels != (preds > 0)))/length(labels) - return(list(metric = "error", value = err)) -} - -param <- list(max_depth=2, eta=1, silent=1, - objective = logregobj, eval_metric = evalerror) -# train with customized objective -xgb.cv(params = param, data = dtrain, nrounds = nrounds, nfold = 5) - -# do cross validation with prediction values for each fold -res <- xgb.cv(params = param, data = dtrain, nrounds = nrounds, nfold = 5, prediction = TRUE) -res$evaluation_log -length(res$pred) diff --git a/ml-xgboost/R-package/demo/custom_objective.R b/ml-xgboost/R-package/demo/custom_objective.R deleted file mode 100644 index ec7e7e8..0000000 --- a/ml-xgboost/R-package/demo/custom_objective.R +++ /dev/null @@ -1,65 +0,0 @@ -require(xgboost) -# load in the agaricus dataset -data(agaricus.train, package='xgboost') -data(agaricus.test, package='xgboost') -dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label) -dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label) - -# note: for customized objective function, we leave objective as default -# note: what we are getting is margin value in 
prediction
-# you must know what you are doing
-watchlist <- list(eval = dtest, train = dtrain)
-num_round <- 2
-
-# user-defined objective function: given predictions, return the gradient and second-order gradient
-# this is the log-likelihood loss
-logregobj <- function(preds, dtrain) {
- labels <- getinfo(dtrain, "label")
- preds <- 1/(1 + exp(-preds))
- grad <- preds - labels
- hess <- preds * (1 - preds)
- return(list(grad = grad, hess = hess))
-}
-
-# user-defined evaluation function: returns a pair (metric_name, result)
-# NOTE: when you use a customized loss function, the default prediction value is the margin
-# this may make the built-in evaluation metrics not function properly
-# for example, with logistic loss the prediction is the score before the logistic transformation
-# the built-in evaluation error assumes the input is after the logistic transformation
-# Keep this in mind when you use the customization; you may need to write a customized evaluation function
-evalerror <- function(preds, dtrain) {
- labels <- getinfo(dtrain, "label")
- err <- as.numeric(sum(labels != (preds > 0)))/length(labels)
- return(list(metric = "error", value = err))
-}
-
-param <- list(max_depth=2, eta=1, nthread = 2, verbosity=0,
- objective=logregobj, eval_metric=evalerror)
-print('start training with user customized objective')
-# training with customized objective, we can also do step by step training
-# simply look at xgboost.py's implementation of train
-bst <- xgb.train(param, dtrain, num_round, watchlist)
-
-#
-# there can be cases where you want additional information
-# being considered besides the properties of the DMatrix you can get by getinfo
-# you can set additional information as attributes of the DMatrix
-
-# set the label attribute of dtrain to be the label; we use label as an example, it can be anything
-attr(dtrain, 'label') <- getinfo(dtrain, 'label')
-# this is a new customized objective, where you can access things you set
-# the same thing applies to a customized evaluation function
-logregobjattr <- function(preds, dtrain) {
- # now you can access the attribute in the customized function
- labels <- attr(dtrain, 'label')
- preds <- 1/(1 + exp(-preds))
- grad <- preds - labels
- hess <- preds * (1 - preds)
- return(list(grad = grad, hess = hess))
-}
-param <- list(max_depth=2, eta=1, nthread = 2, verbosity=0,
- objective=logregobjattr, eval_metric=evalerror)
-print('start training with user customized objective, with additional attributes in DMatrix')
-# training with customized objective, we can also do step by step training
-# simply look at xgboost.py's implementation of train
-bst <- xgb.train(param, dtrain, num_round, watchlist)
diff --git a/ml-xgboost/R-package/demo/early_stopping.R b/ml-xgboost/R-package/demo/early_stopping.R
deleted file mode 100644
index 92a3ee8..0000000
--- a/ml-xgboost/R-package/demo/early_stopping.R
+++ /dev/null
@@ -1,40 +0,0 @@
-require(xgboost)
-# load in the agaricus dataset
-data(agaricus.train, package='xgboost')
-data(agaricus.test, package='xgboost')
-dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
-dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
-# note: for a customized objective function, we leave objective as default
-# note: what we are getting is the margin value in prediction
-# you must know what you are doing
-param <- list(max_depth=2, eta=1, nthread=2, verbosity=0)
-watchlist <- list(eval = dtest)
-num_round <- 20
-# user-defined objective function: given predictions, return the gradient and second-order gradient
-# this is the log-likelihood loss
-logregobj
<- function(preds, dtrain) {
- labels <- getinfo(dtrain, "label")
- preds <- 1/(1 + exp(-preds))
- grad <- preds - labels
- hess <- preds * (1 - preds)
- return(list(grad = grad, hess = hess))
-}
-# user-defined evaluation function: returns a pair (metric_name, result)
-# NOTE: when you use a customized loss function, the default prediction value is the margin
-# this may make the built-in evaluation metrics not function properly
-# for example, with logistic loss the prediction is the score before the logistic transformation
-# the built-in evaluation error assumes the input is after the logistic transformation
-# Keep this in mind when you use the customization; you may need to write a customized evaluation function
-evalerror <- function(preds, dtrain) {
- labels <- getinfo(dtrain, "label")
- err <- as.numeric(sum(labels != (preds > 0)))/length(labels)
- return(list(metric = "error", value = err))
-}
-print('start training with early stopping')
-
-bst <- xgb.train(param, dtrain, num_round, watchlist,
- objective = logregobj, eval_metric = evalerror, maximize = FALSE,
- early_stopping_rounds = 3)
-bst <- xgb.cv(param, dtrain, num_round, nfold = 5,
- objective = logregobj, eval_metric = evalerror,
- maximize = FALSE, early_stopping_rounds = 3)
diff --git a/ml-xgboost/R-package/demo/generalized_linear_model.R b/ml-xgboost/R-package/demo/generalized_linear_model.R
deleted file mode 100644
index 3c2cdb5..0000000
--- a/ml-xgboost/R-package/demo/generalized_linear_model.R
+++ /dev/null
@@ -1,34 +0,0 @@
-require(xgboost)
-# load in the agaricus dataset
-data(agaricus.train, package='xgboost')
-data(agaricus.test, package='xgboost')
-dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
-dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
-##
-# this script demonstrates how to fit a generalized linear model in xgboost
-# basically, we are using a linear model, instead of trees, for our boosters
-# you can fit a linear regression or logistic regression model
-##
-
-# change booster to gblinear, so that we are fitting a linear model
-# alpha is the L1 regularizer
-# lambda is the L2 regularizer
-# you can also set lambda_bias, which is the L2 regularizer on the bias term
-param <- list(objective = "binary:logistic", booster = "gblinear",
- nthread = 2, alpha = 0.0001, lambda = 1)
-
-# normally, you do not need to set eta (step_size)
-# XGBoost uses a parallel coordinate descent algorithm (shotgun);
-# parallelization can affect convergence in certain cases
-# setting eta to a smaller value, e.g. 0.5, can make the optimization more stable
-
-##
-# the rest of the settings are the same
-##
-watchlist <- list(eval = dtest, train = dtrain)
-num_round <- 2
-bst <- xgb.train(param, dtrain, num_round, watchlist)
-ypred <- predict(bst, dtest)
-labels <- getinfo(dtest, 'label')
-cat('error of preds=', mean(as.numeric(ypred>0.5)!=labels),'\n')
-
diff --git a/ml-xgboost/R-package/demo/gpu_accelerated.R b/ml-xgboost/R-package/demo/gpu_accelerated.R
deleted file mode 100644
index 321255c..0000000
--- a/ml-xgboost/R-package/demo/gpu_accelerated.R
+++ /dev/null
@@ -1,45 +0,0 @@
-# An example of using GPU-accelerated tree building algorithms
-#
-# NOTE: it can only run if you have a CUDA-enabled GPU and the package was
-# specially compiled with GPU support.
-# -# For the current functionality, see -# https://xgboost.readthedocs.io/en/latest/gpu/index.html -# - -library('xgboost') - -# Simulate N x p random matrix with some binomial response dependent on pp columns -set.seed(111) -N <- 1000000 -p <- 50 -pp <- 25 -X <- matrix(runif(N * p), ncol = p) -betas <- 2 * runif(pp) - 1 -sel <- sort(sample(p, pp)) -m <- X[, sel] %*% betas - 1 + rnorm(N) -y <- rbinom(N, 1, plogis(m)) - -tr <- sample.int(N, N * 0.75) -dtrain <- xgb.DMatrix(X[tr,], label = y[tr]) -dtest <- xgb.DMatrix(X[-tr,], label = y[-tr]) -wl <- list(train = dtrain, test = dtest) - -# An example of running 'gpu_hist' algorithm -# which is -# - similar to the 'hist' -# - the fastest option for moderately large datasets -# - current limitations: max_depth < 16, does not implement guided loss -# You can use tree_method = 'gpu_hist' for another GPU accelerated algorithm, -# which is slower, more memory-hungry, but does not use binning. -param <- list(objective = 'reg:logistic', eval_metric = 'auc', subsample = 0.5, nthread = 4, - max_bin = 64, tree_method = 'gpu_hist') -pt <- proc.time() -bst_gpu <- xgb.train(param, dtrain, watchlist = wl, nrounds = 50) -proc.time() - pt - -# Compare to the 'hist' algorithm: -param$tree_method <- 'hist' -pt <- proc.time() -bst_hist <- xgb.train(param, dtrain, watchlist = wl, nrounds = 50) -proc.time() - pt diff --git a/ml-xgboost/R-package/demo/interaction_constraints.R b/ml-xgboost/R-package/demo/interaction_constraints.R deleted file mode 100644 index 2f2edb1..0000000 --- a/ml-xgboost/R-package/demo/interaction_constraints.R +++ /dev/null @@ -1,105 +0,0 @@ -library(xgboost) -library(data.table) - -set.seed(1024) - -# Function to obtain a list of interactions fitted in trees, requires input of maximum depth -treeInteractions <- function(input_tree, input_max_depth){ - trees <- copy(input_tree) # copy tree input to prevent overwriting - if (input_max_depth < 2) return(list()) # no interactions if max depth < 2 - if (nrow(input_tree) == 1) return(list()) - - # Attach parent nodes - for (i in 2:input_max_depth){ - if (i == 2) trees[, ID_merge:=ID] else trees[, ID_merge:=get(paste0('parent_',i-2))] - parents_left <- trees[!is.na(Split), list(i.id=ID, i.feature=Feature, ID_merge=Yes)] - parents_right <- trees[!is.na(Split), list(i.id=ID, i.feature=Feature, ID_merge=No)] - - setorderv(trees, 'ID_merge') - setorderv(parents_left, 'ID_merge') - setorderv(parents_right, 'ID_merge') - - trees <- merge(trees, parents_left, by='ID_merge', all.x=T) - trees[!is.na(i.id), c(paste0('parent_', i-1), paste0('parent_feat_', i-1)):=list(i.id, i.feature)] - trees[, c('i.id','i.feature'):=NULL] - - trees <- merge(trees, parents_right, by='ID_merge', all.x=T) - trees[!is.na(i.id), c(paste0('parent_', i-1), paste0('parent_feat_', i-1)):=list(i.id, i.feature)] - trees[, c('i.id','i.feature'):=NULL] - } - - # Extract nodes with interactions - interaction_trees <- trees[!is.na(Split) & !is.na(parent_1), - c('Feature',paste0('parent_feat_',1:(input_max_depth-1))), with=F] - interaction_trees_split <- split(interaction_trees, 1:nrow(interaction_trees)) - interaction_list <- lapply(interaction_trees_split, as.character) - - # Remove NAs (no parent interaction) - interaction_list <- lapply(interaction_list, function(x) x[!is.na(x)]) - - # Remove non-interactions (same variable) - interaction_list <- lapply(interaction_list, unique) # remove same variables - interaction_length <- sapply(interaction_list, length) - interaction_list <- interaction_list[interaction_length > 1] - 
interaction_list <- unique(lapply(interaction_list, sort)) - return(interaction_list) -} - -# Generate sample data -x <- list() -for (i in 1:10){ - x[[i]] = i*rnorm(1000, 10) -} -x <- as.data.table(x) - -y = -1*x[, rowSums(.SD)] + x[['V1']]*x[['V2']] + x[['V3']]*x[['V4']]*x[['V5']] + rnorm(1000, 0.001) + 3*sin(x[['V7']]) - -train = as.matrix(x) - -# Interaction constraint list (column names form) -interaction_list <- list(c('V1','V2'),c('V3','V4','V5')) - -# Convert interaction constraint list into feature index form -cols2ids <- function(object, col_names) { - LUT <- seq_along(col_names) - 1 - names(LUT) <- col_names - rapply(object, function(x) LUT[x], classes="character", how="replace") -} -interaction_list_fid = cols2ids(interaction_list, colnames(train)) - -# Fit model with interaction constraints -bst = xgboost(data = train, label = y, max_depth = 4, - eta = 0.1, nthread = 2, nrounds = 1000, - interaction_constraints = interaction_list_fid) - -bst_tree <- xgb.model.dt.tree(colnames(train), bst) -bst_interactions <- treeInteractions(bst_tree, 4) # interactions constrained to combinations of V1*V2 and V3*V4*V5 - -# Fit model without interaction constraints -bst2 = xgboost(data = train, label = y, max_depth = 4, - eta = 0.1, nthread = 2, nrounds = 1000) - -bst2_tree <- xgb.model.dt.tree(colnames(train), bst2) -bst2_interactions <- treeInteractions(bst2_tree, 4) # much more interactions - -# Fit model with both interaction and monotonicity constraints -bst3 = xgboost(data = train, label = y, max_depth = 4, - eta = 0.1, nthread = 2, nrounds = 1000, - interaction_constraints = interaction_list_fid, - monotone_constraints = c(-1,0,0,0,0,0,0,0,0,0)) - -bst3_tree <- xgb.model.dt.tree(colnames(train), bst3) -bst3_interactions <- treeInteractions(bst3_tree, 4) # interactions still constrained to combinations of V1*V2 and V3*V4*V5 - -# Show monotonic constraints still apply by checking scores after incrementing V1 -x1 <- sort(unique(x[['V1']])) -for (i in 1:length(x1)){ - testdata <- copy(x[, -c('V1')]) - testdata[['V1']] <- x1[i] - testdata <- testdata[, paste0('V',1:10), with=F] - pred <- predict(bst3, as.matrix(testdata)) - - # Should not print out anything due to monotonic constraints - if (i > 1) if (any(pred > prev_pred)) print(i) - prev_pred <- pred -} diff --git a/ml-xgboost/R-package/demo/poisson_regression.R b/ml-xgboost/R-package/demo/poisson_regression.R deleted file mode 100644 index f9dc4ac..0000000 --- a/ml-xgboost/R-package/demo/poisson_regression.R +++ /dev/null @@ -1,7 +0,0 @@ -data(mtcars) -head(mtcars) -bst = xgboost(data=as.matrix(mtcars[,-11]),label=mtcars[,11], - objective='count:poisson',nrounds=5) -pred = predict(bst,as.matrix(mtcars[,-11])) -sqrt(mean((pred-mtcars[,11])^2)) - diff --git a/ml-xgboost/R-package/demo/predict_first_ntree.R b/ml-xgboost/R-package/demo/predict_first_ntree.R deleted file mode 100644 index 8934c55..0000000 --- a/ml-xgboost/R-package/demo/predict_first_ntree.R +++ /dev/null @@ -1,23 +0,0 @@ -require(xgboost) -# load in the agaricus dataset -data(agaricus.train, package='xgboost') -data(agaricus.test, package='xgboost') -dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label) -dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label) - -param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic') -watchlist <- list(eval = dtest, train = dtrain) -nrounds = 2 - -# training the model for two rounds -bst = xgb.train(param, dtrain, nrounds, nthread = 2, watchlist) -cat('start testing prediction from first n 
trees\n')
-labels <- getinfo(dtest,'label')
-
-### predict using only the first tree
-ypred1 = predict(bst, dtest, ntreelimit=1)
-# by default, we predict using all the trees
-ypred2 = predict(bst, dtest)
-
-cat('error of ypred1=', mean(as.numeric(ypred1>0.5)!=labels),'\n')
-cat('error of ypred2=', mean(as.numeric(ypred2>0.5)!=labels),'\n')
diff --git a/ml-xgboost/R-package/demo/predict_leaf_indices.R b/ml-xgboost/R-package/demo/predict_leaf_indices.R
deleted file mode 100644
index 054bde7..0000000
--- a/ml-xgboost/R-package/demo/predict_leaf_indices.R
+++ /dev/null
@@ -1,53 +0,0 @@
-require(xgboost)
-require(data.table)
-require(Matrix)
-
-set.seed(1982)
-
-# load in the agaricus dataset
-data(agaricus.train, package='xgboost')
-data(agaricus.test, package='xgboost')
-dtrain <- xgb.DMatrix(data = agaricus.train$data, label = agaricus.train$label)
-dtest <- xgb.DMatrix(data = agaricus.test$data, label = agaricus.test$label)
-
-param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic')
-nrounds = 4
-
-# training the model for four rounds
-bst = xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2)
-
-# Model accuracy without new features
-accuracy.before <- sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label) / length(agaricus.test$label)
-
-# by default, we predict using all the trees
-
-pred_with_leaf = predict(bst, dtest, predleaf = TRUE)
-head(pred_with_leaf)
-
-create.new.tree.features <- function(model, original.features){
- pred_with_leaf <- predict(model, original.features, predleaf = TRUE)
- cols <- list()
- for(i in 1:model$niter){
- # max is not the real max, but it's not important for the purpose of adding features
- leaf.id <- sort(unique(pred_with_leaf[,i]))
- cols[[i]] <- factor(x = pred_with_leaf[,i], level = leaf.id)
- }
- cbind(original.features, sparse.model.matrix( ~ . -1, as.data.frame(cols)))
-}
-
-# Convert previous features to one hot encoding
-new.features.train <- create.new.tree.features(bst, agaricus.train$data)
-new.features.test <- create.new.tree.features(bst, agaricus.test$data)
-colnames(new.features.test) <- colnames(new.features.train)
-
-# learning with new features
-new.dtrain <- xgb.DMatrix(data = new.features.train, label = agaricus.train$label)
-new.dtest <- xgb.DMatrix(data = new.features.test, label = agaricus.test$label)
-watchlist <- list(train = new.dtrain)
-bst <- xgb.train(params = param, data = new.dtrain, nrounds = nrounds, nthread = 2)
-
-# Model accuracy with new features
-accuracy.after <- sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label) / length(agaricus.test$label)
-
-# Here the accuracy was already good and is now perfect.
-cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now", accuracy.after, "!\n")) diff --git a/ml-xgboost/R-package/demo/runall.R b/ml-xgboost/R-package/demo/runall.R deleted file mode 100644 index 0c1392a..0000000 --- a/ml-xgboost/R-package/demo/runall.R +++ /dev/null @@ -1,14 +0,0 @@ -# running all scripts in demo folder -demo(basic_walkthrough) -demo(custom_objective) -demo(boost_from_prediction) -demo(predict_first_ntree) -demo(generalized_linear_model) -demo(cross_validation) -demo(create_sparse_matrix) -demo(predict_leaf_indices) -demo(early_stopping) -demo(poisson_regression) -demo(caret_wrapper) -demo(tweedie_regression) -#demo(gpu_accelerated) # can only run when built with GPU support \ No newline at end of file diff --git a/ml-xgboost/R-package/demo/tweedie_regression.R b/ml-xgboost/R-package/demo/tweedie_regression.R deleted file mode 100644 index 4d272f6..0000000 --- a/ml-xgboost/R-package/demo/tweedie_regression.R +++ /dev/null @@ -1,49 +0,0 @@ -library(xgboost) -library(data.table) -library(cplm) - -data(AutoClaim) - -# auto insurance dataset analyzed by Yip and Yau (2005) -dt <- data.table(AutoClaim) - -# exclude these columns from the model matrix -exclude <- c('POLICYNO', 'PLCYDATE', 'CLM_FREQ5', 'CLM_AMT5', 'CLM_FLAG', 'IN_YY') - -# retains the missing values -# NOTE: this dataset is comes ready out of the box -options(na.action = 'na.pass') -x <- sparse.model.matrix(~ . - 1, data = dt[, -exclude, with = F]) -options(na.action = 'na.omit') - -# response -y <- dt[, CLM_AMT5] - -d_train <- xgb.DMatrix(data = x, label = y, missing = NA) - -# the tweedie_variance_power parameter determines the shape of -# distribution -# - closer to 1 is more poisson like and the mass -# is more concentrated near zero -# - closer to 2 is more gamma like and the mass spreads to the -# the right with less concentration near zero - -params <- list( - objective = 'reg:tweedie', - eval_metric = 'rmse', - tweedie_variance_power = 1.4, - max_depth = 6, - eta = 1) - -bst <- xgb.train( - data = d_train, - params = params, - maximize = FALSE, - watchlist = list(train = d_train), - nrounds = 20) - -var_imp <- xgb.importance(attr(x, 'Dimnames')[[2]], model = bst) - -preds <- predict(bst, d_train) - -rmse <- sqrt(sum(mean((y - preds)^2))) \ No newline at end of file diff --git a/ml-xgboost/R-package/man/agaricus.test.Rd b/ml-xgboost/R-package/man/agaricus.test.Rd deleted file mode 100644 index e3694ae..0000000 --- a/ml-xgboost/R-package/man/agaricus.test.Rd +++ /dev/null @@ -1,33 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/xgboost.R -\docType{data} -\name{agaricus.test} -\alias{agaricus.test} -\title{Test part from Mushroom Data Set} -\format{ -A list containing a label vector, and a dgCMatrix object with 1611 -rows and 126 variables -} -\usage{ -data(agaricus.test) -} -\description{ -This data set is originally from the Mushroom data set, -UCI Machine Learning Repository. -} -\details{ -This data set includes the following fields: - -\itemize{ - \item \code{label} the label for each record - \item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns. -} -} -\references{ -https://archive.ics.uci.edu/ml/datasets/Mushroom - -Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository -[http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, -School of Information and Computer Science. 
-}
-\keyword{datasets}
diff --git a/ml-xgboost/R-package/man/agaricus.train.Rd b/ml-xgboost/R-package/man/agaricus.train.Rd
deleted file mode 100644
index 92692c9..0000000
--- a/ml-xgboost/R-package/man/agaricus.train.Rd
+++ /dev/null
@@ -1,33 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/xgboost.R
-\docType{data}
-\name{agaricus.train}
-\alias{agaricus.train}
-\title{Training part from Mushroom Data Set}
-\format{
-A list containing a label vector, and a dgCMatrix object with 6513
-rows and 126 variables
-}
-\usage{
-data(agaricus.train)
-}
-\description{
-This data set is originally from the Mushroom data set,
-UCI Machine Learning Repository.
-}
-\details{
-This data set includes the following fields:
-
-\itemize{
- \item \code{label} the label for each record
- \item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns.
-}
-}
-\references{
-https://archive.ics.uci.edu/ml/datasets/Mushroom
-
-Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
-[http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
-School of Information and Computer Science.
-}
-\keyword{datasets}
diff --git a/ml-xgboost/R-package/man/callbacks.Rd b/ml-xgboost/R-package/man/callbacks.Rd
deleted file mode 100644
index 9f6f690..0000000
--- a/ml-xgboost/R-package/man/callbacks.Rd
+++ /dev/null
@@ -1,37 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/callbacks.R
-\name{callbacks}
-\alias{callbacks}
-\title{Callback closures for booster training.}
-\description{
-These are used to perform various service tasks either during boosting iterations or at the end.
-This approach helps to modularize many of these tasks without bloating the main training methods.
-}
-\details{
-By default, a callback function is run after each boosting iteration.
-An R-attribute \code{is_pre_iteration} could be set for a callback to define a pre-iteration function.
-
-When a callback function has a \code{finalize} parameter, its finalizer part will also be run after
-the boosting is completed.
-
-WARNING: side-effects!!! Be aware that these callback functions access and modify things in
-the environment from which they are called, which is a fairly uncommon thing to do in R.
-
-To write a custom callback closure, make sure you first understand the main concepts about R environments.
-Check either R documentation on \code{\link[base]{environment}} or the
-\href{http://adv-r.had.co.nz/Environments.html}{Environments chapter} from the "Advanced R"
-book by Hadley Wickham. Further, the best option is to read the code of some of the existing callbacks -
-choose ones that do something similar to what you want to achieve. Also, you would need to get familiar
-with the objects available inside of the \code{xgb.train} and \code{xgb.cv} internal environments.
-}
-\seealso{
-\code{\link{cb.print.evaluation}},
-\code{\link{cb.evaluation.log}},
-\code{\link{cb.reset.parameters}},
-\code{\link{cb.early.stop}},
-\code{\link{cb.save.model}},
-\code{\link{cb.cv.predict}},
-\code{\link{xgb.train}},
-\code{\link{xgb.cv}}
-}
diff --git a/ml-xgboost/R-package/man/cb.cv.predict.Rd b/ml-xgboost/R-package/man/cb.cv.predict.Rd
deleted file mode 100644
index ded899e..0000000
--- a/ml-xgboost/R-package/man/cb.cv.predict.Rd
+++ /dev/null
@@ -1,43 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/callbacks.R
-\name{cb.cv.predict}
-\alias{cb.cv.predict}
-\title{Callback closure for returning cross-validation based predictions.}
-\usage{
-cb.cv.predict(save_models = FALSE)
-}
-\arguments{
-\item{save_models}{a flag for whether to save the folds' models.}
-}
-\value{
-Predictions are returned inside of the \code{pred} element, which is either a vector or a matrix,
-depending on the number of prediction outputs per data row. The order of predictions corresponds
-to the order of rows in the original dataset. Note that when a custom \code{folds} list is
-provided in \code{xgb.cv}, the predictions would only be returned properly when this list is a
-non-overlapping list of k sets of indices, as in a standard k-fold CV. The predictions would not be
-meaningful when user-provided folds have overlapping indices as in, e.g., random sampling splits.
-When some of the indices in the training dataset are not included in user-provided \code{folds},
-their prediction value would be \code{NA}.
-}
-\description{
-Callback closure for returning cross-validation based predictions.
-}
-\details{
-This callback function saves predictions for all of the test folds,
-and also allows saving the folds' models.
-
-It is a "finalizer" callback and it uses early stopping information whenever it is available,
-thus it must be run after the early stopping callback if early stopping is used.
-
-Callback function expects the following values to be set in its calling frame:
-\code{bst_folds},
-\code{basket},
-\code{data},
-\code{end_iteration},
-\code{params},
-\code{num_parallel_tree},
-\code{num_class}.
-}
-\seealso{
-\code{\link{callbacks}}
-}
diff --git a/ml-xgboost/R-package/man/cb.early.stop.Rd b/ml-xgboost/R-package/man/cb.early.stop.Rd
deleted file mode 100644
index 1a099d7..0000000
--- a/ml-xgboost/R-package/man/cb.early.stop.Rd
+++ /dev/null
@@ -1,66 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/callbacks.R
-\name{cb.early.stop}
-\alias{cb.early.stop}
-\title{Callback closure to activate early stopping.}
-\usage{
-cb.early.stop(
- stopping_rounds,
- maximize = FALSE,
- metric_name = NULL,
- verbose = TRUE
-)
-}
-\arguments{
-\item{stopping_rounds}{The number of rounds with no improvement in
-the evaluation metric in order to stop the training.}
-
-\item{maximize}{whether to maximize the evaluation metric}
-
-\item{metric_name}{the name of an evaluation column to use as a criterion for early
-stopping. If not set, the last column would be used.
-Let's say the test data in \code{watchlist} was labelled as \code{dtest},
-and one wants to use the AUC in test data for early stopping regardless of where
-it is in the \code{watchlist}, then one of the following would need to be set:
-\code{metric_name='dtest-auc'} or \code{metric_name='dtest_auc'}.
-All dash '-' characters in metric names are considered equivalent to '_'.}
-
-\item{verbose}{whether to print the early stopping information.}
-}
-\description{
-Callback closure to activate early stopping.
-}
-\details{
-This callback function determines the condition for early stopping
-by setting the \code{stop_condition = TRUE} flag in its calling frame.
-
-The following additional fields are assigned to the model's R object:
-\itemize{
-\item \code{best_score} the evaluation score at the best iteration
-\item \code{best_iteration} at which boosting iteration the best score has occurred (1-based index)
-\item \code{best_ntreelimit} to use with the \code{ntreelimit} parameter in \code{predict}.
- It differs from \code{best_iteration} in multiclass or random forest settings.
-}
-
-The same values are also stored as xgb-attributes:
-\itemize{
-\item \code{best_iteration} is stored as a 0-based iteration index (for interoperability of binary models)
-\item \code{best_msg} message string is also stored.
-}
-
-At least one data element is required in the evaluation watchlist for early stopping to work.
-
-Callback function expects the following values to be set in its calling frame:
-\code{stop_condition},
-\code{bst_evaluation},
-\code{rank},
-\code{bst} (or \code{bst_folds} and \code{basket}),
-\code{iteration},
-\code{begin_iteration},
-\code{end_iteration},
-\code{num_parallel_tree}.
-}
-\seealso{
-\code{\link{callbacks}},
-\code{\link{xgb.attr}}
-}
diff --git a/ml-xgboost/R-package/man/cb.evaluation.log.Rd b/ml-xgboost/R-package/man/cb.evaluation.log.Rd
deleted file mode 100644
index 94f8a02..0000000
--- a/ml-xgboost/R-package/man/cb.evaluation.log.Rd
+++ /dev/null
@@ -1,31 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/callbacks.R
-\name{cb.evaluation.log}
-\alias{cb.evaluation.log}
-\title{Callback closure for logging the evaluation history}
-\usage{
-cb.evaluation.log()
-}
-\description{
-Callback closure for logging the evaluation history
-}
-\details{
-This callback function appends the current iteration evaluation results \code{bst_evaluation}
-available in the calling parent frame to the \code{evaluation_log} list in a calling frame.
-
-The finalizer callback (called with \code{finalize = TRUE} in the end) converts
-the \code{evaluation_log} list into a final data.table.
-
-The iteration evaluation result \code{bst_evaluation} must be a named numeric vector.
-
-Note: in the column names of the final data.table, the dash '-' character is replaced with
-the underscore '_' in order to make the column names more like regular R identifiers.
-
-Callback function expects the following values to be set in its calling frame:
-\code{evaluation_log},
-\code{bst_evaluation},
-\code{iteration}.
-}
-\seealso{
-\code{\link{callbacks}}
-}
diff --git a/ml-xgboost/R-package/man/cb.gblinear.history.Rd b/ml-xgboost/R-package/man/cb.gblinear.history.Rd
deleted file mode 100644
index 35ebeb6..0000000
--- a/ml-xgboost/R-package/man/cb.gblinear.history.Rd
+++ /dev/null
@@ -1,95 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/callbacks.R
-\name{cb.gblinear.history}
-\alias{cb.gblinear.history}
-\title{Callback closure for collecting the model coefficients history of a gblinear booster
-during its training.}
-\usage{
-cb.gblinear.history(sparse = FALSE)
-}
-\arguments{
-\item{sparse}{when set to FALSE/TRUE, a dense/sparse matrix is used to store the result.
-Sparse format is useful when one expects only a subset of coefficients to be non-zero,
-when using the "thrifty" feature selector with a fairly small number of top features
-selected per iteration.}
-}
-\value{
-Results are stored in the \code{coefs} element of the closure.
-The \code{\link{xgb.gblinear.history}} convenience function provides an easy way to access it.
-With \code{xgb.train}, it is either a dense or a sparse matrix.
-With \code{xgb.cv}, it is a list (one element per fold) of such matrices.
-}
-\description{
-Callback closure for collecting the model coefficients history of a gblinear booster
-during its training.
-}
-\details{
-To keep things fast and simple, gblinear booster does not internally store the history of linear
-model coefficients at each boosting iteration. This callback provides a workaround for storing
-the coefficients' path, by extracting them after each training iteration.
-
-Callback function expects the following values to be set in its calling frame:
-\code{bst} (or \code{bst_folds}).
-}
-\examples{
-#### Binary classification:
-#
-# In the iris dataset, it is hard to linearly separate Versicolor class from the rest
-# without considering the 2nd order interactions:
-require(magrittr)
-x <- model.matrix(Species ~ .^2, iris)[,-1]
-colnames(x)
-dtrain <- xgb.DMatrix(scale(x), label = 1*(iris$Species == "versicolor"))
-param <- list(booster = "gblinear", objective = "reg:logistic", eval_metric = "auc",
- lambda = 0.0003, alpha = 0.0003, nthread = 2)
-# For 'shotgun', which is the default linear updater, using high eta values may result in
-# unstable behaviour in some datasets. With this simple dataset, however, the high learning
-# rate does not break the convergence, but allows us to illustrate the typical pattern of
-# "stochastic explosion" behaviour of this lock-free algorithm at early boosting iterations.
-bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 200, eta = 1.,
- callbacks = list(cb.gblinear.history()))
-# Extract the coefficients' path and plot them vs boosting iteration number:
-coef_path <- xgb.gblinear.history(bst)
-matplot(coef_path, type = 'l')
-
-# With the deterministic coordinate descent updater, it is safer to use higher learning rates.
-# We will try classical componentwise boosting, which selects a single best feature per round:
-bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 200, eta = 0.8,
- updater = 'coord_descent', feature_selector = 'thrifty', top_k = 1,
- callbacks = list(cb.gblinear.history()))
-xgb.gblinear.history(bst) \%>\% matplot(type = 'l')
-# Componentwise boosting is known to have a similar effect to Lasso regularization.
-# Try experimenting with various values of top_k, eta, nrounds,
-# as well as different feature_selectors.
-
-# For xgb.cv:
-bst <- xgb.cv(param, dtrain, nfold = 5, nrounds = 100, eta = 0.8,
- callbacks = list(cb.gblinear.history()))
-# coefficients in the CV fold #3
-xgb.gblinear.history(bst)[[3]] \%>\% matplot(type = 'l')
-
-
-#### Multiclass classification:
-#
-dtrain <- xgb.DMatrix(scale(x), label = as.numeric(iris$Species) - 1)
-param <- list(booster = "gblinear", objective = "multi:softprob", num_class = 3,
- lambda = 0.0003, alpha = 0.0003, nthread = 2)
-# For the default linear updater 'shotgun' it is sometimes helpful
-# to use a smaller eta to reduce instability
-bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 70, eta = 0.5,
- callbacks = list(cb.gblinear.history()))
-# Will plot the coefficient paths separately for each class:
-xgb.gblinear.history(bst, class_index = 0) \%>\% matplot(type = 'l')
-xgb.gblinear.history(bst, class_index = 1) \%>\% matplot(type = 'l')
-xgb.gblinear.history(bst, class_index = 2) \%>\% matplot(type = 'l')
-
-# CV:
-bst <- xgb.cv(param, dtrain, nfold = 5, nrounds = 70, eta = 0.5,
- callbacks = list(cb.gblinear.history(FALSE)))
-# 1st fold of 1st class
-xgb.gblinear.history(bst, class_index = 0)[[1]] \%>\% matplot(type = 'l')
-
-}
-\seealso{
-\code{\link{callbacks}}, \code{\link{xgb.gblinear.history}}.
-}
diff --git a/ml-xgboost/R-package/man/cb.print.evaluation.Rd b/ml-xgboost/R-package/man/cb.print.evaluation.Rd
deleted file mode 100644
index 59b9ba6..0000000
--- a/ml-xgboost/R-package/man/cb.print.evaluation.Rd
+++ /dev/null
@@ -1,29 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/callbacks.R
-\name{cb.print.evaluation}
-\alias{cb.print.evaluation}
-\title{Callback closure for printing the result of evaluation}
-\usage{
-cb.print.evaluation(period = 1, showsd = TRUE)
-}
-\arguments{
-\item{period}{results are printed every \code{period} iterations}
-
-\item{showsd}{whether standard deviations should be printed (when available)}
-}
-\description{
-Callback closure for printing the result of evaluation
-}
-\details{
-The callback function prints the result of evaluation at every \code{period} iterations.
-The initial and the last iteration's evaluations are always printed.
-
-Callback function expects the following values to be set in its calling frame:
-\code{bst_evaluation} (also \code{bst_evaluation_err} when available),
-\code{iteration},
-\code{begin_iteration},
-\code{end_iteration}.
-}
-\seealso{
-\code{\link{callbacks}}
-}
diff --git a/ml-xgboost/R-package/man/cb.reset.parameters.Rd b/ml-xgboost/R-package/man/cb.reset.parameters.Rd
deleted file mode 100644
index ee0a5d1..0000000
--- a/ml-xgboost/R-package/man/cb.reset.parameters.Rd
+++ /dev/null
@@ -1,36 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/callbacks.R
-\name{cb.reset.parameters}
-\alias{cb.reset.parameters}
-\title{Callback closure for resetting the booster's parameters at each iteration.}
-\usage{
-cb.reset.parameters(new_params)
-}
-\arguments{
-\item{new_params}{a list where each element corresponds to a parameter that needs to be reset.
-Each element's value must be either a vector of values of length \code{nrounds}
-to be set at each iteration,
-or a function of two parameters \code{learning_rates(iteration, nrounds)}
-which returns a new parameter value by using the current iteration number
-and the total number of boosting rounds.}
-}
-\description{
-Callback closure for resetting the booster's parameters at each iteration.
-}
-\details{
-This is a "pre-iteration" callback function used to reset the booster's parameters
-at the beginning of each iteration.
-
-Note that when training is resumed from some previous model, and a function is used to
-reset a parameter value, the \code{nrounds} argument in this function would be the
-number of boosting rounds in the current training.
-
-Callback function expects the following values to be set in its calling frame:
-\code{bst} or \code{bst_folds},
-\code{iteration},
-\code{begin_iteration},
-\code{end_iteration}.
-}
-\seealso{
-\code{\link{callbacks}}
-}
diff --git a/ml-xgboost/R-package/man/cb.save.model.Rd b/ml-xgboost/R-package/man/cb.save.model.Rd
deleted file mode 100644
index fd564b3..0000000
--- a/ml-xgboost/R-package/man/cb.save.model.Rd
+++ /dev/null
@@ -1,33 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/callbacks.R
-\name{cb.save.model}
-\alias{cb.save.model}
-\title{Callback closure for saving a model file.}
-\usage{
-cb.save.model(save_period = 0, save_name = "xgboost.model")
-}
-\arguments{
-\item{save_period}{save the model to disk after every
-\code{save_period} iterations; 0 means save the model at the end.}
-
-\item{save_name}{the name or path for the saved model file.
-It can contain a \code{\link[base]{sprintf}} formatting specifier
-to include the integer iteration number in the file name.
-E.g., with \code{save_name} = 'xgboost_%04d.model',
-the file saved at iteration 50 would be named "xgboost_0050.model".}
-}
-\description{
-Callback closure for saving a model file.
-}
-\details{
-This callback function allows saving an xgb-model file, either periodically after every \code{save_period} iterations or once at the end.
-
-Callback function expects the following values to be set in its calling frame:
-\code{bst},
-\code{iteration},
-\code{begin_iteration},
-\code{end_iteration}.
-}
-\seealso{
-\code{\link{callbacks}}
-}
diff --git a/ml-xgboost/R-package/man/dim.xgb.DMatrix.Rd b/ml-xgboost/R-package/man/dim.xgb.DMatrix.Rd
deleted file mode 100644
index 76c53de..0000000
--- a/ml-xgboost/R-package/man/dim.xgb.DMatrix.Rd
+++ /dev/null
@@ -1,28 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/xgb.DMatrix.R
-\name{dim.xgb.DMatrix}
-\alias{dim.xgb.DMatrix}
-\title{Dimensions of xgb.DMatrix}
-\usage{
-\method{dim}{xgb.DMatrix}(x)
-}
-\arguments{
-\item{x}{Object of class \code{xgb.DMatrix}}
-}
-\description{
-Returns a vector with the numbers of rows and columns in an \code{xgb.DMatrix}.
-}
-\details{
-Note: since \code{nrow} and \code{ncol} internally use \code{dim}, they can also
-be directly used with an \code{xgb.DMatrix} object.
-}
-\examples{
-data(agaricus.train, package='xgboost')
-train <- agaricus.train
-dtrain <- xgb.DMatrix(train$data, label=train$label)
-
-stopifnot(nrow(dtrain) == nrow(train$data))
-stopifnot(ncol(dtrain) == ncol(train$data))
-stopifnot(all(dim(dtrain) == dim(train$data)))
-
-}
diff --git a/ml-xgboost/R-package/man/dimnames.xgb.DMatrix.Rd b/ml-xgboost/R-package/man/dimnames.xgb.DMatrix.Rd
deleted file mode 100644
index 032cb95..0000000
--- a/ml-xgboost/R-package/man/dimnames.xgb.DMatrix.Rd
+++ /dev/null
@@ -1,35 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/xgb.DMatrix.R
-\name{dimnames.xgb.DMatrix}
-\alias{dimnames.xgb.DMatrix}
-\alias{dimnames<-.xgb.DMatrix}
-\title{Handling of column names of \code{xgb.DMatrix}}
-\usage{
-\method{dimnames}{xgb.DMatrix}(x)
-
-\method{dimnames}{xgb.DMatrix}(x) <- value
-}
-\arguments{
-\item{x}{object of class \code{xgb.DMatrix}}
-
-\item{value}{a list of two elements: the first one is ignored
-and the second one is column names}
-}
-\description{
-Only column names are supported for \code{xgb.DMatrix}, thus setting row names
-would have no effect and the returned row names would be NULL.
-}
-\details{
-Generic \code{dimnames} methods are used by \code{colnames}.
-Since row names are irrelevant, it is recommended to use \code{colnames} directly.
-}
-\examples{
-data(agaricus.train, package='xgboost')
-train <- agaricus.train
-dtrain <- xgb.DMatrix(train$data, label=train$label)
-dimnames(dtrain)
-colnames(dtrain)
-colnames(dtrain) <- make.names(1:ncol(train$data))
-print(dtrain, verbose=TRUE)
-
-}
diff --git a/ml-xgboost/R-package/man/getinfo.Rd b/ml-xgboost/R-package/man/getinfo.Rd
deleted file mode 100644
index 1751c48..0000000
--- a/ml-xgboost/R-package/man/getinfo.Rd
+++ /dev/null
@@ -1,45 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/xgb.DMatrix.R
-\name{getinfo}
-\alias{getinfo}
-\alias{getinfo.xgb.DMatrix}
-\title{Get information of an xgb.DMatrix object}
-\usage{
-getinfo(object, ...)
-
-\method{getinfo}{xgb.DMatrix}(object, name, ...)
-}
-\arguments{
-\item{object}{Object of class \code{xgb.DMatrix}}
-
-\item{...}{other parameters}
-
-\item{name}{the name of the information field to get (see details)}
-}
-\description{
-Get information of an xgb.DMatrix object
-}
-\details{
-The \code{name} field can be one of the following:
-
-\itemize{
- \item \code{label}: the label XGBoost learns from;
- \item \code{weight}: weights used to rescale each row;
- \item \code{base_margin}: the base prediction XGBoost will boost from;
- \item \code{nrow}: number of rows of the \code{xgb.DMatrix}.
-
-}
-
-\code{group} can be set by \code{setinfo} but can't be retrieved by \code{getinfo}.
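-
-A minimal sketch (assuming a \code{dtrain} constructed as in the examples below), showing that
-the \code{nrow} field listed above can be queried like any other:
-\preformatted{
-getinfo(dtrain, 'nrow')  # same value as nrow(dtrain)
-}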
-}
-\examples{
-data(agaricus.train, package='xgboost')
-train <- agaricus.train
-dtrain <- xgb.DMatrix(train$data, label=train$label)
-
-labels <- getinfo(dtrain, 'label')
-setinfo(dtrain, 'label', 1-labels)
-
-labels2 <- getinfo(dtrain, 'label')
-stopifnot(all(labels2 == 1-labels))
-}
diff --git a/ml-xgboost/R-package/man/predict.xgb.Booster.Rd b/ml-xgboost/R-package/man/predict.xgb.Booster.Rd
deleted file mode 100644
index 6430eab..0000000
--- a/ml-xgboost/R-package/man/predict.xgb.Booster.Rd
+++ /dev/null
@@ -1,202 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/xgb.Booster.R
-\name{predict.xgb.Booster}
-\alias{predict.xgb.Booster}
-\alias{predict.xgb.Booster.handle}
-\title{Predict method for eXtreme Gradient Boosting model}
-\usage{
-\method{predict}{xgb.Booster}(
- object,
- newdata,
- missing = NA,
- outputmargin = FALSE,
- ntreelimit = NULL,
- predleaf = FALSE,
- predcontrib = FALSE,
- approxcontrib = FALSE,
- predinteraction = FALSE,
- reshape = FALSE,
- training = FALSE,
- ...
-)
-
-\method{predict}{xgb.Booster.handle}(object, ...)
-}
-\arguments{
-\item{object}{Object of class \code{xgb.Booster} or \code{xgb.Booster.handle}}
-
-\item{newdata}{takes \code{matrix}, \code{dgCMatrix}, local data file or \code{xgb.DMatrix}.}
-
-\item{missing}{only used when the input is a dense matrix. Pick a float value that represents
-missing values in data (e.g., sometimes 0 or some other extreme value is used).}
-
-\item{outputmargin}{whether the prediction should be returned in the form of the original untransformed
-sum of predictions from boosting iterations' results. E.g., setting \code{outputmargin=TRUE} for
-logistic regression would result in predictions for log-odds instead of probabilities.}
-
-\item{ntreelimit}{limit the number of model's trees or boosting iterations used in prediction (see Details).
-It will use all the trees by default (\code{NULL} value).}
-
-\item{predleaf}{whether to predict leaf indices.}
-
-\item{predcontrib}{whether to return feature contributions to individual predictions (see Details).}
-
-\item{approxcontrib}{whether to use a fast approximation for feature contributions (see Details).}
-
-\item{predinteraction}{whether to return contributions of feature interactions to individual predictions (see Details).}
-
-\item{reshape}{whether to reshape the vector of predictions to a matrix form when there are several
-prediction outputs per case. This option has no effect when either of predleaf, predcontrib,
-or predinteraction flags is TRUE.}
-
-\item{training}{whether the prediction result is used for training. For the dart booster,
-predicting in training mode will perform dropout.}
-
-\item{...}{Parameters passed to \code{predict.xgb.Booster}}
-}
-\value{
-For regression or binary classification, it returns a vector of length \code{nrow(newdata)}.
-For multiclass classification, either a \code{num_class * nrow(newdata)} vector or
-a \code{(nrow(newdata), num_class)} dimension matrix is returned, depending on
-the \code{reshape} value.
-
-When \code{predleaf = TRUE}, the output is a matrix object with the
-number of columns corresponding to the number of trees.
-
-When \code{predcontrib = TRUE} and it is not a multiclass setting, the output is a matrix object with
-\code{num_features + 1} columns. The last "+ 1" column corresponds to the bias.
-For a multiclass case, a list of \code{num_class} elements is returned, where each element is
-such a matrix.
The contribution values are on the scale of the untransformed margin
-(e.g., for binary classification this means that the contributions are log-odds deviations from the bias).
-
-When \code{predinteraction = TRUE} and it is not a multiclass setting, the output is a 3d array with
-dimensions \code{c(nrow, num_features + 1, num_features + 1)}. The off-diagonal (in the last two dimensions)
-elements represent contributions of different feature interactions. The array is symmetric with respect to the last
-two dimensions. The "+ 1" columns correspond to the bias. Summing this array along the last dimension should
-produce practically the same result as predict with \code{predcontrib = TRUE}.
-For a multiclass case, a list of \code{num_class} elements is returned, where each element is
-such an array.
-}
-\description{
-Predicted values based on either xgboost model or model handle object.
-}
-\details{
-Note that \code{ntreelimit} is not necessarily equal to the number of boosting iterations
-and it is not necessarily equal to the number of trees in a model.
-E.g., in a random forest-like model, \code{ntreelimit} would limit the number of trees.
-But for multiclass classification, while there are multiple trees per iteration,
-\code{ntreelimit} limits the number of boosting iterations.
-
-Also note that \code{ntreelimit} would currently do nothing for predictions from gblinear,
-since gblinear doesn't keep its boosting history.
-
-One possible practical application of the \code{predleaf} option is to use the model
-as a generator of new features which capture non-linearity and interactions,
-e.g., as implemented in \code{\link{xgb.create.features}}.
-
-Setting \code{predcontrib = TRUE} allows calculating the contributions of each feature to
-individual predictions. For "gblinear" booster, feature contributions are simply linear terms
-(feature_beta * feature_value). For "gbtree" booster, feature contributions are SHAP
-values (Lundberg 2017) that sum to the difference between the expected output
-of the model and the current prediction (where the Hessian weights are used to compute the expectations).
-Setting \code{approxcontrib = TRUE} approximates these values following the idea explained
-in \url{http://blog.datadive.net/interpreting-random-forests/}.
-
-With \code{predinteraction = TRUE}, SHAP values of the interaction contributions of each pair of features
-are computed. Note that this operation might be rather expensive in terms of compute and memory.
-Since it quadratically depends on the number of features, it is recommended to perform selection
-of the most important features first. See below about the format of the returned results.
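-
-A minimal sketch of the \code{predinteraction} output format (assuming the \code{bst} model and
-\code{test} data defined in the examples below):
-\preformatted{
-pred_int <- predict(bst, test$data, predinteraction = TRUE)
-dim(pred_int)  # nrow(test$data), nfeatures + 1, nfeatures + 1
-# summing over the last dimension approximately recovers predcontrib:
-pred_contr2 <- apply(pred_int, c(1, 2), sum)
-}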
-} -\examples{ -## binary classification: - -data(agaricus.train, package='xgboost') -data(agaricus.test, package='xgboost') -train <- agaricus.train -test <- agaricus.test - -bst <- xgboost(data = train$data, label = train$label, max_depth = 2, - eta = 0.5, nthread = 2, nrounds = 5, objective = "binary:logistic") -# use all trees by default -pred <- predict(bst, test$data) -# use only the 1st tree -pred1 <- predict(bst, test$data, ntreelimit = 1) - -# Predicting tree leafs: -# the result is an nsamples X ntrees matrix -pred_leaf <- predict(bst, test$data, predleaf = TRUE) -str(pred_leaf) - -# Predicting feature contributions to predictions: -# the result is an nsamples X (nfeatures + 1) matrix -pred_contr <- predict(bst, test$data, predcontrib = TRUE) -str(pred_contr) -# verify that contributions' sums are equal to log-odds of predictions (up to float precision): -summary(rowSums(pred_contr) - qlogis(pred)) -# for the 1st record, let's inspect its features that had non-zero contribution to prediction: -contr1 <- pred_contr[1,] -contr1 <- contr1[-length(contr1)] # drop BIAS -contr1 <- contr1[contr1 != 0] # drop non-contributing features -contr1 <- contr1[order(abs(contr1))] # order by contribution magnitude -old_mar <- par("mar") -par(mar = old_mar + c(0,7,0,0)) -barplot(contr1, horiz = TRUE, las = 2, xlab = "contribution to prediction in log-odds") -par(mar = old_mar) - - -## multiclass classification in iris dataset: - -lb <- as.numeric(iris$Species) - 1 -num_class <- 3 -set.seed(11) -bst <- xgboost(data = as.matrix(iris[, -5]), label = lb, - max_depth = 4, eta = 0.5, nthread = 2, nrounds = 10, subsample = 0.5, - objective = "multi:softprob", num_class = num_class) -# predict for softmax returns num_class probability numbers per case: -pred <- predict(bst, as.matrix(iris[, -5])) -str(pred) -# reshape it to a num_class-columns matrix -pred <- matrix(pred, ncol=num_class, byrow=TRUE) -# convert the probabilities to softmax labels -pred_labels <- max.col(pred) - 1 -# the following should result in the same error as seen in the last iteration -sum(pred_labels != lb)/length(lb) - -# compare that to the predictions from softmax: -set.seed(11) -bst <- xgboost(data = as.matrix(iris[, -5]), label = lb, - max_depth = 4, eta = 0.5, nthread = 2, nrounds = 10, subsample = 0.5, - objective = "multi:softmax", num_class = num_class) -pred <- predict(bst, as.matrix(iris[, -5])) -str(pred) -all.equal(pred, pred_labels) -# prediction from using only 5 iterations should result -# in the same error as seen in iteration 5: -pred5 <- predict(bst, as.matrix(iris[, -5]), ntreelimit=5) -sum(pred5 != lb)/length(lb) - - -## random forest-like model of 25 trees for binary classification: - -set.seed(11) -bst <- xgboost(data = train$data, label = train$label, max_depth = 5, - nthread = 2, nrounds = 1, objective = "binary:logistic", - num_parallel_tree = 25, subsample = 0.6, colsample_bytree = 0.1) -# Inspect the prediction error vs number of trees: -lb <- test$label -dtest <- xgb.DMatrix(test$data, label=lb) -err <- sapply(1:25, function(n) { - pred <- predict(bst, dtest, ntreelimit=n) - sum((pred > 0.5) != lb)/length(lb) -}) -plot(err, type='l', ylim=c(0,0.1), xlab='#trees') - -} -\references{ -Scott M. Lundberg, Su-In Lee, "A Unified Approach to Interpreting Model Predictions", NIPS Proceedings 2017, \url{https://arxiv.org/abs/1705.07874} - -Scott M. Lundberg, Su-In Lee, "Consistent feature attribution for tree ensembles", \url{https://arxiv.org/abs/1706.06060} -} -\seealso{ -\code{\link{xgb.train}}. 
-} diff --git a/ml-xgboost/R-package/man/print.xgb.Booster.Rd b/ml-xgboost/R-package/man/print.xgb.Booster.Rd deleted file mode 100644 index d684882..0000000 --- a/ml-xgboost/R-package/man/print.xgb.Booster.Rd +++ /dev/null @@ -1,29 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/xgb.Booster.R -\name{print.xgb.Booster} -\alias{print.xgb.Booster} -\title{Print xgb.Booster} -\usage{ -\method{print}{xgb.Booster}(x, verbose = FALSE, ...) -} -\arguments{ -\item{x}{an xgb.Booster object} - -\item{verbose}{whether to print detailed data (e.g., attribute values)} - -\item{...}{not currently used} -} -\description{ -Print information about xgb.Booster. -} -\examples{ -data(agaricus.train, package='xgboost') -train <- agaricus.train -bst <- xgboost(data = train$data, label = train$label, max_depth = 2, - eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") -attr(bst, 'myattr') <- 'memo' - -print(bst) -print(bst, verbose=TRUE) - -} diff --git a/ml-xgboost/R-package/man/print.xgb.DMatrix.Rd b/ml-xgboost/R-package/man/print.xgb.DMatrix.Rd deleted file mode 100644 index b1dd01b..0000000 --- a/ml-xgboost/R-package/man/print.xgb.DMatrix.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/xgb.DMatrix.R -\name{print.xgb.DMatrix} -\alias{print.xgb.DMatrix} -\title{Print xgb.DMatrix} -\usage{ -\method{print}{xgb.DMatrix}(x, verbose = FALSE, ...) -} -\arguments{ -\item{x}{an xgb.DMatrix object} - -\item{verbose}{whether to print colnames (when present)} - -\item{...}{not currently used} -} -\description{ -Print information about xgb.DMatrix. -Currently it displays dimensions and presence of info-fields and colnames. -} -\examples{ -data(agaricus.train, package='xgboost') -train <- agaricus.train -dtrain <- xgb.DMatrix(train$data, label=train$label) - -dtrain -print(dtrain, verbose=TRUE) - -} diff --git a/ml-xgboost/R-package/man/print.xgb.cv.Rd b/ml-xgboost/R-package/man/print.xgb.cv.Rd deleted file mode 100644 index 05ad61e..0000000 --- a/ml-xgboost/R-package/man/print.xgb.cv.Rd +++ /dev/null @@ -1,31 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/xgb.cv.R -\name{print.xgb.cv.synchronous} -\alias{print.xgb.cv.synchronous} -\title{Print xgb.cv result} -\usage{ -\method{print}{xgb.cv.synchronous}(x, verbose = FALSE, ...) -} -\arguments{ -\item{x}{an \code{xgb.cv.synchronous} object} - -\item{verbose}{whether to print detailed data} - -\item{...}{passed to \code{data.table.print}} -} -\description{ -Prints formatted results of \code{xgb.cv}. -} -\details{ -When not verbose, it would only print the evaluation results, -including the best iteration (when available). -} -\examples{ -data(agaricus.train, package='xgboost') -train <- agaricus.train -cv <- xgb.cv(data = train$data, label = train$label, nfold = 5, max_depth = 2, - eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") -print(cv) -print(cv, verbose=TRUE) - -} diff --git a/ml-xgboost/R-package/man/setinfo.Rd b/ml-xgboost/R-package/man/setinfo.Rd deleted file mode 100644 index e133d3a..0000000 --- a/ml-xgboost/R-package/man/setinfo.Rd +++ /dev/null @@ -1,43 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/xgb.DMatrix.R -\name{setinfo} -\alias{setinfo} -\alias{setinfo.xgb.DMatrix} -\title{Set information of an xgb.DMatrix object} -\usage{ -setinfo(object, ...) - -\method{setinfo}{xgb.DMatrix}(object, name, info, ...) 
-}
-\arguments{
-\item{object}{Object of class "xgb.DMatrix"}

-\item{...}{other parameters}
-
-\item{name}{the name of the field to set}
-
-\item{info}{the specific field of information to set}
-}
-\description{
-Set information of an xgb.DMatrix object
-}
-\details{
-The \code{name} field can be one of the following:
-
-\itemize{
- \item \code{label}: the label XGBoost learns from;
- \item \code{weight}: weights used to rescale each row;
- \item \code{base_margin}: the base prediction XGBoost will boost from;
- \item \code{group}: number of rows in each group (to use with \code{rank:pairwise} objective).
-}
-}
-\examples{
-data(agaricus.train, package='xgboost')
-train <- agaricus.train
-dtrain <- xgb.DMatrix(train$data, label=train$label)
-
-labels <- getinfo(dtrain, 'label')
-setinfo(dtrain, 'label', 1-labels)
-labels2 <- getinfo(dtrain, 'label')
-stopifnot(all.equal(labels2, 1-labels))
-}
diff --git a/ml-xgboost/R-package/man/slice.xgb.DMatrix.Rd b/ml-xgboost/R-package/man/slice.xgb.DMatrix.Rd
deleted file mode 100644
index 9f27d4b..0000000
--- a/ml-xgboost/R-package/man/slice.xgb.DMatrix.Rd
+++ /dev/null
@@ -1,40 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/xgb.DMatrix.R
-\name{slice}
-\alias{slice}
-\alias{slice.xgb.DMatrix}
-\alias{[.xgb.DMatrix}
-\title{Get a new DMatrix containing the specified rows of
-the original xgb.DMatrix object}
-\usage{
-slice(object, ...)
-
-\method{slice}{xgb.DMatrix}(object, idxset, ...)
-
-\method{[}{xgb.DMatrix}(object, idxset, colset = NULL)
-}
-\arguments{
-\item{object}{Object of class "xgb.DMatrix"}
-
-\item{...}{other parameters (currently not used)}
-
-\item{idxset}{an integer vector of indices of rows needed}
-
-\item{colset}{currently not used (column subsetting is not available)}
-}
-\description{
-Get a new DMatrix containing the specified rows of
-the original xgb.DMatrix object
-}
-\examples{
-data(agaricus.train, package='xgboost')
-train <- agaricus.train
-dtrain <- xgb.DMatrix(train$data, label=train$label)
-
-dsub <- slice(dtrain, 1:42)
-labels1 <- getinfo(dsub, 'label')
-dsub <- dtrain[1:42, ]
-labels2 <- getinfo(dsub, 'label')
-all.equal(labels1, labels2)
-
-}
diff --git a/ml-xgboost/R-package/man/xgb.Booster.complete.Rd b/ml-xgboost/R-package/man/xgb.Booster.complete.Rd
deleted file mode 100644
index 2b38b4c..0000000
--- a/ml-xgboost/R-package/man/xgb.Booster.complete.Rd
+++ /dev/null
@@ -1,50 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/xgb.Booster.R
-\name{xgb.Booster.complete}
-\alias{xgb.Booster.complete}
-\title{Restore missing parts of an incomplete xgb.Booster object.}
-\usage{
-xgb.Booster.complete(object, saveraw = TRUE)
-}
-\arguments{
-\item{object}{object of class \code{xgb.Booster}}
-
-\item{saveraw}{a flag indicating whether to append \code{raw} Booster memory dump data
-when it doesn't already exist.}
-}
-\value{
-An object of \code{xgb.Booster} class.
-}
-\description{
-It attempts to complete an \code{xgb.Booster} object by restoring either its missing
-raw model memory dump (when it has no \code{raw} data but its \code{xgb.Booster.handle} is valid)
-or its missing internal handle (when its \code{xgb.Booster.handle} is not valid
-but it has a raw Booster memory dump).
-}
-\details{
-While this method is primarily for internal use, it might be useful in some practical situations.
-
-E.g., when an \code{xgb.Booster} model is saved as an R object and then loaded back as an R object,
-its handle (pointer) to an internal xgboost model would be invalid. The majority of xgboost methods
-should still work for such a model object since those methods would be using
-\code{xgb.Booster.complete} internally. However, one might find it to be more efficient to call the
-\code{xgb.Booster.complete} function explicitly once after loading a model as an R-object.
-That would prevent further repeated implicit reconstruction of an internal booster model.
-}
-\examples{
-
-data(agaricus.train, package='xgboost')
-bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
- eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
-saveRDS(bst, "xgb.model.rds")
-
-bst1 <- readRDS("xgb.model.rds")
-if (file.exists("xgb.model.rds")) file.remove("xgb.model.rds")
-# the handle is invalid:
-print(bst1$handle)
-
-bst1 <- xgb.Booster.complete(bst1)
-# now the handle points to a valid internal booster model:
-print(bst1$handle)
-
-}
diff --git a/ml-xgboost/R-package/man/xgb.DMatrix.Rd b/ml-xgboost/R-package/man/xgb.DMatrix.Rd
deleted file mode 100644
index c3d47a9..0000000
--- a/ml-xgboost/R-package/man/xgb.DMatrix.Rd
+++ /dev/null
@@ -1,35 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/xgb.DMatrix.R
-\name{xgb.DMatrix}
-\alias{xgb.DMatrix}
-\title{Construct xgb.DMatrix object}
-\usage{
-xgb.DMatrix(data, info = list(), missing = NA, silent = FALSE, ...)
-}
-\arguments{
-\item{data}{a \code{matrix} object (either numeric or integer), a \code{dgCMatrix} object, or a character
-string representing a filename.}
-
-\item{info}{a named list of additional information to store in the \code{xgb.DMatrix} object.
-See \code{\link{setinfo}} for the specific allowed kinds of information.}
-
-\item{missing}{a float value to represent missing values in data (used only when input is a dense matrix).
-It is useful when a 0 or some other extreme value represents missing values in data.}
-
-\item{silent}{whether to suppress printing an informational message after loading from a file.}
-
-\item{...}{the \code{info} data could be passed directly as parameters, without creating an \code{info} list.}
-}
-\description{
-Construct xgb.DMatrix object from either a dense matrix, a sparse matrix, or a local file.
-Supported input file formats are either a libsvm text file or a binary file that was created previously by
-\code{\link{xgb.DMatrix.save}}.
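-
-A minimal sketch (assuming the \code{train} object from the examples below): since the \code{...}
-argument forwards info fields, additional information such as per-row weights can be passed
-directly at construction time:
-\preformatted{
-w <- runif(nrow(train$data))
-dtrain <- xgb.DMatrix(train$data, label = train$label, weight = w)
-}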
-}
-\examples{
-data(agaricus.train, package='xgboost')
-train <- agaricus.train
-dtrain <- xgb.DMatrix(train$data, label=train$label)
-xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
-dtrain <- xgb.DMatrix('xgb.DMatrix.data')
-if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data')
-}
diff --git a/ml-xgboost/R-package/man/xgb.DMatrix.save.Rd b/ml-xgboost/R-package/man/xgb.DMatrix.save.Rd
deleted file mode 100644
index 7f25c5a..0000000
--- a/ml-xgboost/R-package/man/xgb.DMatrix.save.Rd
+++ /dev/null
@@ -1,24 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/xgb.DMatrix.save.R
-\name{xgb.DMatrix.save}
-\alias{xgb.DMatrix.save}
-\title{Save xgb.DMatrix object to binary file}
-\usage{
-xgb.DMatrix.save(dmatrix, fname)
-}
-\arguments{
-\item{dmatrix}{the \code{xgb.DMatrix} object}
-
-\item{fname}{the name of the file to write.}
-}
-\description{
-Save xgb.DMatrix object to binary file
-}
-\examples{
-data(agaricus.train, package='xgboost')
-train <- agaricus.train
-dtrain <- xgb.DMatrix(train$data, label=train$label)
-xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
-dtrain <- xgb.DMatrix('xgb.DMatrix.data')
-if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data')
-}
diff --git a/ml-xgboost/R-package/man/xgb.attr.Rd b/ml-xgboost/R-package/man/xgb.attr.Rd
deleted file mode 100644
index 03779e4..0000000
--- a/ml-xgboost/R-package/man/xgb.attr.Rd
+++ /dev/null
@@ -1,86 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/xgb.Booster.R
-\name{xgb.attr}
-\alias{xgb.attr}
-\alias{xgb.attr<-}
-\alias{xgb.attributes}
-\alias{xgb.attributes<-}
-\title{Accessors for serializable attributes of a model.}
-\usage{
-xgb.attr(object, name)
-
-xgb.attr(object, name) <- value
-
-xgb.attributes(object)
-
-xgb.attributes(object) <- value
-}
-\arguments{
-\item{object}{Object of class \code{xgb.Booster} or \code{xgb.Booster.handle}.}
-
-\item{name}{a non-empty character string specifying which attribute is to be accessed.}
-
-\item{value}{a value of an attribute for \code{xgb.attr<-}; for \code{xgb.attributes<-}
-it's a list (or an object coercible to a list) with the names of attributes to set
-and the elements corresponding to attribute values.
-Non-character values are converted to character.
-When an attribute value is not a scalar, only the first index is used.
-Use \code{NULL} to remove an attribute.}
-}
-\value{
-\code{xgb.attr} returns either a string value of an attribute
-or \code{NULL} if an attribute wasn't stored in a model.
-
-\code{xgb.attributes} returns a list of all attributes stored in a model
-or \code{NULL} if a model has no stored attributes.
-}
-\description{
-These methods allow manipulating the key-value attribute strings of an xgboost model.
-}
-\details{
-The primary purpose of xgboost model attributes is to store some meta-data about the model.
-Note that they are a separate concept from the object attributes in R.
-Specifically, they refer to key-value strings that can be attached to an xgboost model,
-stored together with the model's binary representation, and accessed later
-(from R or any other interface).
-In contrast, any R-attribute assigned to an R-object of \code{xgb.Booster} class
-would not be saved by \code{xgb.save} because an xgboost model is an external memory object
-and its serialization is handled externally.
-Also, setting an attribute that has the same name as one of xgboost's parameters wouldn't
-change the value of that parameter for a model.
-Use \code{\link{xgb.parameters<-}} to set or change model parameters. - -The attribute setters would usually work more efficiently for \code{xgb.Booster.handle} -than for \code{xgb.Booster}, since only just a handle (pointer) would need to be copied. -That would only matter if attributes need to be set many times. -Note, however, that when feeding a handle of an \code{xgb.Booster} object to the attribute setters, -the raw model cache of an \code{xgb.Booster} object would not be automatically updated, -and it would be user's responsibility to call \code{xgb.serialize} to update it. - -The \code{xgb.attributes<-} setter either updates the existing or adds one or several attributes, -but it doesn't delete the other existing attributes. -} -\examples{ -data(agaricus.train, package='xgboost') -train <- agaricus.train - -bst <- xgboost(data = train$data, label = train$label, max_depth = 2, - eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") - -xgb.attr(bst, "my_attribute") <- "my attribute value" -print(xgb.attr(bst, "my_attribute")) -xgb.attributes(bst) <- list(a = 123, b = "abc") - -xgb.save(bst, 'xgb.model') -bst1 <- xgb.load('xgb.model') -if (file.exists('xgb.model')) file.remove('xgb.model') -print(xgb.attr(bst1, "my_attribute")) -print(xgb.attributes(bst1)) - -# deletion: -xgb.attr(bst1, "my_attribute") <- NULL -print(xgb.attributes(bst1)) -xgb.attributes(bst1) <- list(a = NULL, b = NULL) -print(xgb.attributes(bst1)) - -} diff --git a/ml-xgboost/R-package/man/xgb.config.Rd b/ml-xgboost/R-package/man/xgb.config.Rd deleted file mode 100644 index a5187c8..0000000 --- a/ml-xgboost/R-package/man/xgb.config.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/xgb.Booster.R -\name{xgb.config} -\alias{xgb.config} -\alias{xgb.config<-} -\title{Accessors for model parameters as JSON string.} -\usage{ -xgb.config(object) - -xgb.config(object) <- value -} -\arguments{ -\item{object}{Object of class \code{xgb.Booster}} - -\item{value}{A JSON string.} -} -\description{ -Accessors for model parameters as JSON string. -} -\examples{ -data(agaricus.train, package='xgboost') -train <- agaricus.train - -bst <- xgboost(data = train$data, label = train$label, max_depth = 2, - eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") -config <- xgb.config(bst) - -} diff --git a/ml-xgboost/R-package/man/xgb.create.features.Rd b/ml-xgboost/R-package/man/xgb.create.features.Rd deleted file mode 100644 index 9c59d90..0000000 --- a/ml-xgboost/R-package/man/xgb.create.features.Rd +++ /dev/null @@ -1,92 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/xgb.create.features.R -\name{xgb.create.features} -\alias{xgb.create.features} -\title{Create new features from a previously learned model} -\usage{ -xgb.create.features(model, data, ...) -} -\arguments{ -\item{model}{decision tree boosting model learned on the original data} - -\item{data}{original data (usually provided as a \code{dgCMatrix} matrix)} - -\item{...}{currently not used} -} -\value{ -\code{dgCMatrix} matrix including both the original data and the new features. -} -\description{ -May improve the learning by adding new features to the training data based on the decision trees from a previously learned model. 
-} -\details{ -This is the function inspired from the paragraph 3.1 of the paper: - -\strong{Practical Lessons from Predicting Clicks on Ads at Facebook} - -\emph{(Xinran He, Junfeng Pan, Ou Jin, Tianbing Xu, Bo Liu, Tao Xu, Yan, xin Shi, Antoine Atallah, Ralf Herbrich, Stuart Bowers, -Joaquin Quinonero Candela)} - -International Workshop on Data Mining for Online Advertising (ADKDD) - August 24, 2014 - -\url{https://research.fb.com/publications/practical-lessons-from-predicting-clicks-on-ads-at-facebook/}. - -Extract explaining the method: - -"We found that boosted decision trees are a powerful and very -convenient way to implement non-linear and tuple transformations -of the kind we just described. We treat each individual -tree as a categorical feature that takes as value the -index of the leaf an instance ends up falling in. We use -1-of-K coding of this type of features. - -For example, consider the boosted tree model in Figure 1 with 2 subtrees, -where the first subtree has 3 leafs and the second 2 leafs. If an -instance ends up in leaf 2 in the first subtree and leaf 1 in -second subtree, the overall input to the linear classifier will -be the binary vector \code{[0, 1, 0, 1, 0]}, where the first 3 entries -correspond to the leaves of the first subtree and last 2 to -those of the second subtree. - -[...] - -We can understand boosted decision tree -based transformation as a supervised feature encoding that -converts a real-valued vector into a compact binary-valued -vector. A traversal from root node to a leaf node represents -a rule on certain features." -} -\examples{ -data(agaricus.train, package='xgboost') -data(agaricus.test, package='xgboost') -dtrain <- xgb.DMatrix(data = agaricus.train$data, label = agaricus.train$label) -dtest <- xgb.DMatrix(data = agaricus.test$data, label = agaricus.test$label) - -param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic') -nrounds = 4 - -bst = xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2) - -# Model accuracy without new features -accuracy.before <- sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label) / - length(agaricus.test$label) - -# Convert previous features to one hot encoding -new.features.train <- xgb.create.features(model = bst, agaricus.train$data) -new.features.test <- xgb.create.features(model = bst, agaricus.test$data) - -# learning with new features -new.dtrain <- xgb.DMatrix(data = new.features.train, label = agaricus.train$label) -new.dtest <- xgb.DMatrix(data = new.features.test, label = agaricus.test$label) -watchlist <- list(train = new.dtrain) -bst <- xgb.train(params = param, data = new.dtrain, nrounds = nrounds, nthread = 2) - -# Model accuracy with new features -accuracy.after <- sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label) / - length(agaricus.test$label) - -# Here the accuracy was already good and is now perfect. 
-cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now",
- accuracy.after, "!\n"))
-
-}
diff --git a/ml-xgboost/R-package/man/xgb.cv.Rd b/ml-xgboost/R-package/man/xgb.cv.Rd
deleted file mode 100644
index 8cb03a5..0000000
--- a/ml-xgboost/R-package/man/xgb.cv.Rd
+++ /dev/null
@@ -1,164 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/xgb.cv.R
-\name{xgb.cv}
-\alias{xgb.cv}
-\title{Cross Validation}
-\usage{
-xgb.cv(
- params = list(),
- data,
- nrounds,
- nfold,
- label = NULL,
- missing = NA,
- prediction = FALSE,
- showsd = TRUE,
- metrics = list(),
- obj = NULL,
- feval = NULL,
- stratified = TRUE,
- folds = NULL,
- train_folds = NULL,
- verbose = TRUE,
- print_every_n = 1L,
- early_stopping_rounds = NULL,
- maximize = NULL,
- callbacks = list(),
- ...
-)
-}
-\arguments{
-\item{params}{the list of parameters. Commonly used ones are:
-\itemize{
- \item \code{objective} objective function, common ones are
- \itemize{
- \item \code{reg:squarederror} Regression with squared loss
- \item \code{binary:logistic} logistic regression for classification
- }
- \item \code{eta} step size of each boosting step
- \item \code{max_depth} maximum depth of the tree
- \item \code{nthread} number of threads used in training; if not set, all threads are used
-}
-
- See \code{\link{xgb.train}} for further details.
- See also demo/ for walkthrough example in R.}
-
-\item{data}{takes an \code{xgb.DMatrix}, \code{matrix}, or \code{dgCMatrix} as the input.}
-
-\item{nrounds}{the maximum number of iterations}
-
-\item{nfold}{the original dataset is randomly partitioned into \code{nfold} equal size subsamples.}
-
-\item{label}{vector of response values. Should be provided only when data is an R-matrix.}
-
-\item{missing}{is only used when input is a dense matrix. By default it is set to NA, which means
-that NA values should be considered as 'missing' by the algorithm.
-Sometimes, 0 or other extreme value might be used to represent missing values.}
-
-\item{prediction}{A logical value indicating whether to return the test fold predictions
-from each CV model. This parameter engages the \code{\link{cb.cv.predict}} callback.}
-
-\item{showsd}{\code{boolean}, whether to show standard deviation of cross validation}
-
-\item{metrics}{list of evaluation metrics to be used in cross validation,
- when it is not specified, the evaluation metric is chosen according to the objective function.
- Possible options are:
-\itemize{
- \item \code{error} binary classification error rate
- \item \code{rmse} Root mean square error
- \item \code{logloss} negative log-likelihood function
- \item \code{auc} Area under curve
- \item \code{aucpr} Area under PR curve
- \item \code{merror} Exact matching error, used to evaluate multi-class classification
-}}
-
-\item{obj}{customized objective function. Returns gradient and second order
-gradient with given prediction and dtrain.}
-
-\item{feval}{customized evaluation function. Returns
-\code{list(metric='metric-name', value='metric-value')} with given
-prediction and dtrain.}
-
-\item{stratified}{a \code{boolean} indicating whether sampling of folds should be stratified
-by the values of outcome labels.}
-
-\item{folds}{\code{list} provides a possibility to use a list of pre-defined CV folds
-(each element must be a vector of test fold's indices). When folds are supplied,
-the \code{nfold} and \code{stratified} parameters are ignored (a short sketch is given
-in the Details section below).}
-
-\item{train_folds}{\code{list} specifying which indices to use for training.
If \code{NULL} -(the default) all indices not specified in \code{folds} will be used for training.} - -\item{verbose}{\code{boolean}, print the statistics during the process} - -\item{print_every_n}{Print each n-th iteration evaluation messages when \code{verbose>0}. -Default is 1 which means all messages are printed. This parameter is passed to the -\code{\link{cb.print.evaluation}} callback.} - -\item{early_stopping_rounds}{If \code{NULL}, the early stopping function is not triggered. -If set to an integer \code{k}, training with a validation set will stop if the performance -doesn't improve for \code{k} rounds. -Setting this parameter engages the \code{\link{cb.early.stop}} callback.} - -\item{maximize}{If \code{feval} and \code{early_stopping_rounds} are set, -then this parameter must be set as well. -When it is \code{TRUE}, it means the larger the evaluation score the better. -This parameter is passed to the \code{\link{cb.early.stop}} callback.} - -\item{callbacks}{a list of callback functions to perform various task during boosting. -See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the -parameters' values. User can provide either existing or their own callback methods in order -to customize the training process.} - -\item{...}{other parameters to pass to \code{params}.} -} -\value{ -An object of class \code{xgb.cv.synchronous} with the following elements: -\itemize{ - \item \code{call} a function call. - \item \code{params} parameters that were passed to the xgboost library. Note that it does not - capture parameters changed by the \code{\link{cb.reset.parameters}} callback. - \item \code{callbacks} callback functions that were either automatically assigned or - explicitly passed. - \item \code{evaluation_log} evaluation history stored as a \code{data.table} with the - first column corresponding to iteration number and the rest corresponding to the - CV-based evaluation means and standard deviations for the training and test CV-sets. - It is created by the \code{\link{cb.evaluation.log}} callback. - \item \code{niter} number of boosting iterations. - \item \code{nfeatures} number of features in training data. - \item \code{folds} the list of CV folds' indices - either those passed through the \code{folds} - parameter or randomly generated. - \item \code{best_iteration} iteration number with the best evaluation metric value - (only available with early stopping). - \item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration, - which could further be used in \code{predict} method - (only available with early stopping). - \item \code{pred} CV prediction values available when \code{prediction} is set. - It is either vector or matrix (see \code{\link{cb.cv.predict}}). - \item \code{models} a list of the CV folds' models. It is only available with the explicit - setting of the \code{cb.cv.predict(save_models = TRUE)} callback. -} -} -\description{ -The cross validation function of xgboost -} -\details{ -The original sample is randomly partitioned into \code{nfold} equal size subsamples. - -Of the \code{nfold} subsamples, a single subsample is retained as the validation data for testing the model, and the remaining \code{nfold - 1} subsamples are used as training data. - -The cross-validation process is then repeated \code{nrounds} times, with each of the \code{nfold} subsamples used exactly once as the validation data. - -All observations are used for both training and validation. 
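-
-A minimal sketch of the \code{folds} parameter mentioned above (assuming \code{dtrain} from the
-example below): pre-defined test-fold index sets can be passed instead of \code{nfold}:
-\preformatted{
-idx <- sample(rep_len(1:5, nrow(agaricus.train$data)))
-folds <- split(seq_along(idx), idx)
-cv <- xgb.cv(data = dtrain, nrounds = 3, folds = folds,
-             objective = "binary:logistic", metrics = "auc")
-}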
- -Adapted from \url{http://en.wikipedia.org/wiki/Cross-validation_\%28statistics\%29#k-fold_cross-validation} -} -\examples{ -data(agaricus.train, package='xgboost') -dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label) -cv <- xgb.cv(data = dtrain, nrounds = 3, nthread = 2, nfold = 5, metrics = list("rmse","auc"), - max_depth = 3, eta = 1, objective = "binary:logistic") -print(cv) -print(cv, verbose=TRUE) - -} diff --git a/ml-xgboost/R-package/man/xgb.dump.Rd b/ml-xgboost/R-package/man/xgb.dump.Rd deleted file mode 100644 index 210c6e2..0000000 --- a/ml-xgboost/R-package/man/xgb.dump.Rd +++ /dev/null @@ -1,62 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/xgb.dump.R -\name{xgb.dump} -\alias{xgb.dump} -\title{Dump an xgboost model in text format.} -\usage{ -xgb.dump( - model, - fname = NULL, - fmap = "", - with_stats = FALSE, - dump_format = c("text", "json"), - ... -) -} -\arguments{ -\item{model}{the model object.} - -\item{fname}{the name of the text file where to save the model text dump. -If not provided or set to \code{NULL}, the model is returned as a \code{character} vector.} - -\item{fmap}{feature map file representing feature types. -Detailed description could be found at -\url{https://github.com/dmlc/xgboost/wiki/Binary-Classification#dump-model}. -See demo/ for walkthrough example in R, and -\url{https://github.com/dmlc/xgboost/blob/master/demo/data/featmap.txt} -for example Format.} - -\item{with_stats}{whether to dump some additional statistics about the splits. -When this option is on, the model dump contains two additional values: -gain is the approximate loss function gain we get in each split; -cover is the sum of second order gradient in each node.} - -\item{dump_format}{either 'text' or 'json' format could be specified.} - -\item{...}{currently not used} -} -\value{ -If fname is not provided or set to \code{NULL} the function will return the model -as a \code{character} vector. Otherwise it will return \code{TRUE}. -} -\description{ -Dump an xgboost model in text format. -} -\examples{ -data(agaricus.train, package='xgboost') -data(agaricus.test, package='xgboost') -train <- agaricus.train -test <- agaricus.test -bst <- xgboost(data = train$data, label = train$label, max_depth = 2, - eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") -# save the model in file 'xgb.model.dump' -dump_path = file.path(tempdir(), 'model.dump') -xgb.dump(bst, dump_path, with_stats = TRUE) - -# print the model without saving it to a file -print(xgb.dump(bst, with_stats = TRUE)) - -# print in JSON format: -cat(xgb.dump(bst, with_stats = TRUE, dump_format='json')) - -} diff --git a/ml-xgboost/R-package/man/xgb.gblinear.history.Rd b/ml-xgboost/R-package/man/xgb.gblinear.history.Rd deleted file mode 100644 index bc8d467..0000000 --- a/ml-xgboost/R-package/man/xgb.gblinear.history.Rd +++ /dev/null @@ -1,29 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/callbacks.R -\name{xgb.gblinear.history} -\alias{xgb.gblinear.history} -\title{Extract gblinear coefficients history.} -\usage{ -xgb.gblinear.history(model, class_index = NULL) -} -\arguments{ -\item{model}{either an \code{xgb.Booster} or a result of \code{xgb.cv()}, trained -using the \code{cb.gblinear.history()} callback.} - -\item{class_index}{zero-based class index to extract the coefficients for only that -specific class in a multinomial multiclass model. When it is NULL, all the -coefficients are returned. 
Has no effect in non-multiclass models.} -} -\value{ -For an \code{xgb.train} result, a matrix (either dense or sparse) with the columns -corresponding to iteration's coefficients (in the order as \code{xgb.dump()} would -return) and the rows corresponding to boosting iterations. - -For an \code{xgb.cv} result, a list of such matrices is returned with the elements -corresponding to CV folds. -} -\description{ -A helper function to extract the matrix of linear coefficients' history -from a gblinear model created while using the \code{cb.gblinear.history()} -callback. -} diff --git a/ml-xgboost/R-package/man/xgb.importance.Rd b/ml-xgboost/R-package/man/xgb.importance.Rd deleted file mode 100644 index 84a18e1..0000000 --- a/ml-xgboost/R-package/man/xgb.importance.Rd +++ /dev/null @@ -1,101 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/xgb.importance.R -\name{xgb.importance} -\alias{xgb.importance} -\title{Importance of features in a model.} -\usage{ -xgb.importance( - feature_names = NULL, - model = NULL, - trees = NULL, - data = NULL, - label = NULL, - target = NULL -) -} -\arguments{ -\item{feature_names}{character vector of feature names. If the model already -contains feature names, those would be used when \code{feature_names=NULL} (default value). -Non-null \code{feature_names} could be provided to override those in the model.} - -\item{model}{object of class \code{xgb.Booster}.} - -\item{trees}{(only for the gbtree booster) an integer vector of tree indices that should be included -into the importance calculation. If set to \code{NULL}, all trees of the model are parsed. -It could be useful, e.g., in multiclass classification to get feature importances -for each class separately. IMPORTANT: the tree index in xgboost models -is zero-based (e.g., use \code{trees = 0:4} for first 5 trees).} - -\item{data}{deprecated.} - -\item{label}{deprecated.} - -\item{target}{deprecated.} -} -\value{ -For a tree model, a \code{data.table} with the following columns: -\itemize{ - \item \code{Features} names of the features used in the model; - \item \code{Gain} represents fractional contribution of each feature to the model based on - the total gain of this feature's splits. Higher percentage means a more important - predictive feature. - \item \code{Cover} metric of the number of observation related to this feature; - \item \code{Frequency} percentage representing the relative number of times - a feature have been used in trees. -} - -A linear model's importance \code{data.table} has the following columns: -\itemize{ - \item \code{Features} names of the features used in the model; - \item \code{Weight} the linear coefficient of this feature; - \item \code{Class} (only for multiclass models) class label. -} - -If \code{feature_names} is not provided and \code{model} doesn't have \code{feature_names}, -index of the features will be used instead. Because the index is extracted from the model dump -(based on C++ code), it starts at 0 (as in C/C++ or Python) instead of 1 (usual in R). -} -\description{ -Creates a \code{data.table} of feature importances in a model. -} -\details{ -This function works for both linear and tree models. - -For linear models, the importance is the absolute magnitude of linear coefficients. -For that reason, in order to obtain a meaningful ranking by importance for a linear model, -the features need to be on the same scale (which you also would want to do when using either -L1 or L2 regularization). 
-} -\examples{ - -# binomial classification using gbtree: -data(agaricus.train, package='xgboost') -bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2, - eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") -xgb.importance(model = bst) - -# binomial classification using gblinear: -bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, booster = "gblinear", - eta = 0.3, nthread = 1, nrounds = 20, objective = "binary:logistic") -xgb.importance(model = bst) - -# multiclass classification using gbtree: -nclass <- 3 -nrounds <- 10 -mbst <- xgboost(data = as.matrix(iris[, -5]), label = as.numeric(iris$Species) - 1, - max_depth = 3, eta = 0.2, nthread = 2, nrounds = nrounds, - objective = "multi:softprob", num_class = nclass) -# all classes clumped together: -xgb.importance(model = mbst) -# inspect importances separately for each class: -xgb.importance(model = mbst, trees = seq(from=0, by=nclass, length.out=nrounds)) -xgb.importance(model = mbst, trees = seq(from=1, by=nclass, length.out=nrounds)) -xgb.importance(model = mbst, trees = seq(from=2, by=nclass, length.out=nrounds)) - -# multiclass classification using gblinear: -mbst <- xgboost(data = scale(as.matrix(iris[, -5])), label = as.numeric(iris$Species) - 1, - booster = "gblinear", eta = 0.2, nthread = 1, nrounds = 15, - objective = "multi:softprob", num_class = nclass) -xgb.importance(model = mbst) - -} diff --git a/ml-xgboost/R-package/man/xgb.load.Rd b/ml-xgboost/R-package/man/xgb.load.Rd deleted file mode 100644 index 3f743e1..0000000 --- a/ml-xgboost/R-package/man/xgb.load.Rd +++ /dev/null @@ -1,41 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/xgb.load.R -\name{xgb.load} -\alias{xgb.load} -\title{Load xgboost model from binary file} -\usage{ -xgb.load(modelfile) -} -\arguments{ -\item{modelfile}{the name of the binary input file.} -} -\value{ -An object of \code{xgb.Booster} class. -} -\description{ -Load xgboost model from the binary model file. -} -\details{ -The input file is expected to contain a model saved in an xgboost-internal binary format -using either \code{\link{xgb.save}} or \code{\link{cb.save.model}} in R, or using some -appropriate methods from other xgboost interfaces. E.g., a model trained in Python and -saved from there in xgboost format, could be loaded from R. - -Note: a model saved as an R-object, has to be loaded using corresponding R-methods, -not \code{xgb.load}. -} -\examples{ -data(agaricus.train, package='xgboost') -data(agaricus.test, package='xgboost') -train <- agaricus.train -test <- agaricus.test -bst <- xgboost(data = train$data, label = train$label, max_depth = 2, - eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic") -xgb.save(bst, 'xgb.model') -bst <- xgb.load('xgb.model') -if (file.exists('xgb.model')) file.remove('xgb.model') -pred <- predict(bst, test$data) -} -\seealso{ -\code{\link{xgb.save}}, \code{\link{xgb.Booster.complete}}. 
-}
diff --git a/ml-xgboost/R-package/man/xgb.load.raw.Rd b/ml-xgboost/R-package/man/xgb.load.raw.Rd
deleted file mode 100644
index f0248cd..0000000
--- a/ml-xgboost/R-package/man/xgb.load.raw.Rd
+++ /dev/null
@@ -1,14 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/xgb.load.raw.R
-\name{xgb.load.raw}
-\alias{xgb.load.raw}
-\title{Load serialised xgboost model from R's raw vector}
-\usage{
-xgb.load.raw(buffer)
-}
-\arguments{
-\item{buffer}{the buffer returned by xgb.save.raw}
-}
-\description{
-The user can generate a raw memory buffer by calling \code{xgb.save.raw}.
-}
diff --git a/ml-xgboost/R-package/man/xgb.model.dt.tree.Rd b/ml-xgboost/R-package/man/xgb.model.dt.tree.Rd
deleted file mode 100644
index cf17501..0000000
--- a/ml-xgboost/R-package/man/xgb.model.dt.tree.Rd
+++ /dev/null
@@ -1,83 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/xgb.model.dt.tree.R
-\name{xgb.model.dt.tree}
-\alias{xgb.model.dt.tree}
-\title{Parse a boosted tree model text dump}
-\usage{
-xgb.model.dt.tree(
- feature_names = NULL,
- model = NULL,
- text = NULL,
- trees = NULL,
- use_int_id = FALSE,
- ...
-)
-}
-\arguments{
-\item{feature_names}{character vector of feature names. If the model already
-contains feature names, those would be used when \code{feature_names=NULL} (default value).
-Non-null \code{feature_names} could be provided to override those in the model.}
-
-\item{model}{object of class \code{xgb.Booster}}
-
-\item{text}{\code{character} vector previously generated by the \code{xgb.dump}
-function (where parameter \code{with_stats = TRUE} should have been set).
-\code{text} takes precedence over \code{model}.}
-
-\item{trees}{an integer vector of tree indices that should be parsed.
-If set to \code{NULL}, all trees of the model are parsed.
-It could be useful, e.g., in multiclass classification to get only
-the trees of one certain class. IMPORTANT: the tree index in xgboost models
-is zero-based (e.g., use \code{trees = 0:4} for the first 5 trees).}
-
-\item{use_int_id}{a logical flag indicating whether nodes in columns "Yes", "No", "Missing" should be
-represented as integers (when TRUE) or as "Tree-Node" character strings (when FALSE).}
-
-\item{...}{currently not used.}
-}
-\value{
-A \code{data.table} with detailed information about model trees' nodes.
-
-The columns of the \code{data.table} are:
-
-\itemize{
- \item \code{Tree}: integer ID of a tree in a model (zero-based index)
- \item \code{Node}: integer ID of a node in a tree (zero-based index)
- \item \code{ID}: character identifier of a node in a model (only when \code{use_int_id=FALSE})
- \item \code{Feature}: for a branch node, it's a feature id or name (when available);
- for a leaf node, it simply labels it as \code{'Leaf'}
- \item \code{Split}: location of the split for a branch node (split condition is always "less than")
- \item \code{Yes}: ID of the next node when the split condition is met
- \item \code{No}: ID of the next node when the split condition is not met
- \item \code{Missing}: ID of the next node when branch value is missing
- \item \code{Quality}: either the split gain (change in loss) or the leaf value
- \item \code{Cover}: metric related to the number of observations either seen by a split
- or collected by a leaf during training.
-}
-
-When \code{use_int_id=FALSE}, columns "Yes", "No", and "Missing" point to model-wide node identifiers
-in the "ID" column.
When \code{use_int_id=TRUE}, those columns point to node identifiers from -the corresponding trees in the "Node" column. -} -\description{ -Parse a boosted tree model text dump into a \code{data.table} structure. -} -\examples{ -# Basic use: - -data(agaricus.train, package='xgboost') - -bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2, - eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") - -(dt <- xgb.model.dt.tree(colnames(agaricus.train$data), bst)) - -# This bst model already has feature_names stored with it, so those would be used when -# feature_names is not set: -(dt <- xgb.model.dt.tree(model = bst)) - -# How to match feature names of splits that follow the current 'Yes' branch: - -merge(dt, dt[, .(ID, Y.Feature=Feature)], by.x='Yes', by.y='ID', all.x=TRUE)[order(Tree,Node)] - -} diff --git a/ml-xgboost/R-package/man/xgb.parameters.Rd b/ml-xgboost/R-package/man/xgb.parameters.Rd deleted file mode 100644 index ab26956..0000000 --- a/ml-xgboost/R-package/man/xgb.parameters.Rd +++ /dev/null @@ -1,31 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/xgb.Booster.R -\name{xgb.parameters<-} -\alias{xgb.parameters<-} -\title{Accessors for model parameters.} -\usage{ -xgb.parameters(object) <- value -} -\arguments{ -\item{object}{Object of class \code{xgb.Booster} or \code{xgb.Booster.handle}.} - -\item{value}{a list (or an object coercible to a list) with the names of parameters to set -and the elements corresponding to parameter values.} -} -\description{ -Only the setter for xgboost parameters is currently implemented. -} -\details{ -Note that the setter would usually work more efficiently for \code{xgb.Booster.handle} -than for \code{xgb.Booster}, since only a handle would need to be copied. -} -\examples{ -data(agaricus.train, package='xgboost') -train <- agaricus.train - -bst <- xgboost(data = train$data, label = train$label, max_depth = 2, - eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") - -xgb.parameters(bst) <- list(eta = 0.1) - -} diff --git a/ml-xgboost/R-package/man/xgb.plot.deepness.Rd b/ml-xgboost/R-package/man/xgb.plot.deepness.Rd deleted file mode 100644 index b642398..0000000 --- a/ml-xgboost/R-package/man/xgb.plot.deepness.Rd +++ /dev/null @@ -1,80 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/xgb.ggplot.R, R/xgb.plot.deepness.R -\name{xgb.ggplot.deepness} -\alias{xgb.ggplot.deepness} -\alias{xgb.plot.deepness} -\title{Plot model trees deepness} -\usage{ -xgb.ggplot.deepness( - model = NULL, - which = c("2x1", "max.depth", "med.depth", "med.weight") -) - -xgb.plot.deepness( - model = NULL, - which = c("2x1", "max.depth", "med.depth", "med.weight"), - plot = TRUE, - ... -) -} -\arguments{ -\item{model}{either an \code{xgb.Booster} model generated by the \code{xgb.train} function -or a data.table result of the \code{xgb.model.dt.tree} function.} - -\item{which}{which distribution to plot (see details).} - -\item{plot}{(base R barplot) whether a barplot should be produced. -If FALSE, only a data.table is returned.} - -\item{...}{other parameters passed to \code{barplot} or \code{plot}.} -} -\value{ -Other than producing plots (when \code{plot=TRUE}), the \code{xgb.plot.deepness} function -silently returns a processed data.table where each row corresponds to a terminal leaf in a tree model, -and contains information about the leaf's depth, cover, and weight (which is used in calculating predictions).
- -The \code{xgb.ggplot.deepness} silently returns either a list of two ggplot graphs when \code{which="2x1"} -or a single ggplot graph for the other \code{which} options. -} -\description{ -Visualizes distributions related to the depth of tree leaves. -\code{xgb.plot.deepness} uses base R graphics, while \code{xgb.ggplot.deepness} uses the ggplot backend. -} -\details{ -When \code{which="2x1"}, two distributions with respect to the leaf depth -are plotted on top of each other: -\itemize{ - \item the distribution of the number of leaves in a tree model at a certain depth; - \item the distribution of the average weighted number of observations ("cover") - ending up in leaves at a certain depth. -} -Those could be helpful in determining sensible ranges of the \code{max_depth} -and \code{min_child_weight} parameters. - -When \code{which="max.depth"} or \code{which="med.depth"}, plots of either maximum or median depth -per tree with respect to tree number are created, and \code{which="med.weight"} allows one to see how -a tree's median absolute leaf weight changes through the iterations. - -This function was inspired by the blog post -\url{https://github.com/aysent/random-forest-leaf-visualization}. -} -\examples{ - -data(agaricus.train, package='xgboost') - -# Change max_depth to a higher number to get a more significant result -bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 6, - eta = 0.1, nthread = 2, nrounds = 50, objective = "binary:logistic", - subsample = 0.5, min_child_weight = 2) - -xgb.plot.deepness(bst) -xgb.ggplot.deepness(bst) - -xgb.plot.deepness(bst, which='max.depth', pch=16, col=rgb(0,0,1,0.3), cex=2) - -xgb.plot.deepness(bst, which='med.weight', pch=16, col=rgb(0,0,1,0.3), cex=2) - -} -\seealso{ -\code{\link{xgb.train}}, \code{\link{xgb.model.dt.tree}}. -} diff --git a/ml-xgboost/R-package/man/xgb.plot.importance.Rd b/ml-xgboost/R-package/man/xgb.plot.importance.Rd deleted file mode 100644 index 691a8fd..0000000 --- a/ml-xgboost/R-package/man/xgb.plot.importance.Rd +++ /dev/null @@ -1,94 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/xgb.ggplot.R, R/xgb.plot.importance.R -\name{xgb.ggplot.importance} -\alias{xgb.ggplot.importance} -\alias{xgb.plot.importance} -\title{Plot feature importance as a bar graph} -\usage{ -xgb.ggplot.importance( - importance_matrix = NULL, - top_n = NULL, - measure = NULL, - rel_to_first = FALSE, - n_clusters = c(1:10), - ... -) - -xgb.plot.importance( - importance_matrix = NULL, - top_n = NULL, - measure = NULL, - rel_to_first = FALSE, - left_margin = 10, - cex = NULL, - plot = TRUE, - ... -) -} -\arguments{ -\item{importance_matrix}{a \code{data.table} returned by \code{\link{xgb.importance}}.} - -\item{top_n}{maximal number of top features to include in the plot.} - -\item{measure}{the name of the importance measure to plot. -When \code{NULL}, 'Gain' would be used for trees and 'Weight' would be used for gblinear.} - -\item{rel_to_first}{whether importance values should be represented as relative to the highest ranked feature. -See Details.} - -\item{n_clusters}{(ggplot only) a \code{numeric} vector containing the min and the max range -of the possible number of clusters of bars.} - -\item{...}{other parameters passed to \code{barplot} (except horiz, border, cex.names, names.arg, and las).} - -\item{left_margin}{(base R barplot) allows adjusting the left margin size to fit feature names.
-When it is NULL, the existing \code{par('mar')} is used.} - -\item{cex}{(base R barplot) passed as \code{cex.names} parameter to \code{barplot}.} - -\item{plot}{(base R barplot) whether a barplot should be produced. -If FALSE, only a data.table is returned.} -} -\value{ -The \code{xgb.plot.importance} function creates a \code{barplot} (when \code{plot=TRUE}) -and silently returns a processed data.table with \code{top_n} features sorted by importance. - -The \code{xgb.ggplot.importance} function returns a ggplot graph which could be customized afterwards. -E.g., to change the title of the graph, add \code{+ ggtitle("A GRAPH NAME")} to the result. -} -\description{ -Represents previously calculated feature importance as a bar graph. -\code{xgb.plot.importance} uses base R graphics, while \code{xgb.ggplot.importance} uses the ggplot backend. -} -\details{ -The graph represents each feature as a horizontal bar of length proportional to the importance of a feature. -Features are shown ranked in decreasing importance order. -It works for importances from both \code{gblinear} and \code{gbtree} models. - -When \code{rel_to_first = FALSE}, the values would be plotted as they were in \code{importance_matrix}. -For a gbtree model, that would mean being normalized to the total of 1 -("what is feature's importance contribution relative to the whole model?"). -For linear models, \code{rel_to_first = FALSE} would show actual values of the coefficients. -Setting \code{rel_to_first = TRUE} allows one to see the picture from the perspective of -"what is feature's importance contribution relative to the most important feature?" - -The ggplot-backend method also performs 1-D clustering of the importance values, -with bar colors corresponding to different clusters that have somewhat similar importance values. -} -\examples{ -data(agaricus.train) - -bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 3, - eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") - -importance_matrix <- xgb.importance(colnames(agaricus.train$data), model = bst) - -xgb.plot.importance(importance_matrix, rel_to_first = TRUE, xlab = "Relative importance") - -(gg <- xgb.ggplot.importance(importance_matrix, measure = "Frequency", rel_to_first = TRUE)) -gg + ggplot2::ylab("Frequency") - -} -\seealso{ -\code{\link[graphics]{barplot}}. -} diff --git a/ml-xgboost/R-package/man/xgb.plot.multi.trees.Rd b/ml-xgboost/R-package/man/xgb.plot.multi.trees.Rd deleted file mode 100644 index 74c4a06..0000000 --- a/ml-xgboost/R-package/man/xgb.plot.multi.trees.Rd +++ /dev/null @@ -1,82 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/xgb.plot.multi.trees.R -\name{xgb.plot.multi.trees} -\alias{xgb.plot.multi.trees} -\title{Project all trees on one tree and plot it} -\usage{ -xgb.plot.multi.trees( - model, - feature_names = NULL, - features_keep = 5, - plot_width = NULL, - plot_height = NULL, - render = TRUE, - ...
-) -} -\arguments{ -\item{model}{produced by the \code{xgb.train} function.} - -\item{feature_names}{names of each feature as a \code{character} vector.} - -\item{features_keep}{number of features to keep in each position of the multi trees.} - -\item{plot_width}{width in pixels of the graph to produce} - -\item{plot_height}{height in pixels of the graph to produce} - -\item{render}{a logical flag for whether the graph should be rendered (see Value).} - -\item{...}{currently not used} -} -\value{ -When \code{render = TRUE}: -returns a rendered graph object which is an \code{htmlwidget} of class \code{grViz}. -Similar to ggplot objects, it needs to be printed to see it when not running from command line. - -When \code{render = FALSE}: -silently returns a graph object which is of DiagrammeR's class \code{dgr_graph}. -This could be useful if one wants to modify some of the graph attributes -before rendering the graph with \code{\link[DiagrammeR]{render_graph}}. -} -\description{ -Visualization of the ensemble of trees as a single collective unit. -} -\details{ -This function tries to capture the complexity of a gradient boosted tree model -in a cohesive way by compressing an ensemble of trees into a single tree-graph representation. -The goal is to improve the interpretability of a model generally seen as black box. - -Note: this function is applicable to tree booster-based models only. - -It takes advantage of the fact that the shape of a binary tree is only defined by -its depth (therefore, in a boosting model, all trees have similar shape). - -Moreover, the trees tend to reuse the same features. - -The function projects each tree onto one, and keeps for each position the -\code{features_keep} first features (based on the Gain per feature measure). - -This function is inspired by this blog post: -\url{https://wellecks.wordpress.com/2015/02/21/peering-into-the-black-box-visualizing-lambdamart/} -} -\examples{ - -data(agaricus.train, package='xgboost') - -bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 15, - eta = 1, nthread = 2, nrounds = 30, objective = "binary:logistic", - min_child_weight = 50, verbose = 0) - -p <- xgb.plot.multi.trees(model = bst, features_keep = 3) -print(p) - -\dontrun{ -# Below is an example of how to save this plot to a file. -# Note that for `export_graph` to work, the DiagrammeRsvg and rsvg packages must also be installed. -library(DiagrammeR) -gr <- xgb.plot.multi.trees(model=bst, features_keep = 3, render=FALSE) -export_graph(gr, 'tree.pdf', width=1500, height=600) -} - -} diff --git a/ml-xgboost/R-package/man/xgb.plot.shap.Rd b/ml-xgboost/R-package/man/xgb.plot.shap.Rd deleted file mode 100644 index 3cd3a89..0000000 --- a/ml-xgboost/R-package/man/xgb.plot.shap.Rd +++ /dev/null @@ -1,158 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/xgb.plot.shap.R -\name{xgb.plot.shap} -\alias{xgb.plot.shap} -\title{SHAP contribution dependency plots} -\usage{ -xgb.plot.shap( - data, - shap_contrib = NULL, - features = NULL, - top_n = 1, - model = NULL, - trees = NULL, - target_class = NULL, - approxcontrib = FALSE, - subsample = NULL, - n_col = 1, - col = rgb(0, 0, 1, 0.2), - pch = ".", - discrete_n_uniq = 5, - discrete_jitter = 0.01, - ylab = "SHAP", - plot_NA = TRUE, - col_NA = rgb(0.7, 0, 1, 0.6), - pch_NA = ".", - pos_NA = 1.07, - plot_loess = TRUE, - col_loess = 2, - span_loess = 0.5, - which = c("1d", "2d"), - plot = TRUE, - ... 
-) -} -\arguments{ -\item{data}{data as a \code{matrix} or \code{dgCMatrix}.} - -\item{shap_contrib}{a matrix of SHAP contributions that was computed earlier for the above -\code{data}. When it is NULL, it is computed internally using \code{model} and \code{data}.} - -\item{features}{a vector of either column indices or of feature names to plot. When it is NULL, -feature importance is calculated, and the \code{top_n} highest-ranked features are taken.} - -\item{top_n}{when \code{features} is NULL, the \code{top_n} (within [1, 100]) most important features in a model are taken.} - -\item{model}{an \code{xgb.Booster} model. It has to be provided when either \code{shap_contrib} -or \code{features} is missing.} - -\item{trees}{passed to \code{\link{xgb.importance}} when \code{features = NULL}.} - -\item{target_class}{is only relevant for multiclass models. When it is set to a 0-based class index, -only SHAP contributions for that specific class are used. -If it is not set, SHAP importances are averaged over all classes.} - -\item{approxcontrib}{passed to \code{\link{predict.xgb.Booster}} when \code{shap_contrib = NULL}.} - -\item{subsample}{a random fraction of data points to use for plotting. When it is NULL, -it is set so that up to 100K data points are used.} - -\item{n_col}{a number of columns in a grid of plots.} - -\item{col}{color of the scatterplot markers.} - -\item{pch}{scatterplot marker.} - -\item{discrete_n_uniq}{a maximal number of unique values in a feature to consider it as discrete.} - -\item{discrete_jitter}{an \code{amount} parameter of jitter added to discrete features' positions.} - -\item{ylab}{a y-axis label in 1D plots.} - -\item{plot_NA}{whether the contributions of cases with missing values should also be plotted.} - -\item{col_NA}{a color of marker for missing value contributions.} - -\item{pch_NA}{a marker type for NA values.} - -\item{pos_NA}{a relative position of the x-location where NA values are shown: -\code{min(x) + (max(x) - min(x)) * pos_NA}.} - -\item{plot_loess}{whether to plot loess-smoothed curves. The smoothing is only done for features with -more than 5 distinct values.} - -\item{col_loess}{a color to use for the loess curves.} - -\item{span_loess}{the \code{span} parameter in \code{\link[stats]{loess}}'s call.} - -\item{which}{whether to do univariate or bivariate plotting. NOTE: only 1D is implemented so far.} - -\item{plot}{whether a plot should be drawn. If FALSE, only a list of matrices is returned.} - -\item{...}{other parameters passed to \code{plot}.} -} -\value{ -In addition to producing plots (when \code{plot=TRUE}), it silently returns a list of two matrices: -\itemize{ - \item \code{data} the values of selected features; - \item \code{shap_contrib} the contributions of selected features. -} -} -\description{ -Visualizes how SHAP feature contributions to the model prediction depend on feature values. -} -\details{ -These scatterplots represent how SHAP feature contributions depend on feature values. -The similarity to partial dependency plots is that they also give an idea of how feature values -affect predictions. However, in partial dependency plots, we usually see marginal dependencies -of model prediction on feature value, while SHAP contribution dependency plots display the estimated -contributions of a feature to model prediction for each individual case. - -When \code{plot_loess = TRUE} is set, feature values are rounded to 3 significant digits and -weighted LOESS is computed and plotted, where weights are the numbers of data points -at each rounded value.
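Since \code{shap_contrib} is computed internally from \code{model} and \code{data} when not supplied, it can be precomputed once and reused across several plots. A sketch, assuming a binary \code{bst} and the agaricus data as in the examples further below; it also checks the additivity property described in the Note that follows:

  contr <- predict(bst, agaricus.test$data, predcontrib = TRUE)   # per-feature contributions plus a bias column
  margin <- predict(bst, agaricus.test$data, outputmargin = TRUE)
  all.equal(rowSums(contr), margin, check.names = FALSE)          # contributions sum to the margin prediction
  xgb.plot.shap(agaricus.test$data, shap_contrib = contr, model = bst, top_n = 4, n_col = 2)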
- -Note: SHAP contributions are shown on the scale of model margin. E.g., for a logistic binomial objective, -the margin is prediction before a sigmoidal transform into probability-like values. -Also, since SHAP stands for "SHapley Additive exPlanation" (model prediction = sum of SHAP -contributions for all features + bias), depending on the objective used, transforming SHAP -contributions for a feature from the marginal to the prediction space is not necessarily -a meaningful thing to do. -} -\examples{ - -data(agaricus.train, package='xgboost') -data(agaricus.test, package='xgboost') - -bst <- xgboost(agaricus.train$data, agaricus.train$label, nrounds = 50, - eta = 0.1, max_depth = 3, subsample = .5, - method = "hist", objective = "binary:logistic", nthread = 2, verbose = 0) - -xgb.plot.shap(agaricus.test$data, model = bst, features = "odor=none") -contr <- predict(bst, agaricus.test$data, predcontrib = TRUE) -xgb.plot.shap(agaricus.test$data, contr, model = bst, top_n = 12, n_col = 3) - -# multiclass example - plots for each class separately: -nclass <- 3 -nrounds <- 20 -x <- as.matrix(iris[, -5]) -set.seed(123) -is.na(x[sample(nrow(x) * 4, 30)]) <- TRUE # introduce some missing values -mbst <- xgboost(data = x, label = as.numeric(iris$Species) - 1, nrounds = nrounds, - max_depth = 2, eta = 0.3, subsample = .5, nthread = 2, - objective = "multi:softprob", num_class = nclass, verbose = 0) -trees0 <- seq(from=0, by=nclass, length.out=nrounds) -col <- rgb(0, 0, 1, 0.5) -xgb.plot.shap(x, model = mbst, trees = trees0, target_class = 0, top_n = 4, - n_col = 2, col = col, pch = 16, pch_NA = 17) -xgb.plot.shap(x, model = mbst, trees = trees0 + 1, target_class = 1, top_n = 4, - n_col = 2, col = col, pch = 16, pch_NA = 17) -xgb.plot.shap(x, model = mbst, trees = trees0 + 2, target_class = 2, top_n = 4, - n_col = 2, col = col, pch = 16, pch_NA = 17) - -} -\references{ -Scott M. Lundberg, Su-In Lee, "A Unified Approach to Interpreting Model Predictions", NIPS Proceedings 2017, \url{https://arxiv.org/abs/1705.07874} - -Scott M. Lundberg, Su-In Lee, "Consistent feature attribution for tree ensembles", \url{https://arxiv.org/abs/1706.06060} -} diff --git a/ml-xgboost/R-package/man/xgb.plot.tree.Rd b/ml-xgboost/R-package/man/xgb.plot.tree.Rd deleted file mode 100644 index 3f9f99a..0000000 --- a/ml-xgboost/R-package/man/xgb.plot.tree.Rd +++ /dev/null @@ -1,91 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/xgb.plot.tree.R -\name{xgb.plot.tree} -\alias{xgb.plot.tree} -\title{Plot a boosted tree model} -\usage{ -xgb.plot.tree( - feature_names = NULL, - model = NULL, - trees = NULL, - plot_width = NULL, - plot_height = NULL, - render = TRUE, - show_node_id = FALSE, - ... -) -} -\arguments{ -\item{feature_names}{names of each feature as a \code{character} vector.} - -\item{model}{produced by the \code{xgb.train} function.} - -\item{trees}{an integer vector of tree indices that should be visualized. -If set to \code{NULL}, all trees of the model are included. 
-IMPORTANT: the tree index in xgboost models is zero-based -(e.g., use \code{trees = 0:2} for the first 3 trees in a model).} - -\item{plot_width}{the width of the diagram in pixels.} - -\item{plot_height}{the height of the diagram in pixels.} - -\item{render}{a logical flag for whether the graph should be rendered (see Value).} - -\item{show_node_id}{a logical flag for whether to show node IDs in the graph.} - -\item{...}{currently not used.} -} -\value{ -When \code{render = TRUE}: -returns a rendered graph object which is an \code{htmlwidget} of class \code{grViz}. -Similar to ggplot objects, it needs to be printed to see it when not running from command line. - -When \code{render = FALSE}: -silently returns a graph object which is of DiagrammeR's class \code{dgr_graph}. -This could be useful if one wants to modify some of the graph attributes -before rendering the graph with \code{\link[DiagrammeR]{render_graph}}. -} -\description{ -Read a tree model text dump and plot the model. -} -\details{ -The content of each node is organised as follows: - -\itemize{ - \item Feature name. - \item \code{Cover}: The sum of second order gradients of training data classified to the leaf. - If it is square loss, this simply corresponds to the number of instances seen by a split - or collected by a leaf during training. - The deeper in the tree a node is, the lower this metric will be. - \item \code{Gain} (for split nodes): the information gain metric of a split - (corresponds to the importance of the node in the model). - \item \code{Value} (for leaves): the margin value that the leaf may contribute to prediction. -} -The tree root nodes also indicate the Tree index (0-based). - -The "Yes" branches are marked by the "< split_value" label. -The branches that are also used for missing values are marked as bold -(as in "carrying extra capacity"). - -This function uses \href{http://www.graphviz.org/}{GraphViz} as a backend of DiagrammeR. -} -\examples{ -data(agaricus.train, package='xgboost') - -bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 3, - eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") -# plot all the trees -xgb.plot.tree(model = bst) -# plot only the first tree and display the node ID: -xgb.plot.tree(model = bst, trees = 0, show_node_id = TRUE) - -\dontrun{ -# Below is an example of how to save this plot to a file. -# Note that for `export_graph` to work, the DiagrammeRsvg and rsvg packages must also be installed. -library(DiagrammeR) -gr <- xgb.plot.tree(model=bst, trees=0:1, render=FALSE) -export_graph(gr, 'tree.pdf', width=1500, height=1900) -export_graph(gr, 'tree.png', width=1500, height=1900) -} - -} diff --git a/ml-xgboost/R-package/man/xgb.save.Rd b/ml-xgboost/R-package/man/xgb.save.Rd deleted file mode 100644 index 7d1842d..0000000 --- a/ml-xgboost/R-package/man/xgb.save.Rd +++ /dev/null @@ -1,41 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/xgb.save.R -\name{xgb.save} -\alias{xgb.save} -\title{Save xgboost model to binary file} -\usage{ -xgb.save(model, fname) -} -\arguments{ -\item{model}{model object of \code{xgb.Booster} class.} - -\item{fname}{name of the file to write.} -} -\description{ -Save xgboost model to a file in binary format. -} -\details{ -This method allows saving a model in an xgboost-internal binary format which is universal -among the various xgboost interfaces.
In R, the saved model file can be read in later -using either the \code{\link{xgb.load}} function or the \code{xgb_model} parameter -of \code{\link{xgb.train}}. - -Note: a model can also be saved as an R-object (e.g., by using \code{\link[base]{saveRDS}} -or \code{\link[base]{save}}). However, it would then only be compatible with R, and -the corresponding R-methods would need to be used to load it. -} -\examples{ -data(agaricus.train, package='xgboost') -data(agaricus.test, package='xgboost') -train <- agaricus.train -test <- agaricus.test -bst <- xgboost(data = train$data, label = train$label, max_depth = 2, - eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") -xgb.save(bst, 'xgb.model') -bst <- xgb.load('xgb.model') -if (file.exists('xgb.model')) file.remove('xgb.model') -pred <- predict(bst, test$data) -} -\seealso{ -\code{\link{xgb.load}}, \code{\link{xgb.Booster.complete}}. -} diff --git a/ml-xgboost/R-package/man/xgb.save.raw.Rd b/ml-xgboost/R-package/man/xgb.save.raw.Rd deleted file mode 100644 index 6f2faa0..0000000 --- a/ml-xgboost/R-package/man/xgb.save.raw.Rd +++ /dev/null @@ -1,27 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/xgb.save.raw.R -\name{xgb.save.raw} -\alias{xgb.save.raw} -\title{Save xgboost model to R's raw vector; -\code{xgb.load.raw} can be called to load the model back from the raw vector} -\usage{ -xgb.save.raw(model) -} -\arguments{ -\item{model}{the model object.} -} -\description{ -Save an xgboost model produced by \code{xgboost} or \code{xgb.train}. -} -\examples{ -data(agaricus.train, package='xgboost') -data(agaricus.test, package='xgboost') -train <- agaricus.train -test <- agaricus.test -bst <- xgboost(data = train$data, label = train$label, max_depth = 2, - eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") -raw <- xgb.save.raw(bst) -bst <- xgb.load.raw(raw) -pred <- predict(bst, test$data) - -} diff --git a/ml-xgboost/R-package/man/xgb.serialize.Rd b/ml-xgboost/R-package/man/xgb.serialize.Rd deleted file mode 100644 index 952441d..0000000 --- a/ml-xgboost/R-package/man/xgb.serialize.Rd +++ /dev/null @@ -1,29 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/xgb.serialize.R -\name{xgb.serialize} -\alias{xgb.serialize} -\title{Serialize the booster instance into R's raw vector. The serialization method differs -from \code{\link{xgb.save.raw}} as the latter saves only the model but not -parameters. This serialization format is not stable across different xgboost versions.} -\usage{ -xgb.serialize(booster) -} -\arguments{ -\item{booster}{the booster instance} -} -\description{ -Serialize the booster instance into R's raw vector. The serialization method differs -from \code{\link{xgb.save.raw}} as the latter saves only the model but not -parameters. This serialization format is not stable across different xgboost versions.
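The distinction drawn above can be made concrete with a short sketch; it assumes a trained booster \code{bst} as in the examples:

  raw_model <- xgb.save.raw(bst)   # model only; reload with xgb.load.raw()
  raw_full  <- xgb.serialize(bst)  # model plus parameters; format not stable across xgboost versions
  bst2 <- xgb.unserialize(raw_full)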
-} -\examples{ -data(agaricus.train, package='xgboost') -data(agaricus.test, package='xgboost') -train <- agaricus.train -test <- agaricus.test -bst <- xgboost(data = train$data, label = train$label, max_depth = 2, - eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic") -raw <- xgb.serialize(bst) -bst <- xgb.unserialize(raw) - -} diff --git a/ml-xgboost/R-package/man/xgb.train.Rd b/ml-xgboost/R-package/man/xgb.train.Rd deleted file mode 100644 index a6c91cc..0000000 --- a/ml-xgboost/R-package/man/xgb.train.Rd +++ /dev/null @@ -1,299 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/xgb.train.R, R/xgboost.R -\name{xgb.train} -\alias{xgb.train} -\alias{xgboost} -\title{eXtreme Gradient Boosting Training} -\usage{ -xgb.train( - params = list(), - data, - nrounds, - watchlist = list(), - obj = NULL, - feval = NULL, - verbose = 1, - print_every_n = 1L, - early_stopping_rounds = NULL, - maximize = NULL, - save_period = NULL, - save_name = "xgboost.model", - xgb_model = NULL, - callbacks = list(), - ... -) - -xgboost( - data = NULL, - label = NULL, - missing = NA, - weight = NULL, - params = list(), - nrounds, - verbose = 1, - print_every_n = 1L, - early_stopping_rounds = NULL, - maximize = NULL, - save_period = NULL, - save_name = "xgboost.model", - xgb_model = NULL, - callbacks = list(), - ... -) -} -\arguments{ -\item{params}{the list of parameters. - The complete list of parameters is available at \url{http://xgboost.readthedocs.io/en/latest/parameter.html}. - Below is a shorter summary: - -1. General Parameters - -\itemize{ - \item \code{booster} which booster to use, can be \code{gbtree} or \code{gblinear}. Default: \code{gbtree}. -} - -2. Booster Parameters - -2.1. Parameter for Tree Booster - -\itemize{ - \item \code{eta} control the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1} when it is added to the current approximation. Used to prevent overfitting by making the boosting process more conservative. Lower value for \code{eta} implies larger value for \code{nrounds}: low \code{eta} value means model more robust to overfitting but slower to compute. Default: 0.3 - \item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. the larger, the more conservative the algorithm will be. - \item \code{max_depth} maximum depth of a tree. Default: 6 - \item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be. Default: 1 - \item \code{subsample} subsample ratio of the training instance. Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees and this will prevent overfitting. It makes computation shorter (because less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1 - \item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1 - \item \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. Useful to test Random Forest through Xgboost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. 
Default: 1 - \item \code{monotone_constraints} A numerical vector consisting of \code{1}, \code{0} and \code{-1}, with length equal to the number of features in the training data. \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint. - \item \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions. Each item of the list represents one permitted interaction where specified features are allowed to interact with each other. Feature index values should start from \code{0} (\code{0} references the first column). Leave the argument unspecified for no interaction constraints. -} - -2.2. Parameter for Linear Booster - -\itemize{ - \item \code{lambda} L2 regularization term on weights. Default: 0 - \item \code{lambda_bias} L2 regularization term on bias. Default: 0 - \item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0 -} - -3. Task Parameters - -\itemize{ -\item \code{objective} specify the learning task and the corresponding learning objective; users can pass a self-defined function to it. The default objective options are below: - \itemize{ - \item \code{reg:squarederror} Regression with squared loss (Default). - \item \code{reg:logistic} logistic regression. - \item \code{binary:logistic} logistic regression for binary classification. Output probability. - \item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation. - \item \code{num_class} set the number of classes. To use only with multiclass objectives. - \item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class - 1}. - \item \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be further reshaped to an ndata by nclass matrix. The result contains predicted probabilities of each data point belonging to each class. - \item \code{rank:pairwise} set xgboost to do a ranking task by minimizing the pairwise loss. - } - \item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5 - \item \code{eval_metric} evaluation metrics for validation data. Users can pass a self-defined function to it. Default: the metric is assigned according to the objective (rmse for regression, error for classification, mean average precision for ranking). The list is provided in the Details section. -}} - -\item{data}{training dataset. \code{xgb.train} accepts only an \code{xgb.DMatrix} as the input. -\code{xgboost}, in addition, also accepts \code{matrix}, \code{dgCMatrix}, or the name of a local data file.} - -\item{nrounds}{max number of boosting iterations.} - -\item{watchlist}{named list of xgb.DMatrix datasets to use for evaluating model performance. -Metrics specified in either \code{eval_metric} or \code{feval} will be computed for each -of these datasets during each boosting iteration, and stored in the end as a field named -\code{evaluation_log} in the resulting object. When either \code{verbose>=1} or the -\code{\link{cb.print.evaluation}} callback is engaged, the performance results are continuously -printed out during the training. -E.g., specifying \code{watchlist=list(validation1=mat1, validation2=mat2)} allows tracking -the performance of each round's model on mat1 and mat2.} - -\item{obj}{customized objective function.
Returns the gradient and second order -gradient with given prediction and dtrain.} - -\item{feval}{customized evaluation function. Returns -\code{list(metric='metric-name', value='metric-value')} with given -prediction and dtrain.} - -\item{verbose}{If 0, xgboost will stay silent. If 1, it will print information about performance. -If 2, some additional information will be printed out. -Note that setting \code{verbose > 0} automatically engages the -\code{cb.print.evaluation(period=1)} callback function.} - -\item{print_every_n}{Print evaluation messages at every n-th iteration when \code{verbose>0}. -Default is 1, which means all messages are printed. This parameter is passed to the -\code{\link{cb.print.evaluation}} callback.} - -\item{early_stopping_rounds}{If \code{NULL}, the early stopping function is not triggered. -If set to an integer \code{k}, training with a validation set will stop if the performance -doesn't improve for \code{k} rounds. -Setting this parameter engages the \code{\link{cb.early.stop}} callback.} - -\item{maximize}{If \code{feval} and \code{early_stopping_rounds} are set, -then this parameter must be set as well. -When it is \code{TRUE}, it means the larger the evaluation score the better. -This parameter is passed to the \code{\link{cb.early.stop}} callback.} - -\item{save_period}{when it is non-NULL, the model is saved to disk after every \code{save_period} rounds; -0 means save at the end. The saving is handled by the \code{\link{cb.save.model}} callback.} - -\item{save_name}{the name or path for the periodically saved model file.} - -\item{xgb_model}{a previously built model to continue the training from. -Could be either an object of class \code{xgb.Booster}, or its raw data, or the name of a -file with a previously saved model.} - -\item{callbacks}{a list of callback functions to perform various tasks during boosting. -See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the -parameters' values. Users can provide either existing or their own callback methods in order -to customize the training process.} - -\item{...}{other parameters to pass to \code{params}.} - -\item{label}{vector of response values. Should not be provided when data is -a local data file name or an \code{xgb.DMatrix}.} - -\item{missing}{by default is set to NA, which means that NA values should be considered as 'missing' -by the algorithm. Sometimes, 0 or another extreme value might be used to represent missing values. -This parameter is only used when the input is a dense matrix.} - -\item{weight}{a vector indicating the weight for each row of the input.} -} -\value{ -An object of class \code{xgb.Booster} with the following elements: -\itemize{ - \item \code{handle} a handle (pointer) to the xgboost model in memory. - \item \code{raw} a cached memory dump of the xgboost model saved as R's \code{raw} type. - \item \code{niter} number of boosting iterations. - \item \code{evaluation_log} evaluation history stored as a \code{data.table} with the - first column corresponding to iteration number and the rest corresponding to evaluation - metrics' values. It is created by the \code{\link{cb.evaluation.log}} callback. - \item \code{call} a function call. - \item \code{params} parameters that were passed to the xgboost library. Note that it does not - capture parameters changed by the \code{\link{cb.reset.parameters}} callback. - \item \code{callbacks} callback functions that were either automatically assigned or - explicitly passed.
- \item \code{best_iteration} iteration number with the best evaluation metric value - (only available with early stopping). - \item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration, - which could further be used in the \code{predict} method - (only available with early stopping). - \item \code{best_score} the best evaluation metric value during early stopping - (only available with early stopping). - \item \code{feature_names} names of the training dataset features - (only when column names were defined in training data). - \item \code{nfeatures} number of features in training data. -} -} -\description{ -\code{xgb.train} is an advanced interface for training an xgboost model. -The \code{xgboost} function is a simpler wrapper for \code{xgb.train}. -} -\details{ -These are the training functions for \code{xgboost}. - -The \code{xgb.train} interface supports advanced features such as \code{watchlist}, -customized objective and evaluation metric functions, and is therefore more flexible -than the \code{xgboost} interface. - -Parallelization is automatically enabled if \code{OpenMP} is present. -The number of threads can also be manually specified via the \code{nthread} parameter. - -The evaluation metric is chosen automatically by Xgboost (according to the objective) -when the \code{eval_metric} parameter is not provided. -Users may set one or several \code{eval_metric} parameters. -Note that when using a customized metric, only this single metric can be used. -The following is the list of built-in metrics for which Xgboost provides an optimized implementation: - \itemize{ - \item \code{rmse} root mean square error. \url{http://en.wikipedia.org/wiki/Root_mean_square_error} - \item \code{logloss} negative log-likelihood. \url{http://en.wikipedia.org/wiki/Log-likelihood} - \item \code{mlogloss} multiclass logloss. \url{http://wiki.fast.ai/index.php/Log_Loss} - \item \code{error} Binary classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}. - By default, it uses the 0.5 threshold for predicted values to define negative and positive instances. - A different threshold (e.g., 0.) could be specified as "error@0." - \item \code{merror} Multiclass classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}. - \item \code{auc} Area under the curve. \url{http://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_curve} for ranking evaluation. - \item \code{aucpr} Area under the PR curve. \url{https://en.wikipedia.org/wiki/Precision_and_recall} for ranking evaluation. - \item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{http://en.wikipedia.org/wiki/NDCG} - } - -The following callbacks are automatically created when certain parameters are set: -\itemize{ - \item \code{cb.print.evaluation} is turned on when \code{verbose > 0}; - and the \code{print_every_n} parameter is passed to it. - \item \code{cb.evaluation.log} is on when \code{watchlist} is present. - \item \code{cb.early.stop}: when \code{early_stopping_rounds} is set. - \item \code{cb.save.model}: when \code{save_period > 0} is set.
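As a concrete illustration of the early-stopping entry in the list above, a sketch (it assumes the \code{dtrain} and \code{watchlist} objects constructed in the examples below):

  bst <- xgb.train(list(max_depth = 2, eta = 1, nthread = 2,
                        objective = "binary:logistic", eval_metric = "auc"),
                   dtrain, nrounds = 50, watchlist,
                   early_stopping_rounds = 3, verbose = 0)
  bst$best_iteration  # filled in by the automatically engaged cb.early.stop callback
  bst$best_score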
-} -} -\examples{ -data(agaricus.train, package='xgboost') -data(agaricus.test, package='xgboost') - -dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label) -dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label) -watchlist <- list(train = dtrain, eval = dtest) - -## A simple xgb.train example: -param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2, - objective = "binary:logistic", eval_metric = "auc") -bst <- xgb.train(param, dtrain, nrounds = 2, watchlist) - - -## An xgb.train example where custom objective and evaluation metric are used: -logregobj <- function(preds, dtrain) { - labels <- getinfo(dtrain, "label") - preds <- 1/(1 + exp(-preds)) - grad <- preds - labels - hess <- preds * (1 - preds) - return(list(grad = grad, hess = hess)) -} -evalerror <- function(preds, dtrain) { - labels <- getinfo(dtrain, "label") - err <- as.numeric(sum(labels != (preds > 0)))/length(labels) - return(list(metric = "error", value = err)) -} - -# These functions could be used by passing them either: -# as 'objective' and 'eval_metric' parameters in the params list: -param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2, - objective = logregobj, eval_metric = evalerror) -bst <- xgb.train(param, dtrain, nrounds = 2, watchlist) - -# or through the ... arguments: -param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2) -bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, - objective = logregobj, eval_metric = evalerror) - -# or as dedicated 'obj' and 'feval' parameters of xgb.train: -bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, - obj = logregobj, feval = evalerror) - - -## An xgb.train example of using variable learning rates at each iteration: -param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2, - objective = "binary:logistic", eval_metric = "auc") -my_etas <- list(eta = c(0.5, 0.1)) -bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, - callbacks = list(cb.reset.parameters(my_etas))) - -## Early stopping: -bst <- xgb.train(param, dtrain, nrounds = 25, watchlist, - early_stopping_rounds = 3) - -## An 'xgboost' interface example: -bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, - max_depth = 2, eta = 1, nthread = 2, nrounds = 2, - objective = "binary:logistic") -pred <- predict(bst, agaricus.test$data) - -} -\references{ -Tianqi Chen and Carlos Guestrin, "XGBoost: A Scalable Tree Boosting System", -22nd SIGKDD Conference on Knowledge Discovery and Data Mining, 2016, \url{https://arxiv.org/abs/1603.02754} -} -\seealso{ -\code{\link{callbacks}}, -\code{\link{predict.xgb.Booster}}, -\code{\link{xgb.cv}} -} diff --git a/ml-xgboost/R-package/man/xgb.unserialize.Rd b/ml-xgboost/R-package/man/xgb.unserialize.Rd deleted file mode 100644 index 7a11c5c..0000000 --- a/ml-xgboost/R-package/man/xgb.unserialize.Rd +++ /dev/null @@ -1,14 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/xgb.unserialize.R -\name{xgb.unserialize} -\alias{xgb.unserialize} -\title{Load the instance back from \code{\link{xgb.serialize}}} -\usage{ -xgb.unserialize(buffer) -} -\arguments{ -\item{buffer}{the buffer containing booster instance saved by \code{\link{xgb.serialize}}} -} -\description{ -Load the instance back from \code{\link{xgb.serialize}} -} diff --git a/ml-xgboost/R-package/man/xgboost-deprecated.Rd b/ml-xgboost/R-package/man/xgboost-deprecated.Rd deleted file mode 100644 index 6ab0c6c..0000000 --- a/ml-xgboost/R-package/man/xgboost-deprecated.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% 
Generated by roxygen2: do not edit by hand -% Please edit documentation in R/utils.R -\name{xgboost-deprecated} -\alias{xgboost-deprecated} -\title{Deprecation notices.} -\description{ -At this time, some of the parameter names were changed in order to make the code style more uniform. -The deprecated parameters would be removed in the next release. -} -\details{ -To see all the current deprecated and new parameters, check the \code{xgboost:::depr_par_lut} table. - -A deprecation warning is shown when any of the deprecated parameters is used in a call. -An additional warning is shown when there was a partial match to a deprecated parameter -(as R is able to partially match parameter names). -} diff --git a/ml-xgboost/R-package/remove_warning_suppression_pragma.sh b/ml-xgboost/R-package/remove_warning_suppression_pragma.sh deleted file mode 100644 index 5399ac9..0000000 --- a/ml-xgboost/R-package/remove_warning_suppression_pragma.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash -# remove all #pragma's that suppress compiler warnings -set -e -set -x -for file in xgboost/src/dmlc-core/include/dmlc/*.h -do - sed -i.bak -e 's/^.*#pragma GCC diagnostic.*$//' -e 's/^.*#pragma clang diagnostic.*$//' -e 's/^.*#pragma warning.*$//' "${file}" -done -for file in xgboost/src/dmlc-core/include/dmlc/*.h.bak -do - rm "${file}" -done -set +x -set +e diff --git a/ml-xgboost/R-package/src/Makevars.in b/ml-xgboost/R-package/src/Makevars.in deleted file mode 100644 index ac25b50..0000000 --- a/ml-xgboost/R-package/src/Makevars.in +++ /dev/null @@ -1,24 +0,0 @@ -# package root -PKGROOT=../../ -ENABLE_STD_THREAD=1 -# _*_ mode: Makefile; _*_ - -CXX_STD = CXX11 - -XGB_RFLAGS = -DXGBOOST_STRICT_R_MODE=1 -DDMLC_LOG_BEFORE_THROW=0\ - -DDMLC_ENABLE_STD_THREAD=$(ENABLE_STD_THREAD) -DDMLC_DISABLE_STDIN=1\ - -DDMLC_LOG_CUSTOMIZE=1 -DXGBOOST_CUSTOMIZE_LOGGER=1\ - -DRABIT_CUSTOMIZE_MSG_ -DRABIT_STRICT_CXX98_ - -# disable the use of thread_local for 32 bit windows: -ifeq ($(R_OSTYPE)$(WIN),windows) - XGB_RFLAGS += -DDMLC_CXX11_THREAD_LOCAL=0 -endif -$(foreach v, $(XGB_RFLAGS), $(warning $(v))) - -PKG_CPPFLAGS= -I$(PKGROOT)/include -I$(PKGROOT)/dmlc-core/include -I$(PKGROOT)/rabit/include -I$(PKGROOT) $(XGB_RFLAGS) -PKG_CXXFLAGS= @OPENMP_CXXFLAGS@ @ENDIAN_FLAG@ -pthread -PKG_LIBS = @OPENMP_CXXFLAGS@ @OPENMP_LIB@ @ENDIAN_FLAG@ @BACKTRACE_LIB@ -pthread -OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o ./init.o\ - $(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o\ - $(PKGROOT)/rabit/src/engine_empty.o $(PKGROOT)/rabit/src/c_api.o diff --git a/ml-xgboost/R-package/src/Makevars.win b/ml-xgboost/R-package/src/Makevars.win deleted file mode 100644 index 2e24116..0000000 --- a/ml-xgboost/R-package/src/Makevars.win +++ /dev/null @@ -1,38 +0,0 @@ -# package root -PKGROOT=./ -ENABLE_STD_THREAD=0 -# _*_ mode: Makefile; _*_ - -# This file is only used for windows compilation from github -# It will be replaced with Makevars.in for the CRAN version -.PHONY: all xgblib -all: $(SHLIB) -$(SHLIB): xgblib -xgblib: - cp -r ../../src . - cp -r ../../rabit . - cp -r ../../dmlc-core . - cp -r ../../include . - cp -r ../../amalgamation . 
- -CXX_STD = CXX11 - -XGB_RFLAGS = -DXGBOOST_STRICT_R_MODE=1 -DDMLC_LOG_BEFORE_THROW=0\ - -DDMLC_ENABLE_STD_THREAD=$(ENABLE_STD_THREAD) -DDMLC_DISABLE_STDIN=1\ - -DDMLC_LOG_CUSTOMIZE=1 -DXGBOOST_CUSTOMIZE_LOGGER=1\ - -DRABIT_CUSTOMIZE_MSG_ -DRABIT_STRICT_CXX98_ - -# disable the use of thread_local for 32 bit windows: -ifeq ($(R_OSTYPE)$(WIN),windows) - XGB_RFLAGS += -DDMLC_CXX11_THREAD_LOCAL=0 -endif -$(foreach v, $(XGB_RFLAGS), $(warning $(v))) - -PKG_CPPFLAGS= -I$(PKGROOT)/include -I$(PKGROOT)/dmlc-core/include -I$(PKGROOT)/rabit/include -I$(PKGROOT) $(XGB_RFLAGS) -PKG_CXXFLAGS= $(SHLIB_OPENMP_CXXFLAGS) $(SHLIB_PTHREAD_FLAGS) -PKG_LIBS = $(SHLIB_OPENMP_CXXFLAGS) $(SHLIB_PTHREAD_FLAGS) -OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o ./init.o\ - $(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o\ - $(PKGROOT)/rabit/src/engine_empty.o $(PKGROOT)/rabit/src/c_api.o - -$(OBJECTS) : xgblib diff --git a/ml-xgboost/R-package/src/init.c b/ml-xgboost/R-package/src/init.c deleted file mode 100644 index 2093059..0000000 --- a/ml-xgboost/R-package/src/init.c +++ /dev/null @@ -1,85 +0,0 @@ -/* Copyright (c) 2015 by Contributors - * - * This file was initially generated using the following R command: - * tools::package_native_routine_registration_skeleton('.', con = 'src/init.c', character_only = F) - * and edited to conform to xgboost C linter requirements. For details, see - * https://cran.r-project.org/doc/manuals/r-release/R-exts.html#Registering-native-routines - */ -#include -#include -#include -#include - -/* FIXME: -Check these declarations against the C/Fortran source code. -*/ - -/* .Call calls */ -extern SEXP XGBoosterBoostOneIter_R(SEXP, SEXP, SEXP, SEXP); -extern SEXP XGBoosterCreate_R(SEXP); -extern SEXP XGBoosterDumpModel_R(SEXP, SEXP, SEXP, SEXP); -extern SEXP XGBoosterEvalOneIter_R(SEXP, SEXP, SEXP, SEXP); -extern SEXP XGBoosterGetAttrNames_R(SEXP); -extern SEXP XGBoosterGetAttr_R(SEXP, SEXP); -extern SEXP XGBoosterLoadModelFromRaw_R(SEXP, SEXP); -extern SEXP XGBoosterLoadModel_R(SEXP, SEXP); -extern SEXP XGBoosterSaveJsonConfig_R(SEXP handle); -extern SEXP XGBoosterLoadJsonConfig_R(SEXP handle, SEXP value); -extern SEXP XGBoosterSerializeToBuffer_R(SEXP handle); -extern SEXP XGBoosterUnserializeFromBuffer_R(SEXP handle, SEXP raw); -extern SEXP XGBoosterModelToRaw_R(SEXP); -extern SEXP XGBoosterPredict_R(SEXP, SEXP, SEXP, SEXP, SEXP); -extern SEXP XGBoosterSaveModel_R(SEXP, SEXP); -extern SEXP XGBoosterSetAttr_R(SEXP, SEXP, SEXP); -extern SEXP XGBoosterSetParam_R(SEXP, SEXP, SEXP); -extern SEXP XGBoosterUpdateOneIter_R(SEXP, SEXP, SEXP); -extern SEXP XGCheckNullPtr_R(SEXP); -extern SEXP XGDMatrixCreateFromCSC_R(SEXP, SEXP, SEXP, SEXP); -extern SEXP XGDMatrixCreateFromFile_R(SEXP, SEXP); -extern SEXP XGDMatrixCreateFromMat_R(SEXP, SEXP); -extern SEXP XGDMatrixGetInfo_R(SEXP, SEXP); -extern SEXP XGDMatrixNumCol_R(SEXP); -extern SEXP XGDMatrixNumRow_R(SEXP); -extern SEXP XGDMatrixSaveBinary_R(SEXP, SEXP, SEXP); -extern SEXP XGDMatrixSetInfo_R(SEXP, SEXP, SEXP); -extern SEXP XGDMatrixSliceDMatrix_R(SEXP, SEXP); - -static const R_CallMethodDef CallEntries[] = { - {"XGBoosterBoostOneIter_R", (DL_FUNC) &XGBoosterBoostOneIter_R, 4}, - {"XGBoosterCreate_R", (DL_FUNC) &XGBoosterCreate_R, 1}, - {"XGBoosterDumpModel_R", (DL_FUNC) &XGBoosterDumpModel_R, 4}, - {"XGBoosterEvalOneIter_R", (DL_FUNC) &XGBoosterEvalOneIter_R, 4}, - {"XGBoosterGetAttrNames_R", (DL_FUNC) &XGBoosterGetAttrNames_R, 1}, - {"XGBoosterGetAttr_R", (DL_FUNC) &XGBoosterGetAttr_R, 2}, - 
{"XGBoosterLoadModelFromRaw_R", (DL_FUNC) &XGBoosterLoadModelFromRaw_R, 2}, - {"XGBoosterLoadModel_R", (DL_FUNC) &XGBoosterLoadModel_R, 2}, - {"XGBoosterSaveJsonConfig_R", (DL_FUNC) &XGBoosterSaveJsonConfig_R, 1}, - {"XGBoosterLoadJsonConfig_R", (DL_FUNC) &XGBoosterLoadJsonConfig_R, 2}, - {"XGBoosterSerializeToBuffer_R", (DL_FUNC) &XGBoosterSerializeToBuffer_R, 1}, - {"XGBoosterUnserializeFromBuffer_R", (DL_FUNC) &XGBoosterUnserializeFromBuffer_R, 2}, - {"XGBoosterModelToRaw_R", (DL_FUNC) &XGBoosterModelToRaw_R, 1}, - {"XGBoosterPredict_R", (DL_FUNC) &XGBoosterPredict_R, 5}, - {"XGBoosterSaveModel_R", (DL_FUNC) &XGBoosterSaveModel_R, 2}, - {"XGBoosterSetAttr_R", (DL_FUNC) &XGBoosterSetAttr_R, 3}, - {"XGBoosterSetParam_R", (DL_FUNC) &XGBoosterSetParam_R, 3}, - {"XGBoosterUpdateOneIter_R", (DL_FUNC) &XGBoosterUpdateOneIter_R, 3}, - {"XGCheckNullPtr_R", (DL_FUNC) &XGCheckNullPtr_R, 1}, - {"XGDMatrixCreateFromCSC_R", (DL_FUNC) &XGDMatrixCreateFromCSC_R, 4}, - {"XGDMatrixCreateFromFile_R", (DL_FUNC) &XGDMatrixCreateFromFile_R, 2}, - {"XGDMatrixCreateFromMat_R", (DL_FUNC) &XGDMatrixCreateFromMat_R, 2}, - {"XGDMatrixGetInfo_R", (DL_FUNC) &XGDMatrixGetInfo_R, 2}, - {"XGDMatrixNumCol_R", (DL_FUNC) &XGDMatrixNumCol_R, 1}, - {"XGDMatrixNumRow_R", (DL_FUNC) &XGDMatrixNumRow_R, 1}, - {"XGDMatrixSaveBinary_R", (DL_FUNC) &XGDMatrixSaveBinary_R, 3}, - {"XGDMatrixSetInfo_R", (DL_FUNC) &XGDMatrixSetInfo_R, 3}, - {"XGDMatrixSliceDMatrix_R", (DL_FUNC) &XGDMatrixSliceDMatrix_R, 2}, - {NULL, NULL, 0} -}; - -#if defined(_WIN32) -__declspec(dllexport) -#endif // defined(_WIN32) -void R_init_xgboost(DllInfo *dll) { - R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); - R_useDynamicSymbols(dll, FALSE); -} diff --git a/ml-xgboost/R-package/src/xgboost_R.cc b/ml-xgboost/R-package/src/xgboost_R.cc deleted file mode 100644 index cb86ef4..0000000 --- a/ml-xgboost/R-package/src/xgboost_R.cc +++ /dev/null @@ -1,491 +0,0 @@ -// Copyright (c) 2014 by Contributors -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "./xgboost_R.h" - -/*! - * \brief macro to annotate begin of api - */ -#define R_API_BEGIN() \ - GetRNGstate(); \ - try { -/*! - * \brief macro to annotate end of api - */ -#define R_API_END() \ - } catch(dmlc::Error& e) { \ - PutRNGstate(); \ - error(e.what()); \ - } \ - PutRNGstate(); - -/*! - * \brief macro to check the call. 
- */ -#define CHECK_CALL(x) \ - if ((x) != 0) { \ - error(XGBGetLastError()); \ - } - - -using namespace dmlc; - -SEXP XGCheckNullPtr_R(SEXP handle) { - return ScalarLogical(R_ExternalPtrAddr(handle) == NULL); -} - -void _DMatrixFinalizer(SEXP ext) { - R_API_BEGIN(); - if (R_ExternalPtrAddr(ext) == NULL) return; - CHECK_CALL(XGDMatrixFree(R_ExternalPtrAddr(ext))); - R_ClearExternalPtr(ext); - R_API_END(); -} - -SEXP XGDMatrixCreateFromFile_R(SEXP fname, SEXP silent) { - SEXP ret; - R_API_BEGIN(); - DMatrixHandle handle; - CHECK_CALL(XGDMatrixCreateFromFile(CHAR(asChar(fname)), asInteger(silent), &handle)); - ret = PROTECT(R_MakeExternalPtr(handle, R_NilValue, R_NilValue)); - R_RegisterCFinalizerEx(ret, _DMatrixFinalizer, TRUE); - R_API_END(); - UNPROTECT(1); - return ret; -} - -SEXP XGDMatrixCreateFromMat_R(SEXP mat, - SEXP missing) { - SEXP ret; - R_API_BEGIN(); - SEXP dim = getAttrib(mat, R_DimSymbol); - size_t nrow = static_cast(INTEGER(dim)[0]); - size_t ncol = static_cast(INTEGER(dim)[1]); - const bool is_int = TYPEOF(mat) == INTSXP; - double *din; - int *iin; - if (is_int) { - iin = INTEGER(mat); - } else { - din = REAL(mat); - } - std::vector data(nrow * ncol); - #pragma omp parallel for schedule(static) - for (omp_ulong i = 0; i < nrow; ++i) { - for (size_t j = 0; j < ncol; ++j) { - data[i * ncol +j] = is_int ? static_cast(iin[i + nrow * j]) : din[i + nrow * j]; - } - } - DMatrixHandle handle; - CHECK_CALL(XGDMatrixCreateFromMat(BeginPtr(data), nrow, ncol, asReal(missing), &handle)); - ret = PROTECT(R_MakeExternalPtr(handle, R_NilValue, R_NilValue)); - R_RegisterCFinalizerEx(ret, _DMatrixFinalizer, TRUE); - R_API_END(); - UNPROTECT(1); - return ret; -} - -SEXP XGDMatrixCreateFromCSC_R(SEXP indptr, - SEXP indices, - SEXP data, - SEXP num_row) { - SEXP ret; - R_API_BEGIN(); - const int *p_indptr = INTEGER(indptr); - const int *p_indices = INTEGER(indices); - const double *p_data = REAL(data); - size_t nindptr = static_cast(length(indptr)); - size_t ndata = static_cast(length(data)); - size_t nrow = static_cast(INTEGER(num_row)[0]); - std::vector col_ptr_(nindptr); - std::vector indices_(ndata); - std::vector data_(ndata); - - for (size_t i = 0; i < nindptr; ++i) { - col_ptr_[i] = static_cast(p_indptr[i]); - } - #pragma omp parallel for schedule(static) - for (int64_t i = 0; i < static_cast(ndata); ++i) { - indices_[i] = static_cast(p_indices[i]); - data_[i] = static_cast(p_data[i]); - } - DMatrixHandle handle; - CHECK_CALL(XGDMatrixCreateFromCSCEx(BeginPtr(col_ptr_), BeginPtr(indices_), - BeginPtr(data_), nindptr, ndata, - nrow, &handle)); - ret = PROTECT(R_MakeExternalPtr(handle, R_NilValue, R_NilValue)); - R_RegisterCFinalizerEx(ret, _DMatrixFinalizer, TRUE); - R_API_END(); - UNPROTECT(1); - return ret; -} - -SEXP XGDMatrixSliceDMatrix_R(SEXP handle, SEXP idxset) { - SEXP ret; - R_API_BEGIN(); - int len = length(idxset); - std::vector idxvec(len); - for (int i = 0; i < len; ++i) { - idxvec[i] = INTEGER(idxset)[i] - 1; - } - DMatrixHandle res; - CHECK_CALL(XGDMatrixSliceDMatrixEx(R_ExternalPtrAddr(handle), - BeginPtr(idxvec), len, - &res, - 0)); - ret = PROTECT(R_MakeExternalPtr(res, R_NilValue, R_NilValue)); - R_RegisterCFinalizerEx(ret, _DMatrixFinalizer, TRUE); - R_API_END(); - UNPROTECT(1); - return ret; -} - -SEXP XGDMatrixSaveBinary_R(SEXP handle, SEXP fname, SEXP silent) { - R_API_BEGIN(); - CHECK_CALL(XGDMatrixSaveBinary(R_ExternalPtrAddr(handle), - CHAR(asChar(fname)), - asInteger(silent))); - R_API_END(); - return R_NilValue; -} - -SEXP XGDMatrixSetInfo_R(SEXP handle, SEXP 
field, SEXP array) { - R_API_BEGIN(); - int len = length(array); - const char *name = CHAR(asChar(field)); - if (!strcmp("group", name)) { - std::vector vec(len); - #pragma omp parallel for schedule(static) - for (int i = 0; i < len; ++i) { - vec[i] = static_cast(INTEGER(array)[i]); - } - CHECK_CALL(XGDMatrixSetUIntInfo(R_ExternalPtrAddr(handle), - CHAR(asChar(field)), - BeginPtr(vec), len)); - } else { - std::vector vec(len); - #pragma omp parallel for schedule(static) - for (int i = 0; i < len; ++i) { - vec[i] = REAL(array)[i]; - } - CHECK_CALL(XGDMatrixSetFloatInfo(R_ExternalPtrAddr(handle), - CHAR(asChar(field)), - BeginPtr(vec), len)); - } - R_API_END(); - return R_NilValue; -} - -SEXP XGDMatrixGetInfo_R(SEXP handle, SEXP field) { - SEXP ret; - R_API_BEGIN(); - bst_ulong olen; - const float *res; - CHECK_CALL(XGDMatrixGetFloatInfo(R_ExternalPtrAddr(handle), - CHAR(asChar(field)), - &olen, - &res)); - ret = PROTECT(allocVector(REALSXP, olen)); - for (size_t i = 0; i < olen; ++i) { - REAL(ret)[i] = res[i]; - } - R_API_END(); - UNPROTECT(1); - return ret; -} - -SEXP XGDMatrixNumRow_R(SEXP handle) { - bst_ulong nrow; - R_API_BEGIN(); - CHECK_CALL(XGDMatrixNumRow(R_ExternalPtrAddr(handle), &nrow)); - R_API_END(); - return ScalarInteger(static_cast(nrow)); -} - -SEXP XGDMatrixNumCol_R(SEXP handle) { - bst_ulong ncol; - R_API_BEGIN(); - CHECK_CALL(XGDMatrixNumCol(R_ExternalPtrAddr(handle), &ncol)); - R_API_END(); - return ScalarInteger(static_cast(ncol)); -} - -// functions related to booster -void _BoosterFinalizer(SEXP ext) { - if (R_ExternalPtrAddr(ext) == NULL) return; - CHECK_CALL(XGBoosterFree(R_ExternalPtrAddr(ext))); - R_ClearExternalPtr(ext); -} - -SEXP XGBoosterCreate_R(SEXP dmats) { - SEXP ret; - R_API_BEGIN(); - int len = length(dmats); - std::vector dvec; - for (int i = 0; i < len; ++i) { - dvec.push_back(R_ExternalPtrAddr(VECTOR_ELT(dmats, i))); - } - BoosterHandle handle; - CHECK_CALL(XGBoosterCreate(BeginPtr(dvec), dvec.size(), &handle)); - ret = PROTECT(R_MakeExternalPtr(handle, R_NilValue, R_NilValue)); - R_RegisterCFinalizerEx(ret, _BoosterFinalizer, TRUE); - R_API_END(); - UNPROTECT(1); - return ret; -} - -SEXP XGBoosterSetParam_R(SEXP handle, SEXP name, SEXP val) { - R_API_BEGIN(); - CHECK_CALL(XGBoosterSetParam(R_ExternalPtrAddr(handle), - CHAR(asChar(name)), - CHAR(asChar(val)))); - R_API_END(); - return R_NilValue; -} - -SEXP XGBoosterUpdateOneIter_R(SEXP handle, SEXP iter, SEXP dtrain) { - R_API_BEGIN(); - CHECK_CALL(XGBoosterUpdateOneIter(R_ExternalPtrAddr(handle), - asInteger(iter), - R_ExternalPtrAddr(dtrain))); - R_API_END(); - return R_NilValue; -} - -SEXP XGBoosterBoostOneIter_R(SEXP handle, SEXP dtrain, SEXP grad, SEXP hess) { - R_API_BEGIN(); - CHECK_EQ(length(grad), length(hess)) - << "gradient and hess must have same length"; - int len = length(grad); - std::vector tgrad(len), thess(len); - #pragma omp parallel for schedule(static) - for (int j = 0; j < len; ++j) { - tgrad[j] = REAL(grad)[j]; - thess[j] = REAL(hess)[j]; - } - CHECK_CALL(XGBoosterBoostOneIter(R_ExternalPtrAddr(handle), - R_ExternalPtrAddr(dtrain), - BeginPtr(tgrad), BeginPtr(thess), - len)); - R_API_END(); - return R_NilValue; -} - -SEXP XGBoosterEvalOneIter_R(SEXP handle, SEXP iter, SEXP dmats, SEXP evnames) { - const char *ret; - R_API_BEGIN(); - CHECK_EQ(length(dmats), length(evnames)) - << "dmats and evnams must have same length"; - int len = length(dmats); - std::vector vec_dmats; - std::vector vec_names; - std::vector vec_sptr; - for (int i = 0; i < len; ++i) { - 
vec_dmats.push_back(R_ExternalPtrAddr(VECTOR_ELT(dmats, i))); - vec_names.push_back(std::string(CHAR(asChar(VECTOR_ELT(evnames, i))))); - } - for (int i = 0; i < len; ++i) { - vec_sptr.push_back(vec_names[i].c_str()); - } - CHECK_CALL(XGBoosterEvalOneIter(R_ExternalPtrAddr(handle), - asInteger(iter), - BeginPtr(vec_dmats), - BeginPtr(vec_sptr), - len, &ret)); - R_API_END(); - return mkString(ret); -} - -SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask, - SEXP ntree_limit, SEXP training) { - SEXP ret; - R_API_BEGIN(); - bst_ulong olen; - const float *res; - CHECK_CALL(XGBoosterPredict(R_ExternalPtrAddr(handle), - R_ExternalPtrAddr(dmat), - asInteger(option_mask), - asInteger(ntree_limit), - asInteger(training), - &olen, &res)); - ret = PROTECT(allocVector(REALSXP, olen)); - for (size_t i = 0; i < olen; ++i) { - REAL(ret)[i] = res[i]; - } - R_API_END(); - UNPROTECT(1); - return ret; -} - -SEXP XGBoosterLoadModel_R(SEXP handle, SEXP fname) { - R_API_BEGIN(); - CHECK_CALL(XGBoosterLoadModel(R_ExternalPtrAddr(handle), CHAR(asChar(fname)))); - R_API_END(); - return R_NilValue; -} - -SEXP XGBoosterSaveModel_R(SEXP handle, SEXP fname) { - R_API_BEGIN(); - CHECK_CALL(XGBoosterSaveModel(R_ExternalPtrAddr(handle), CHAR(asChar(fname)))); - R_API_END(); - return R_NilValue; -} - -SEXP XGBoosterModelToRaw_R(SEXP handle) { - SEXP ret; - R_API_BEGIN(); - bst_ulong olen; - const char *raw; - CHECK_CALL(XGBoosterGetModelRaw(R_ExternalPtrAddr(handle), &olen, &raw)); - ret = PROTECT(allocVector(RAWSXP, olen)); - if (olen != 0) { - memcpy(RAW(ret), raw, olen); - } - R_API_END(); - UNPROTECT(1); - return ret; -} - -SEXP XGBoosterLoadModelFromRaw_R(SEXP handle, SEXP raw) { - R_API_BEGIN(); - CHECK_CALL(XGBoosterLoadModelFromBuffer(R_ExternalPtrAddr(handle), - RAW(raw), - length(raw))); - R_API_END(); - return R_NilValue; -} - -SEXP XGBoosterSaveJsonConfig_R(SEXP handle) { - const char* ret; - R_API_BEGIN(); - bst_ulong len {0}; - CHECK_CALL(XGBoosterSaveJsonConfig(R_ExternalPtrAddr(handle), - &len, - &ret)); - R_API_END(); - return mkString(ret); -} - -SEXP XGBoosterLoadJsonConfig_R(SEXP handle, SEXP value) { - R_API_BEGIN(); - XGBoosterLoadJsonConfig(R_ExternalPtrAddr(handle), CHAR(asChar(value))); - R_API_END(); - return R_NilValue; -} - -SEXP XGBoosterSerializeToBuffer_R(SEXP handle) { - SEXP ret; - R_API_BEGIN(); - bst_ulong out_len; - const char *raw; - CHECK_CALL(XGBoosterSerializeToBuffer(R_ExternalPtrAddr(handle), &out_len, &raw)); - ret = PROTECT(allocVector(RAWSXP, out_len)); - if (out_len != 0) { - memcpy(RAW(ret), raw, out_len); - } - R_API_END(); - UNPROTECT(1); - return ret; -} - -SEXP XGBoosterUnserializeFromBuffer_R(SEXP handle, SEXP raw) { - R_API_BEGIN(); - XGBoosterUnserializeFromBuffer(R_ExternalPtrAddr(handle), - RAW(raw), - length(raw)); - R_API_END(); - return R_NilValue; -} - -SEXP XGBoosterDumpModel_R(SEXP handle, SEXP fmap, SEXP with_stats, SEXP dump_format) { - SEXP out; - R_API_BEGIN(); - bst_ulong olen; - const char **res; - const char *fmt = CHAR(asChar(dump_format)); - CHECK_CALL(XGBoosterDumpModelEx(R_ExternalPtrAddr(handle), - CHAR(asChar(fmap)), - asInteger(with_stats), - fmt, - &olen, &res)); - out = PROTECT(allocVector(STRSXP, olen)); - if (!strcmp("json", fmt)) { - std::stringstream stream; - stream << "[\n"; - for (size_t i = 0; i < olen; ++i) { - stream << res[i]; - if (i < olen - 1) { - stream << ",\n"; - } else { - stream << "\n"; - } - } - stream << "]"; - SET_STRING_ELT(out, 0, mkChar(stream.str().c_str())); - } else { - for (size_t i = 0; i < olen; ++i) 
{ - std::stringstream stream; - stream << "booster[" << i << "]\n" << res[i]; - SET_STRING_ELT(out, i, mkChar(stream.str().c_str())); - } - } - R_API_END(); - UNPROTECT(1); - return out; -} - -SEXP XGBoosterGetAttr_R(SEXP handle, SEXP name) { - SEXP out; - R_API_BEGIN(); - int success; - const char *val; - CHECK_CALL(XGBoosterGetAttr(R_ExternalPtrAddr(handle), - CHAR(asChar(name)), - &val, - &success)); - if (success) { - out = PROTECT(allocVector(STRSXP, 1)); - SET_STRING_ELT(out, 0, mkChar(val)); - } else { - out = PROTECT(R_NilValue); - } - R_API_END(); - UNPROTECT(1); - return out; -} - -SEXP XGBoosterSetAttr_R(SEXP handle, SEXP name, SEXP val) { - R_API_BEGIN(); - const char *v = isNull(val) ? nullptr : CHAR(asChar(val)); - CHECK_CALL(XGBoosterSetAttr(R_ExternalPtrAddr(handle), - CHAR(asChar(name)), v)); - R_API_END(); - return R_NilValue; -} - -SEXP XGBoosterGetAttrNames_R(SEXP handle) { - SEXP out; - R_API_BEGIN(); - bst_ulong len; - const char **res; - CHECK_CALL(XGBoosterGetAttrNames(R_ExternalPtrAddr(handle), - &len, &res)); - if (len > 0) { - out = PROTECT(allocVector(STRSXP, len)); - for (size_t i = 0; i < len; ++i) { - SET_STRING_ELT(out, i, mkChar(res[i])); - } - } else { - out = PROTECT(R_NilValue); - } - R_API_END(); - UNPROTECT(1); - return out; -} diff --git a/ml-xgboost/R-package/src/xgboost_R.h b/ml-xgboost/R-package/src/xgboost_R.h deleted file mode 100644 index be16ff9..0000000 --- a/ml-xgboost/R-package/src/xgboost_R.h +++ /dev/null @@ -1,247 +0,0 @@ -/*! - * Copyright 2014 (c) by Contributors - * \file xgboost_R.h - * \author Tianqi Chen - * \brief R wrapper of xgboost - */ -#ifndef XGBOOST_R_H_ // NOLINT(*) -#define XGBOOST_R_H_ // NOLINT(*) - - -#include <R.h> -#include <Rinternals.h> -#include <R_ext/Random.h> - -#include <xgboost/c_api.h> - -/*! - * \brief check whether a handle is NULL - * \param handle - * \return whether it is null ptr - */ -XGB_DLL SEXP XGCheckNullPtr_R(SEXP handle); - -/*! - * \brief load a data matrix - * \param fname name of the content - * \param silent whether to print messages - * \return a loaded data matrix - */ -XGB_DLL SEXP XGDMatrixCreateFromFile_R(SEXP fname, SEXP silent); - -/*! - * \brief create matrix content from dense matrix - * This assumes the matrix is stored in column major format - * \param mat R matrix object - * \param missing which value to represent missing value - * \return created dmatrix - */ -XGB_DLL SEXP XGDMatrixCreateFromMat_R(SEXP mat, - SEXP missing); -/*! - * \brief create a matrix content from CSC format - * \param indptr pointer to column headers - * \param indices row indices - * \param data content of the data - * \param num_row number of rows (when set to 0, it is guessed from the data) - * \return created dmatrix - */ -XGB_DLL SEXP XGDMatrixCreateFromCSC_R(SEXP indptr, - SEXP indices, - SEXP data, - SEXP num_row); - -/*! - * \brief create a new dmatrix from sliced content of existing matrix - * \param handle instance of data matrix to be sliced - * \param idxset index set - * \return a sliced new matrix - */ -XGB_DLL SEXP XGDMatrixSliceDMatrix_R(SEXP handle, SEXP idxset); - -/*! - * \brief save a data matrix to a binary file - * \param handle an instance of data matrix - * \param fname file name - * \param silent print statistics when saving - * \return R_NilValue - */ -XGB_DLL SEXP XGDMatrixSaveBinary_R(SEXP handle, SEXP fname, SEXP silent); - -/*!
- * \brief set information on a dmatrix - * \param handle an instance of data matrix - * \param field field name, can be label, weight - * \param array pointer to float vector - * \return R_NilValue - */ -XGB_DLL SEXP XGDMatrixSetInfo_R(SEXP handle, SEXP field, SEXP array); - -/*! - * \brief get info vector from matrix - * \param handle an instance of data matrix - * \param field field name - * \return info vector - */ -XGB_DLL SEXP XGDMatrixGetInfo_R(SEXP handle, SEXP field); - -/*! - * \brief return number of rows - * \param handle an instance of data matrix - */ -XGB_DLL SEXP XGDMatrixNumRow_R(SEXP handle); - -/*! - * \brief return number of columns - * \param handle an instance of data matrix - */ -XGB_DLL SEXP XGDMatrixNumCol_R(SEXP handle); - -/*! - * \brief create xgboost learner - * \param dmats a list of dmatrix handles that will be cached - */ -XGB_DLL SEXP XGBoosterCreate_R(SEXP dmats); - -/*! - * \brief set parameters - * \param handle handle - * \param name parameter name - * \param val value of parameter - * \return R_NilValue - */ -XGB_DLL SEXP XGBoosterSetParam_R(SEXP handle, SEXP name, SEXP val); - -/*! - * \brief update the model in one round using dtrain - * \param handle handle - * \param iter current iteration rounds - * \param dtrain training data - * \return R_NilValue - */ -XGB_DLL SEXP XGBoosterUpdateOneIter_R(SEXP handle, SEXP iter, SEXP dtrain); - -/*! - * \brief update the model by directly specifying the gradient and second-order gradient; - * this can be used instead of UpdateOneIter to support a customized loss function - * \param handle handle - * \param dtrain training data - * \param grad gradient statistics - * \param hess second order gradient statistics - * \return R_NilValue - */ -XGB_DLL SEXP XGBoosterBoostOneIter_R(SEXP handle, SEXP dtrain, SEXP grad, SEXP hess); - -/*! - * \brief get evaluation statistics for xgboost - * \param handle handle - * \param iter current iteration rounds - * \param dmats list of handles to dmatrices - * \param evnames names of the evaluation data sets - * \return the string containing evaluation stats - */ -XGB_DLL SEXP XGBoosterEvalOneIter_R(SEXP handle, SEXP iter, SEXP dmats, SEXP evnames); - -/*! - * \brief make prediction based on dmat - * \param handle handle - * \param dmat data matrix - * \param option_mask output_margin:1 predict_leaf:2 - * \param ntree_limit limit number of trees used in prediction - * \param training Whether the prediction value is used for training. - */ -XGB_DLL SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask, - SEXP ntree_limit, SEXP training); -/*! - * \brief load model from existing file - * \param handle handle - * \param fname file name - * \return R_NilValue - */ -XGB_DLL SEXP XGBoosterLoadModel_R(SEXP handle, SEXP fname); - -/*! - * \brief save model into existing file - * \param handle handle - * \param fname file name - * \return R_NilValue - */ -XGB_DLL SEXP XGBoosterSaveModel_R(SEXP handle, SEXP fname); - -/*! - * \brief load model from raw array - * \param handle handle - * \param raw raw array containing the model - * \return R_NilValue - */ -XGB_DLL SEXP XGBoosterLoadModelFromRaw_R(SEXP handle, SEXP raw); - -/*! - * \brief save model into R's raw array - * \param handle handle - * \return raw array - */ -XGB_DLL SEXP XGBoosterModelToRaw_R(SEXP handle); - -/*! - * \brief Save internal parameters as a JSON string - * \param handle handle - * \return JSON string - */ - -XGB_DLL SEXP XGBoosterSaveJsonConfig_R(SEXP handle); -/*!
- * \brief Load the JSON string returned by XGBoosterSaveJsonConfig_R - * \param handle handle - * \param value JSON string - * \return R_NilValue - */ -XGB_DLL SEXP XGBoosterLoadJsonConfig_R(SEXP handle, SEXP value); - -/*! - * \brief Memory snapshot based serialization method. Saves the complete internal - * state into a buffer. - * \param handle handle to booster - * \return raw byte array - */ -XGB_DLL SEXP XGBoosterSerializeToBuffer_R(SEXP handle); - -/*! - * \brief Memory snapshot based serialization method. Loads the buffer returned - * by XGBoosterSerializeToBuffer. - * \param handle handle to booster - * \param raw raw byte array to load - * \return R_NilValue - */ -XGB_DLL SEXP XGBoosterUnserializeFromBuffer_R(SEXP handle, SEXP raw); - -/*! - * \brief dump model into a string - * \param handle handle - * \param fmap feature map file name; can be an empty string - * \param with_stats whether dump statistics of splits - * \param dump_format the format to dump the model in - */ -XGB_DLL SEXP XGBoosterDumpModel_R(SEXP handle, SEXP fmap, SEXP with_stats, SEXP dump_format); - -/*! - * \brief get learner attribute value - * \param handle handle - * \param name attribute name - * \return character containing attribute value - */ -XGB_DLL SEXP XGBoosterGetAttr_R(SEXP handle, SEXP name); - -/*! - * \brief set learner attribute value - * \param handle handle - * \param name attribute name - * \param val attribute value; NULL value deletes the attribute - * \return R_NilValue - */ -XGB_DLL SEXP XGBoosterSetAttr_R(SEXP handle, SEXP name, SEXP val); - -/*! - * \brief get the names of learner attributes - * \param handle handle - * \return string vector containing attribute names - */ -XGB_DLL SEXP XGBoosterGetAttrNames_R(SEXP handle); - -#endif // XGBOOST_R_H_ // NOLINT(*) diff --git a/ml-xgboost/R-package/src/xgboost_assert.c b/ml-xgboost/R-package/src/xgboost_assert.c deleted file mode 100644 index 4706a03..0000000 --- a/ml-xgboost/R-package/src/xgboost_assert.c +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) 2014 by Contributors -#include <R.h> -#include <stdio.h> -#include <stdarg.h> - -// implements error handling -void XGBoostAssert_R(int exp, const char *fmt, ...) { - char buf[1024]; - if (exp == 0) { - va_list args; - va_start(args, fmt); - vsprintf(buf, fmt, args); - va_end(args); - error("AssertError:%s\n", buf); - } -} -void XGBoostCheck_R(int exp, const char *fmt, ...) { - char buf[1024]; - if (exp == 0) { - va_list args; - va_start(args, fmt); - vsprintf(buf, fmt, args); - va_end(args); - error("%s\n", buf); - } -} diff --git a/ml-xgboost/R-package/src/xgboost_custom.cc b/ml-xgboost/R-package/src/xgboost_custom.cc deleted file mode 100644 index 2387e72..0000000 --- a/ml-xgboost/R-package/src/xgboost_custom.cc +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2015 by Contributors -// This file contains the customization implementations of R module -// to change behavior of libxgboost - -#include <xgboost/logging.h> -#include "../../src/common/random.h" -#include "./xgboost_R.h" - -// redirect the messages to R's console. -namespace dmlc { -void CustomLogMessage::Log(const std::string& msg) { - Rprintf("%s\n", msg.c_str()); -} -} // namespace dmlc - -// implements rabit error handling. -extern "C" { - void XGBoostAssert_R(int exp, const char *fmt, ...); - void XGBoostCheck_R(int exp, const char *fmt, ...); -} - -namespace rabit { -namespace utils { -extern "C" { - void (*Printf)(const char *fmt, ...) = Rprintf; - void (*Assert)(int exp, const char *fmt, ...) = XGBoostAssert_R; - void (*Check)(int exp, const char *fmt, ...) = XGBoostCheck_R; - void (*Error)(const char *fmt, ...)
= error; -} -} -} - -namespace xgboost { -ConsoleLogger::~ConsoleLogger() { - if (cur_verbosity_ == LogVerbosity::kIgnore || - cur_verbosity_ <= global_verbosity_) { - dmlc::CustomLogMessage::Log(log_stream_.str()); - } -} -TrackerLogger::~TrackerLogger() { - dmlc::CustomLogMessage::Log(log_stream_.str()); -} -} // namespace xgboost - -namespace xgboost { -namespace common { - -// redirect the math functions. -bool CheckNAN(double v) { - return ISNAN(v); -} -#if !defined(XGBOOST_USE_CUDA) -double LogGamma(double v) { - return lgammafn(v); -} -#endif // !defined(XGBOOST_USE_CUDA) -// customize random engine. -void CustomGlobalRandomEngine::seed(CustomGlobalRandomEngine::result_type val) { - // ignore the seed -} - -// use R's PRNG to replace the default random engine. -CustomGlobalRandomEngine::result_type -CustomGlobalRandomEngine::operator()() { - return static_cast<CustomGlobalRandomEngine::result_type>( - std::floor(unif_rand() * CustomGlobalRandomEngine::max())); -} -} // namespace common -} // namespace xgboost diff --git a/ml-xgboost/R-package/tests/testthat.R b/ml-xgboost/R-package/tests/testthat.R deleted file mode 100644 index 53cc6ca..0000000 --- a/ml-xgboost/R-package/tests/testthat.R +++ /dev/null @@ -1,4 +0,0 @@ -library(testthat) -library(xgboost) - -test_check("xgboost") diff --git a/ml-xgboost/R-package/tests/testthat/test_basic.R b/ml-xgboost/R-package/tests/testthat/test_basic.R deleted file mode 100644 index b23e4dd..0000000 --- a/ml-xgboost/R-package/tests/testthat/test_basic.R +++ /dev/null @@ -1,384 +0,0 @@ -require(xgboost) - -context("basic functions") - -data(agaricus.train, package='xgboost') -data(agaricus.test, package='xgboost') -train <- agaricus.train -test <- agaricus.test -set.seed(1994) - -# disable some tests for Win32 -windows_flag = .Platform$OS.type == "windows" && - .Machine$sizeof.pointer != 8 -solaris_flag = (Sys.info()['sysname'] == "SunOS") - -test_that("train and predict binary classification", { - nrounds = 2 - expect_output( - bst <- xgboost(data = train$data, label = train$label, max_depth = 2, - eta = 1, nthread = 2, nrounds = nrounds, objective = "binary:logistic") - , "train-error") - expect_equal(class(bst), "xgb.Booster") - expect_equal(bst$niter, nrounds) - expect_false(is.null(bst$evaluation_log)) - expect_equal(nrow(bst$evaluation_log), nrounds) - expect_lt(bst$evaluation_log[, min(train_error)], 0.03) - - pred <- predict(bst, test$data) - expect_length(pred, 1611) - - pred1 <- predict(bst, train$data, ntreelimit = 1) - expect_length(pred1, 6513) - err_pred1 <- sum((pred1 > 0.5) != train$label)/length(train$label) - err_log <- bst$evaluation_log[1, train_error] - expect_lt(abs(err_pred1 - err_log), 10e-6) -}) - -test_that("parameter validation works", { - p <- list(foo = "bar") - nrounds = 1 - set.seed(1994) - - d <- cbind( - x1 = rnorm(10), - x2 = rnorm(10), - x3 = rnorm(10)) - y <- d[,"x1"] + d[,"x2"]^2 + - ifelse(d[,"x3"] > .5, d[,"x3"]^2, 2^d[,"x3"]) + - rnorm(10) - dtrain <- xgb.DMatrix(data=d, info = list(label=y)) - - correct <- function() { - params <- list(max_depth = 2, booster = "dart", - rate_drop = 0.5, one_drop = TRUE, - objective = "reg:squarederror") - xgb.train(params = params, data = dtrain, nrounds = nrounds) - } - expect_silent(correct()) - incorrect <- function() { - params <- list(max_depth = 2, booster = "dart", - rate_drop = 0.5, one_drop = TRUE, - objective = "reg:squarederror", - foo = "bar", bar = "foo") - output <- capture.output( - xgb.train(params = params, data = dtrain, nrounds = nrounds)) - print(output) - } - expect_output(incorrect(), "bar, foo") -}) - -
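# Aside: a minimal sketch, not part of the deleted test file, of how the
# R-level calls exercised by these tests reach the C wrappers removed earlier
# in this patch (xgboost_R.cc) through R's .Call() interface. It assumes an
# installed xgboost package with its native routines registered; the wrapper
# names in the comments refer to the functions shown in the diff above.
library(xgboost)
m  <- matrix(rnorm(20), nrow = 5)        # small dense input
dm <- xgb.DMatrix(m, label = rnorm(5))   # dense path -> XGDMatrixCreateFromMat_R
dim(dm)                                  # -> XGDMatrixNumRow_R / XGDMatrixNumCol_R
setinfo(dm, "weight", rep(1, 5))         # non-"group" field -> float branch of XGDMatrixSetInfo_R
ds <- slice(dm, 1:3)                     # -> XGDMatrixSliceDMatrix_R (R's 1-based
                                         #    indices become 0-based in the wrapper)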
-test_that("dart prediction works", { - nrounds = 32 - set.seed(1994) - - d <- cbind( - x1 = rnorm(100), - x2 = rnorm(100), - x3 = rnorm(100)) - y <- d[,"x1"] + d[,"x2"]^2 + - ifelse(d[,"x3"] > .5, d[,"x3"]^2, 2^d[,"x3"]) + - rnorm(100) - - set.seed(1994) - booster_by_xgboost <- xgboost(data = d, label = y, max_depth = 2, booster = "dart", - rate_drop = 0.5, one_drop = TRUE, - eta = 1, nthread = 2, nrounds = nrounds, objective = "reg:squarederror") - pred_by_xgboost_0 <- predict(booster_by_xgboost, newdata = d, ntreelimit = 0) - pred_by_xgboost_1 <- predict(booster_by_xgboost, newdata = d, ntreelimit = nrounds) - expect_true(all(matrix(pred_by_xgboost_0, byrow=TRUE) == matrix(pred_by_xgboost_1, byrow=TRUE))) - - pred_by_xgboost_2 <- predict(booster_by_xgboost, newdata = d, training = TRUE) - expect_false(all(matrix(pred_by_xgboost_0, byrow=TRUE) == matrix(pred_by_xgboost_2, byrow=TRUE))) - - set.seed(1994) - dtrain <- xgb.DMatrix(data=d, info = list(label=y)) - booster_by_train <- xgb.train( params = list( - booster = "dart", - max_depth = 2, - eta = 1, - rate_drop = 0.5, - one_drop = TRUE, - nthread = 1, - tree_method= "exact", - objective = "reg:squarederror" - ), - data = dtrain, - nrounds = nrounds - ) - pred_by_train_0 <- predict(booster_by_train, newdata = dtrain, ntreelimit = 0) - pred_by_train_1 <- predict(booster_by_train, newdata = dtrain, ntreelimit = nrounds) - pred_by_train_2 <- predict(booster_by_train, newdata = dtrain, training = TRUE) - - expect_true(all(matrix(pred_by_train_0, byrow=TRUE) == matrix(pred_by_xgboost_0, byrow=TRUE))) - expect_true(all(matrix(pred_by_train_1, byrow=TRUE) == matrix(pred_by_xgboost_1, byrow=TRUE))) - expect_true(all(matrix(pred_by_train_2, byrow=TRUE) == matrix(pred_by_xgboost_2, byrow=TRUE))) -}) - -test_that("train and predict softprob", { - lb <- as.numeric(iris$Species) - 1 - set.seed(11) - expect_output( - bst <- xgboost(data = as.matrix(iris[, -5]), label = lb, - max_depth = 3, eta = 0.5, nthread = 2, nrounds = 5, - objective = "multi:softprob", num_class=3) - , "train-merror") - expect_false(is.null(bst$evaluation_log)) - expect_lt(bst$evaluation_log[, min(train_merror)], 0.025) - expect_equal(bst$niter * 3, xgb.ntree(bst)) - pred <- predict(bst, as.matrix(iris[, -5])) - expect_length(pred, nrow(iris) * 3) - # row sums add up to total probability of 1: - expect_equal(rowSums(matrix(pred, ncol=3, byrow=TRUE)), rep(1, nrow(iris)), tolerance = 1e-7) - # manually calculate error at the last iteration: - mpred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE) - expect_equal(as.numeric(t(mpred)), pred) - pred_labels <- max.col(mpred) - 1 - err <- sum(pred_labels != lb)/length(lb) - expect_equal(bst$evaluation_log[5, train_merror], err, tolerance = 5e-6) - # manually calculate error at the 1st iteration: - mpred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE, ntreelimit = 1) - pred_labels <- max.col(mpred) - 1 - err <- sum(pred_labels != lb)/length(lb) - expect_equal(bst$evaluation_log[1, train_merror], err, tolerance = 5e-6) -}) - -test_that("train and predict softmax", { - lb <- as.numeric(iris$Species) - 1 - set.seed(11) - expect_output( - bst <- xgboost(data = as.matrix(iris[, -5]), label = lb, - max_depth = 3, eta = 0.5, nthread = 2, nrounds = 5, - objective = "multi:softmax", num_class=3) - , "train-merror") - expect_false(is.null(bst$evaluation_log)) - expect_lt(bst$evaluation_log[, min(train_merror)], 0.025) - expect_equal(bst$niter * 3, xgb.ntree(bst)) - - pred <- predict(bst, as.matrix(iris[, -5])) - expect_length(pred, 
nrow(iris)) - err <- sum(pred != lb)/length(lb) - expect_equal(bst$evaluation_log[5, train_merror], err, tolerance = 5e-6) -}) - -test_that("train and predict RF", { - set.seed(11) - lb <- train$label - # single iteration - bst <- xgboost(data = train$data, label = lb, max_depth = 5, - nthread = 2, nrounds = 1, objective = "binary:logistic", - num_parallel_tree = 20, subsample = 0.6, colsample_bytree = 0.1) - expect_equal(bst$niter, 1) - expect_equal(xgb.ntree(bst), 20) - - pred <- predict(bst, train$data) - pred_err <- sum((pred > 0.5) != lb)/length(lb) - expect_lt(abs(bst$evaluation_log[1, train_error] - pred_err), 10e-6) - #expect_lt(pred_err, 0.03) - - pred <- predict(bst, train$data, ntreelimit = 20) - pred_err_20 <- sum((pred > 0.5) != lb)/length(lb) - expect_equal(pred_err_20, pred_err) - - #pred <- predict(bst, train$data, ntreelimit = 1) - #pred_err_1 <- sum((pred > 0.5) != lb)/length(lb) - #expect_lt(pred_err, pred_err_1) - #expect_lt(pred_err, 0.08) -}) - -test_that("train and predict RF with softprob", { - lb <- as.numeric(iris$Species) - 1 - nrounds <- 15 - set.seed(11) - bst <- xgboost(data = as.matrix(iris[, -5]), label = lb, - max_depth = 3, eta = 0.9, nthread = 2, nrounds = nrounds, - objective = "multi:softprob", num_class=3, verbose = 0, - num_parallel_tree = 4, subsample = 0.5, colsample_bytree = 0.5) - expect_equal(bst$niter, 15) - expect_equal(xgb.ntree(bst), 15*3*4) - # predict for all iterations: - pred <- predict(bst, as.matrix(iris[, -5]), reshape=TRUE) - expect_equal(dim(pred), c(nrow(iris), 3)) - pred_labels <- max.col(pred) - 1 - err <- sum(pred_labels != lb)/length(lb) - expect_equal(bst$evaluation_log[nrounds, train_merror], err, tolerance = 5e-6) - # predict for 7 iterations and adjust for 4 parallel trees per iteration - pred <- predict(bst, as.matrix(iris[, -5]), reshape=TRUE, ntreelimit = 7 * 4) - err <- sum((max.col(pred) - 1) != lb)/length(lb) - expect_equal(bst$evaluation_log[7, train_merror], err, tolerance = 5e-6) -}) - -test_that("use of multiple eval metrics works", { - expect_output( - bst <- xgboost(data = train$data, label = train$label, max_depth = 2, - eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", - eval_metric = 'error', eval_metric = 'auc', eval_metric = "logloss") - , "train-error.*train-auc.*train-logloss") - expect_false(is.null(bst$evaluation_log)) - expect_equal(dim(bst$evaluation_log), c(2, 4)) - expect_equal(colnames(bst$evaluation_log), c("iter", "train_error", "train_auc", "train_logloss")) -}) - - -test_that("training continuation works", { - dtrain <- xgb.DMatrix(train$data, label = train$label) - watchlist = list(train=dtrain) - param <- list(objective = "binary:logistic", max_depth = 2, eta = 1, nthread = 2) - - # for the reference, use 4 iterations at once: - set.seed(11) - bst <- xgb.train(param, dtrain, nrounds = 4, watchlist, verbose = 0) - # first two iterations: - set.seed(11) - bst1 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0) - # continue for two more: - bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, xgb_model = bst1) - if (!windows_flag && !solaris_flag) - expect_equal(bst$raw, bst2$raw) - expect_false(is.null(bst2$evaluation_log)) - expect_equal(dim(bst2$evaluation_log), c(4, 2)) - expect_equal(bst2$evaluation_log, bst$evaluation_log) - # test continuing from raw model data - bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, xgb_model = bst1$raw) - if (!windows_flag && !solaris_flag) - expect_equal(bst$raw, bst2$raw) - 
expect_equal(dim(bst2$evaluation_log), c(2, 2)) - # test continuing from a model in file - xgb.save(bst1, "xgboost.model") - bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, xgb_model = "xgboost.model") - if (!windows_flag && !solaris_flag) - expect_equal(bst$raw, bst2$raw) - expect_equal(dim(bst2$evaluation_log), c(2, 2)) -}) - -test_that("model serialization works", { - out_path <- "model_serialization" - dtrain <- xgb.DMatrix(train$data, label = train$label) - watchlist = list(train=dtrain) - param <- list(objective = "binary:logistic") - booster <- xgb.train(param, dtrain, nrounds = 4, watchlist) - raw <- xgb.serialize(booster) - saveRDS(raw, out_path) - raw <- readRDS(out_path) - - loaded <- xgb.unserialize(raw) - raw_from_loaded <- xgb.serialize(loaded) - expect_equal(raw, raw_from_loaded) - file.remove(out_path) -}) - -test_that("xgb.cv works", { - set.seed(11) - expect_output( - cv <- xgb.cv(data = train$data, label = train$label, max_depth = 2, nfold = 5, - eta = 1., nthread = 2, nrounds = 2, objective = "binary:logistic", - verbose=TRUE) - , "train-error:") - expect_is(cv, 'xgb.cv.synchronous') - expect_false(is.null(cv$evaluation_log)) - expect_lt(cv$evaluation_log[, min(test_error_mean)], 0.03) - expect_lt(cv$evaluation_log[, min(test_error_std)], 0.008) - expect_equal(cv$niter, 2) - expect_false(is.null(cv$folds) && is.list(cv$folds)) - expect_length(cv$folds, 5) - expect_false(is.null(cv$params) && is.list(cv$params)) - expect_false(is.null(cv$callbacks)) - expect_false(is.null(cv$call)) -}) - -test_that("xgb.cv works with stratified folds", { - dtrain <- xgb.DMatrix(train$data, label = train$label) - set.seed(314159) - cv <- xgb.cv(data = dtrain, max_depth = 2, nfold = 5, - eta = 1., nthread = 2, nrounds = 2, objective = "binary:logistic", - verbose=TRUE, stratified = FALSE) - set.seed(314159) - cv2 <- xgb.cv(data = dtrain, max_depth = 2, nfold = 5, - eta = 1., nthread = 2, nrounds = 2, objective = "binary:logistic", - verbose=TRUE, stratified = TRUE) - # Stratified folds should result in different evaluation logs - expect_true(all(cv$evaluation_log[, test_error_mean] != cv2$evaluation_log[, test_error_mean])) -}) - -test_that("train and predict with non-strict classes", { - # standard dense matrix input - train_dense <- as.matrix(train$data) - bst <- xgboost(data = train_dense, label = train$label, max_depth = 2, - eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 0) - pr0 <- predict(bst, train_dense) - - # dense matrix-like input of non-matrix class - class(train_dense) <- 'shmatrix' - expect_true(is.matrix(train_dense)) - expect_error( - bst <- xgboost(data = train_dense, label = train$label, max_depth = 2, - eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 0) - , regexp = NA) - expect_error(pr <- predict(bst, train_dense), regexp = NA) - expect_equal(pr0, pr) - - # dense matrix-like input of non-matrix class with some inheritance - class(train_dense) <- c('pphmatrix','shmatrix') - expect_true(is.matrix(train_dense)) - expect_error( - bst <- xgboost(data = train_dense, label = train$label, max_depth = 2, - eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 0) - , regexp = NA) - expect_error(pr <- predict(bst, train_dense), regexp = NA) - expect_equal(pr0, pr) - - # when someone inherits from xgb.Booster, it should still be possible to use it as an xgb.Booster - class(bst) <- c('super.Booster', 'xgb.Booster') - expect_error(pr <- predict(bst, train_dense), regexp = NA) -
expect_equal(pr0, pr) -}) - -test_that("max_delta_step works", { - dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label) - watchlist <- list(train = dtrain) - param <- list(objective = "binary:logistic", eval_metric="logloss", max_depth = 2, nthread = 2, eta = 0.5) - nrounds = 5 - # model with no restriction on max_delta_step - bst1 <- xgb.train(param, dtrain, nrounds, watchlist, verbose = 1) - # model with restricted max_delta_step - bst2 <- xgb.train(param, dtrain, nrounds, watchlist, verbose = 1, max_delta_step = 1) - # the no-restriction model is expected to have consistently lower loss during the initial iterations - expect_true(all(bst1$evaluation_log$train_logloss < bst2$evaluation_log$train_logloss)) - expect_lt(mean(bst1$evaluation_log$train_logloss)/mean(bst2$evaluation_log$train_logloss), 0.8) -}) - -test_that("colsample_bytree works", { - # Randomly generate data matrix by sampling from uniform distribution [-1, 1] - set.seed(1) - train_x <- matrix(runif(1000, min = -1, max = 1), ncol = 100) - train_y <- as.numeric(rowSums(train_x) > 0) - test_x <- matrix(runif(1000, min = -1, max = 1), ncol = 100) - test_y <- as.numeric(rowSums(test_x) > 0) - colnames(train_x) <- paste0("Feature_", sprintf("%03d", 1:100)) - colnames(test_x) <- paste0("Feature_", sprintf("%03d", 1:100)) - dtrain <- xgb.DMatrix(train_x, label = train_y) - dtest <- xgb.DMatrix(test_x, label = test_y) - watchlist <- list(train = dtrain, eval = dtest) - ## Use colsample_bytree = 0.01, so that roughly one out of 100 features is chosen for - ## each tree - param <- list(max_depth = 2, eta = 0, nthread = 2, - colsample_bytree = 0.01, objective = "binary:logistic", - eval_metric = "auc") - set.seed(2) - bst <- xgb.train(param, dtrain, nrounds = 100, watchlist, verbose = 0) - xgb.importance(model = bst) - # If colsample_bytree works properly, a variety of features should be used - # in the 100 trees - expect_gte(nrow(xgb.importance(model = bst)), 30) -}) - -test_that("Configuration works", { - bst <- xgboost(data = train$data, label = train$label, max_depth = 2, - eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", - eval_metric = 'error', eval_metric = 'auc', eval_metric = "logloss") - config <- xgb.config(bst) - xgb.config(bst) <- config - reloaded_config <- xgb.config(bst) - expect_equal(config, reloaded_config) -}) diff --git a/ml-xgboost/R-package/tests/testthat/test_callbacks.R b/ml-xgboost/R-package/tests/testthat/test_callbacks.R deleted file mode 100644 index e7230d1..0000000 --- a/ml-xgboost/R-package/tests/testthat/test_callbacks.R +++ /dev/null @@ -1,330 +0,0 @@ -# More specific testing of callbacks - -require(xgboost) -require(data.table) - -context("callbacks") - -data(agaricus.train, package='xgboost') -data(agaricus.test, package='xgboost') -train <- agaricus.train -test <- agaricus.test - -# add some label noise for early stopping tests -add.noise <- function(label, frac) { - inoise <- sample(length(label), length(label) * frac) - label[inoise] <- !label[inoise] - label -} -set.seed(11) -ltrain <- add.noise(train$label, 0.2) -ltest <- add.noise(test$label, 0.2) -dtrain <- xgb.DMatrix(train$data, label = ltrain) -dtest <- xgb.DMatrix(test$data, label = ltest) -watchlist = list(train=dtrain, test=dtest) - - -err <- function(label, pr) sum((pr > 0.5) != label)/length(label) - -param <- list(objective = "binary:logistic", max_depth = 2, nthread = 2) - - -test_that("cb.print.evaluation works as expected", { - - bst_evaluation <- c('train-auc'=0.9, 'test-auc'=0.8) -
bst_evaluation_err <- NULL - begin_iteration <- 1 - end_iteration <- 7 - - f0 <- cb.print.evaluation(period=0) - f1 <- cb.print.evaluation(period=1) - f5 <- cb.print.evaluation(period=5) - - expect_false(is.null(attr(f1, 'call'))) - expect_equal(attr(f1, 'name'), 'cb.print.evaluation') - - iteration <- 1 - expect_silent(f0()) - expect_output(f1(), "\\[1\\]\ttrain-auc:0.900000\ttest-auc:0.800000") - expect_output(f5(), "\\[1\\]\ttrain-auc:0.900000\ttest-auc:0.800000") - expect_null(f1()) - - iteration <- 2 - expect_output(f1(), "\\[2\\]\ttrain-auc:0.900000\ttest-auc:0.800000") - expect_silent(f5()) - - iteration <- 7 - expect_output(f1(), "\\[7\\]\ttrain-auc:0.900000\ttest-auc:0.800000") - expect_output(f5(), "\\[7\\]\ttrain-auc:0.900000\ttest-auc:0.800000") - - bst_evaluation_err <- c('train-auc'=0.1, 'test-auc'=0.2) - expect_output(f1(), "\\[7\\]\ttrain-auc:0.900000\\+0.100000\ttest-auc:0.800000\\+0.200000") -}) - -test_that("cb.evaluation.log works as expected", { - - bst_evaluation <- c('train-auc'=0.9, 'test-auc'=0.8) - bst_evaluation_err <- NULL - - evaluation_log <- list() - f <- cb.evaluation.log() - - expect_false(is.null(attr(f, 'call'))) - expect_equal(attr(f, 'name'), 'cb.evaluation.log') - - iteration <- 1 - expect_silent(f()) - expect_equal(evaluation_log, - list(c(iter=1, bst_evaluation))) - iteration <- 2 - expect_silent(f()) - expect_equal(evaluation_log, - list(c(iter=1, bst_evaluation), c(iter=2, bst_evaluation))) - expect_silent(f(finalize = TRUE)) - expect_equal(evaluation_log, - data.table(iter=1:2, train_auc=c(0.9,0.9), test_auc=c(0.8,0.8))) - - bst_evaluation_err <- c('train-auc'=0.1, 'test-auc'=0.2) - evaluation_log <- list() - f <- cb.evaluation.log() - - iteration <- 1 - expect_silent(f()) - expect_equal(evaluation_log, - list(c(iter=1, c(bst_evaluation, bst_evaluation_err)))) - iteration <- 2 - expect_silent(f()) - expect_equal(evaluation_log, - list(c(iter=1, c(bst_evaluation, bst_evaluation_err)), - c(iter=2, c(bst_evaluation, bst_evaluation_err)))) - expect_silent(f(finalize = TRUE)) - expect_equal(evaluation_log, - data.table(iter=1:2, - train_auc_mean=c(0.9,0.9), train_auc_std=c(0.1,0.1), - test_auc_mean=c(0.8,0.8), test_auc_std=c(0.2,0.2))) -}) - - -param <- list(objective = "binary:logistic", max_depth = 4, nthread = 2) - -test_that("can store evaluation_log without printing", { - expect_silent( - bst <- xgb.train(param, dtrain, nrounds = 10, watchlist, eta = 1, verbose = 0) - ) - expect_false(is.null(bst$evaluation_log)) - expect_false(is.null(bst$evaluation_log$train_error)) - expect_lt(bst$evaluation_log[, min(train_error)], 0.2) -}) - -test_that("cb.reset.parameters works as expected", { - - # fixed eta - set.seed(111) - bst0 <- xgb.train(param, dtrain, nrounds = 2, watchlist, eta = 0.9, verbose = 0) - expect_false(is.null(bst0$evaluation_log)) - expect_false(is.null(bst0$evaluation_log$train_error)) - - # same eta but re-set as a vector parameter in the callback - set.seed(111) - my_par <- list(eta = c(0.9, 0.9)) - bst1 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, - callbacks = list(cb.reset.parameters(my_par))) - expect_false(is.null(bst1$evaluation_log$train_error)) - expect_equal(bst0$evaluation_log$train_error, - bst1$evaluation_log$train_error) - - # same eta but re-set via a function in the callback - set.seed(111) - my_par <- list(eta = function(itr, itr_end) 0.9) - bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, - callbacks = list(cb.reset.parameters(my_par))) - 
expect_false(is.null(bst2$evaluation_log$train_error)) - expect_equal(bst0$evaluation_log$train_error, - bst2$evaluation_log$train_error) - - # different eta re-set as a vector parameter in the callback - set.seed(111) - my_par <- list(eta = c(0.6, 0.5)) - bst3 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, - callbacks = list(cb.reset.parameters(my_par))) - expect_false(is.null(bst3$evaluation_log$train_error)) - expect_false(all(bst0$evaluation_log$train_error == bst3$evaluation_log$train_error)) - - # resetting multiple parameters at the same time runs with no error - my_par <- list(eta = c(1., 0.5), gamma = c(1, 2), max_depth = c(4, 8)) - expect_error( - bst4 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, - callbacks = list(cb.reset.parameters(my_par))) - , NA) # NA = no error - # CV works as well - expect_error( - bst4 <- xgb.cv(param, dtrain, nfold = 2, nrounds = 2, verbose = 0, - callbacks = list(cb.reset.parameters(my_par))) - , NA) # NA = no error - - # expect no learning with 0 learning rate - my_par <- list(eta = c(0., 0.)) - bstX <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, - callbacks = list(cb.reset.parameters(my_par))) - expect_false(is.null(bstX$evaluation_log$train_error)) - er <- unique(bstX$evaluation_log$train_error) - expect_length(er, 1) - expect_gt(er, 0.4) -}) - -test_that("cb.save.model works as expected", { - files <- c('xgboost_01.model', 'xgboost_02.model', 'xgboost.model') - for (f in files) if (file.exists(f)) file.remove(f) - - bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, eta = 1, verbose = 0, - save_period = 1, save_name = "xgboost_%02d.model") - expect_true(file.exists('xgboost_01.model')) - expect_true(file.exists('xgboost_02.model')) - b1 <- xgb.load('xgboost_01.model') - expect_equal(xgb.ntree(b1), 1) - b2 <- xgb.load('xgboost_02.model') - expect_equal(xgb.ntree(b2), 2) - - xgb.config(b2) <- xgb.config(bst) - expect_equal(xgb.config(bst), xgb.config(b2)) - expect_equal(bst$raw, b2$raw) - - # save_period = 0 saves the last iteration's model - bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, eta = 1, verbose = 0, - save_period = 0) - expect_true(file.exists('xgboost.model')) - b2 <- xgb.load('xgboost.model') - xgb.config(b2) <- xgb.config(bst) - expect_equal(bst$raw, b2$raw) - - for (f in files) if (file.exists(f)) file.remove(f) -}) - -test_that("early stopping xgb.train works", { - set.seed(11) - expect_output( - bst <- xgb.train(param, dtrain, nrounds = 20, watchlist, eta = 0.3, - early_stopping_rounds = 3, maximize = FALSE) - , "Stopping. 
Best iteration") - expect_false(is.null(bst$best_iteration)) - expect_lt(bst$best_iteration, 19) - expect_equal(bst$best_iteration, bst$best_ntreelimit) - - pred <- predict(bst, dtest) - expect_equal(length(pred), 1611) - err_pred <- err(ltest, pred) - err_log <- bst$evaluation_log[bst$best_iteration, test_error] - expect_equal(err_log, err_pred, tolerance = 5e-6) - - set.seed(11) - expect_silent( - bst0 <- xgb.train(param, dtrain, nrounds = 20, watchlist, eta = 0.3, - early_stopping_rounds = 3, maximize = FALSE, verbose = 0) - ) - expect_equal(bst$evaluation_log, bst0$evaluation_log) - - xgb.save(bst, "model.bin") - loaded <- xgb.load("model.bin") - - expect_false(is.null(loaded$best_iteration)) - expect_equal(loaded$best_iteration, bst$best_ntreelimit) - expect_equal(loaded$best_ntreelimit, bst$best_ntreelimit) - - file.remove("model.bin") -}) - -test_that("early stopping using a specific metric works", { - set.seed(11) - expect_output( - bst <- xgb.train(param, dtrain, nrounds = 20, watchlist, eta = 0.6, - eval_metric="logloss", eval_metric="auc", - callbacks = list(cb.early.stop(stopping_rounds = 3, maximize = FALSE, - metric_name = 'test_logloss'))) - , "Stopping. Best iteration") - expect_false(is.null(bst$best_iteration)) - expect_lt(bst$best_iteration, 19) - expect_equal(bst$best_iteration, bst$best_ntreelimit) - - pred <- predict(bst, dtest, ntreelimit = bst$best_ntreelimit) - expect_equal(length(pred), 1611) - logloss_pred <- sum(-ltest * log(pred) - (1 - ltest) * log(1 - pred)) / length(ltest) - logloss_log <- bst$evaluation_log[bst$best_iteration, test_logloss] - expect_equal(logloss_log, logloss_pred, tolerance = 1e-5) -}) - -test_that("early stopping xgb.cv works", { - set.seed(11) - expect_output( - cv <- xgb.cv(param, dtrain, nfold = 5, eta = 0.3, nrounds = 20, - early_stopping_rounds = 3, maximize = FALSE) - , "Stopping. 
Best iteration") - expect_false(is.null(cv$best_iteration)) - expect_lt(cv$best_iteration, 19) - expect_equal(cv$best_iteration, cv$best_ntreelimit) - # the best error is min error: - expect_true(cv$evaluation_log[, test_error_mean[cv$best_iteration] == min(test_error_mean)]) -}) - -test_that("prediction in xgb.cv works", { - set.seed(11) - nrounds = 4 - cv <- xgb.cv(param, dtrain, nfold = 5, eta = 0.5, nrounds = nrounds, prediction = TRUE, verbose = 0) - expect_false(is.null(cv$evaluation_log)) - expect_false(is.null(cv$pred)) - expect_length(cv$pred, nrow(train$data)) - err_pred <- mean( sapply(cv$folds, function(f) mean(err(ltrain[f], cv$pred[f]))) ) - err_log <- cv$evaluation_log[nrounds, test_error_mean] - expect_equal(err_pred, err_log, tolerance = 1e-6) - - # save CV models - set.seed(11) - cvx <- xgb.cv(param, dtrain, nfold = 5, eta = 0.5, nrounds = nrounds, prediction = TRUE, verbose = 0, - callbacks = list(cb.cv.predict(save_models = TRUE))) - expect_equal(cv$evaluation_log, cvx$evaluation_log) - expect_length(cvx$models, 5) - expect_true(all(sapply(cvx$models, class) == 'xgb.Booster')) -}) - -test_that("prediction in xgb.cv works for gblinear too", { - set.seed(11) - p <- list(booster = 'gblinear', objective = "reg:logistic", nthread = 2) - cv <- xgb.cv(p, dtrain, nfold = 5, eta = 0.5, nrounds = 2, prediction = TRUE, verbose = 0) - expect_false(is.null(cv$evaluation_log)) - expect_false(is.null(cv$pred)) - expect_length(cv$pred, nrow(train$data)) -}) - -test_that("prediction in early-stopping xgb.cv works", { - set.seed(11) - expect_output( - cv <- xgb.cv(param, dtrain, nfold = 5, eta = 0.1, nrounds = 20, - early_stopping_rounds = 5, maximize = FALSE, stratified = FALSE, - prediction = TRUE) - , "Stopping. Best iteration") - - expect_false(is.null(cv$best_iteration)) - expect_lt(cv$best_iteration, 19) - expect_false(is.null(cv$evaluation_log)) - expect_false(is.null(cv$pred)) - expect_length(cv$pred, nrow(train$data)) - - err_pred <- mean( sapply(cv$folds, function(f) mean(err(ltrain[f], cv$pred[f]))) ) - err_log <- cv$evaluation_log[cv$best_iteration, test_error_mean] - expect_equal(err_pred, err_log, tolerance = 1e-6) - err_log_last <- cv$evaluation_log[cv$niter, test_error_mean] - expect_gt(abs(err_pred - err_log_last), 1e-4) -}) - -test_that("prediction in xgb.cv for softprob works", { - lb <- as.numeric(iris$Species) - 1 - set.seed(11) - expect_warning( - cv <- xgb.cv(data = as.matrix(iris[, -5]), label = lb, nfold = 4, - eta = 0.5, nrounds = 5, max_depth = 3, nthread = 2, - subsample = 0.8, gamma = 2, verbose = 0, - prediction = TRUE, objective = "multi:softprob", num_class = 3) - , NA) - expect_false(is.null(cv$pred)) - expect_equal(dim(cv$pred), c(nrow(iris), 3)) - expect_lt(diff(range(rowSums(cv$pred))), 1e-6) -}) diff --git a/ml-xgboost/R-package/tests/testthat/test_custom_objective.R b/ml-xgboost/R-package/tests/testthat/test_custom_objective.R deleted file mode 100644 index ab01147..0000000 --- a/ml-xgboost/R-package/tests/testthat/test_custom_objective.R +++ /dev/null @@ -1,77 +0,0 @@ -context('Test models with custom objective') - -require(xgboost) - -set.seed(1994) - -data(agaricus.train, package='xgboost') -data(agaricus.test, package='xgboost') -dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label) -dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label) -watchlist <- list(eval = dtest, train = dtrain) - -logregobj <- function(preds, dtrain) { - labels <- getinfo(dtrain, "label") - preds <- 1 / (1 + exp(-preds)) - grad <- preds - 
labels - hess <- preds * (1 - preds) - return(list(grad = grad, hess = hess)) -} - -evalerror <- function(preds, dtrain) { - labels <- getinfo(dtrain, "label") - err <- as.numeric(sum(labels != (preds > 0))) / length(labels) - return(list(metric = "error", value = err)) -} - -param <- list(max_depth=2, eta=1, nthread = 2, - objective=logregobj, eval_metric=evalerror) -num_round <- 2 - -test_that("custom objective works", { - bst <- xgb.train(param, dtrain, num_round, watchlist) - expect_equal(class(bst), "xgb.Booster") - expect_false(is.null(bst$evaluation_log)) - expect_false(is.null(bst$evaluation_log$eval_error)) - expect_lt(bst$evaluation_log[num_round, eval_error], 0.03) -}) - -test_that("custom objective in CV works", { - cv <- xgb.cv(param, dtrain, num_round, nfold=10, verbose=FALSE) - expect_false(is.null(cv$evaluation_log)) - expect_equal(dim(cv$evaluation_log), c(2, 5)) - expect_lt(cv$evaluation_log[num_round, test_error_mean], 0.03) -}) - -test_that("custom objective using DMatrix attr works", { - - attr(dtrain, 'label') <- getinfo(dtrain, 'label') - - logregobjattr <- function(preds, dtrain) { - labels <- attr(dtrain, 'label') - preds <- 1 / (1 + exp(-preds)) - grad <- preds - labels - hess <- preds * (1 - preds) - return(list(grad = grad, hess = hess)) - } - param$objective = logregobjattr - bst <- xgb.train(param, dtrain, num_round, watchlist) - expect_equal(class(bst), "xgb.Booster") -}) - -test_that("custom objective with multi-class works", { - data = as.matrix(iris[, -5]) - label = as.numeric(iris$Species) - 1 - dtrain <- xgb.DMatrix(data = data, label = label) - nclasses <- 3 - - fake_softprob <- function(preds, dtrain) { - expect_true(all(matrix(preds) == 0.5)) - grad <- rnorm(dim(as.matrix(preds))[1]) - expect_equal(dim(data)[1] * nclasses, dim(as.matrix(preds))[1]) - hess <- rnorm(dim(as.matrix(preds))[1]) - return (list(grad = grad, hess = hess)) - } - param$objective = fake_softprob - bst <- xgb.train(param, dtrain, 1, num_class=nclasses) -}) diff --git a/ml-xgboost/R-package/tests/testthat/test_dmatrix.R b/ml-xgboost/R-package/tests/testthat/test_dmatrix.R deleted file mode 100644 index c063589..0000000 --- a/ml-xgboost/R-package/tests/testthat/test_dmatrix.R +++ /dev/null @@ -1,117 +0,0 @@ -require(xgboost) -require(Matrix) - -context("testing xgb.DMatrix functionality") - -data(agaricus.test, package='xgboost') -test_data <- agaricus.test$data[1:100,] -test_label <- agaricus.test$label[1:100] - -test_that("xgb.DMatrix: basic construction", { - # from sparse matrix - dtest1 <- xgb.DMatrix(test_data, label=test_label) - - # from dense matrix - dtest2 <- xgb.DMatrix(as.matrix(test_data), label=test_label) - expect_equal(getinfo(dtest1, 'label'), getinfo(dtest2, 'label')) - expect_equal(dim(dtest1), dim(dtest2)) - - #from dense integer matrix - int_data <- as.matrix(test_data) - storage.mode(int_data) <- "integer" - dtest3 <- xgb.DMatrix(int_data, label=test_label) - expect_equal(dim(dtest1), dim(dtest3)) -}) - -test_that("xgb.DMatrix: saving, loading", { - # save to a local file - dtest1 <- xgb.DMatrix(test_data, label=test_label) - tmp_file <- tempfile('xgb.DMatrix_') - expect_true(xgb.DMatrix.save(dtest1, tmp_file)) - # read from a local file - expect_output(dtest3 <- xgb.DMatrix(tmp_file), "entries loaded from") - expect_output(dtest3 <- xgb.DMatrix(tmp_file, silent = TRUE), NA) - unlink(tmp_file) - expect_equal(getinfo(dtest1, 'label'), getinfo(dtest3, 'label')) - - # from a libsvm text file - tmp <- c("0 1:1 2:1","1 3:1","0 1:1") - tmp_file <- 'tmp.libsvm' - 
writeLines(tmp, tmp_file) - dtest4 <- xgb.DMatrix(tmp_file, silent = TRUE) - expect_equal(dim(dtest4), c(3, 4)) - expect_equal(getinfo(dtest4, 'label'), c(0,1,0)) - unlink(tmp_file) -}) - -test_that("xgb.DMatrix: getinfo & setinfo", { - dtest <- xgb.DMatrix(test_data) - expect_true(setinfo(dtest, 'label', test_label)) - labels <- getinfo(dtest, 'label') - expect_equal(test_label, getinfo(dtest, 'label')) - - expect_true(setinfo(dtest, 'label_lower_bound', test_label)) - expect_equal(test_label, getinfo(dtest, 'label_lower_bound')) - - expect_true(setinfo(dtest, 'label_upper_bound', test_label)) - expect_equal(test_label, getinfo(dtest, 'label_upper_bound')) - - expect_true(length(getinfo(dtest, 'weight')) == 0) - expect_true(length(getinfo(dtest, 'base_margin')) == 0) - - expect_true(setinfo(dtest, 'weight', test_label)) - expect_true(setinfo(dtest, 'base_margin', test_label)) - expect_true(setinfo(dtest, 'group', c(50,50))) - expect_error(setinfo(dtest, 'group', test_label)) - - # providing character values will give a warning - expect_warning(setinfo(dtest, 'weight', rep('a', nrow(test_data)))) - - # any other label should error - expect_error(setinfo(dtest, 'asdf', test_label)) -}) - -test_that("xgb.DMatrix: slice, dim", { - dtest <- xgb.DMatrix(test_data, label=test_label) - expect_equal(dim(dtest), dim(test_data)) - dsub1 <- slice(dtest, 1:42) - expect_equal(nrow(dsub1), 42) - expect_equal(ncol(dsub1), ncol(test_data)) - - dsub2 <- dtest[1:42,] - expect_equal(dim(dtest), dim(test_data)) - expect_equal(getinfo(dsub1, 'label'), getinfo(dsub2, 'label')) -}) - -test_that("xgb.DMatrix: slice, trailing empty rows", { - data(agaricus.train, package='xgboost') - train_data <- agaricus.train$data - train_label <- agaricus.train$label - dtrain <- xgb.DMatrix(data=train_data, label=train_label) - slice(dtrain, 6513L) - train_data[6513, ] <- 0 - dtrain <- xgb.DMatrix(data=train_data, label=train_label) - slice(dtrain, 6513L) - expect_equal(nrow(dtrain), 6513) -}) - -test_that("xgb.DMatrix: colnames", { - dtest <- xgb.DMatrix(test_data, label=test_label) - expect_equal(colnames(dtest), colnames(test_data)) - expect_error( colnames(dtest) <- 'asdf') - new_names <- make.names(1:ncol(test_data)) - expect_silent( colnames(dtest) <- new_names) - expect_equal(colnames(dtest), new_names) - expect_silent(colnames(dtest) <- NULL) - expect_null(colnames(dtest)) -}) - -test_that("xgb.DMatrix: nrow is correct for a very sparse matrix", { - set.seed(123) - nr <- 1000 - x <- rsparsematrix(nr, 100, density=0.0005) - # we want it very sparse, so that last rows are empty - expect_lt(max(x@i), nr) - dtest <- xgb.DMatrix(x) - expect_equal(dim(dtest), dim(x)) -}) diff --git a/ml-xgboost/R-package/tests/testthat/test_gc_safety.R b/ml-xgboost/R-package/tests/testthat/test_gc_safety.R deleted file mode 100644 index b90f0f4..0000000 --- a/ml-xgboost/R-package/tests/testthat/test_gc_safety.R +++ /dev/null @@ -1,15 +0,0 @@ -require(xgboost) - -context("Garbage Collection Safety Check") - -test_that("train and prediction when gctorture is on", { - data(agaricus.train, package='xgboost') - data(agaricus.test, package='xgboost') - train <- agaricus.train - test <- agaricus.test - gctorture(TRUE) - bst <- xgboost(data = train$data, label = train$label, max.depth = 2, - eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic") - pred <- predict(bst, test$data) - gctorture(FALSE) -}) diff --git a/ml-xgboost/R-package/tests/testthat/test_glm.R b/ml-xgboost/R-package/tests/testthat/test_glm.R deleted file mode 100644 index 
9b4aa73..0000000 --- a/ml-xgboost/R-package/tests/testthat/test_glm.R +++ /dev/null @@ -1,48 +0,0 @@ -context('Test generalized linear models') - -require(xgboost) - -test_that("gblinear works", { - data(agaricus.train, package='xgboost') - data(agaricus.test, package='xgboost') - dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label) - dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label) - - param <- list(objective = "binary:logistic", booster = "gblinear", - nthread = 2, eta = 0.8, alpha = 0.0001, lambda = 0.0001) - watchlist <- list(eval = dtest, train = dtrain) - - n <- 5 # iterations - ERR_UL <- 0.005 # upper limit for the test set error - VERB <- 0 # chatterbox switch - - param$updater = 'shotgun' - bst <- xgb.train(param, dtrain, n, watchlist, verbose = VERB, feature_selector = 'shuffle') - ypred <- predict(bst, dtest) - expect_equal(length(getinfo(dtest, 'label')), 1611) - expect_lt(bst$evaluation_log$eval_error[n], ERR_UL) - - bst <- xgb.train(param, dtrain, n, watchlist, verbose = VERB, feature_selector = 'cyclic', - callbacks = list(cb.gblinear.history())) - expect_lt(bst$evaluation_log$eval_error[n], ERR_UL) - h <- xgb.gblinear.history(bst) - expect_equal(dim(h), c(n, ncol(dtrain) + 1)) - expect_is(h, "matrix") - - param$updater = 'coord_descent' - bst <- xgb.train(param, dtrain, n, watchlist, verbose = VERB, feature_selector = 'cyclic') - expect_lt(bst$evaluation_log$eval_error[n], ERR_UL) - - bst <- xgb.train(param, dtrain, n, watchlist, verbose = VERB, feature_selector = 'shuffle') - expect_lt(bst$evaluation_log$eval_error[n], ERR_UL) - - bst <- xgb.train(param, dtrain, 2, watchlist, verbose = VERB, feature_selector = 'greedy') - expect_lt(bst$evaluation_log$eval_error[2], ERR_UL) - - bst <- xgb.train(param, dtrain, n, watchlist, verbose = VERB, feature_selector = 'thrifty', - top_k = 50, callbacks = list(cb.gblinear.history(sparse = TRUE))) - expect_lt(bst$evaluation_log$eval_error[n], ERR_UL) - h <- xgb.gblinear.history(bst) - expect_equal(dim(h), c(n, ncol(dtrain) + 1)) - expect_s4_class(h, "dgCMatrix") -}) diff --git a/ml-xgboost/R-package/tests/testthat/test_helpers.R b/ml-xgboost/R-package/tests/testthat/test_helpers.R deleted file mode 100644 index 5c14d53..0000000 --- a/ml-xgboost/R-package/tests/testthat/test_helpers.R +++ /dev/null @@ -1,376 +0,0 @@ -context('Test helper functions') - -require(xgboost) -require(data.table) -require(Matrix) -require(vcd, quietly = TRUE) - -float_tolerance = 5e-6 - -# disable some tests for 32-bit environment -flag_32bit = .Machine$sizeof.pointer != 8 - -set.seed(1982) -data(Arthritis) -df <- data.table(Arthritis, keep.rownames = F) -df[,AgeDiscret := as.factor(round(Age / 10,0))] -df[,AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))] -df[,ID := NULL] -sparse_matrix <- sparse.model.matrix(Improved~.-1, data = df) -label <- df[, ifelse(Improved == "Marked", 1, 0)] - -# binary -nrounds <- 12 -bst.Tree <- xgboost(data = sparse_matrix, label = label, max_depth = 9, - eta = 1, nthread = 2, nrounds = nrounds, verbose = 0, - objective = "binary:logistic", booster = "gbtree") - -bst.GLM <- xgboost(data = sparse_matrix, label = label, - eta = 1, nthread = 1, nrounds = nrounds, verbose = 0, - objective = "binary:logistic", booster = "gblinear") - -feature.names <- colnames(sparse_matrix) - -# multiclass -mlabel <- as.numeric(iris$Species) - 1 -nclass <- 3 -mbst.Tree <- xgboost(data = as.matrix(iris[, -5]), label = mlabel, verbose = 0, - max_depth = 3, eta = 0.5, nthread = 2, nrounds = nrounds, - 
objective = "multi:softprob", num_class = nclass, base_score = 0) - -mbst.GLM <- xgboost(data = as.matrix(iris[, -5]), label = mlabel, verbose = 0, - booster = "gblinear", eta = 0.1, nthread = 1, nrounds = nrounds, - objective = "multi:softprob", num_class = nclass, base_score = 0) - - -test_that("xgb.dump works", { - if (!flag_32bit) - expect_length(xgb.dump(bst.Tree), 200) - dump_file = file.path(tempdir(), 'xgb.model.dump') - expect_true(xgb.dump(bst.Tree, dump_file, with_stats = T)) - expect_true(file.exists(dump_file)) - expect_gt(file.size(dump_file), 8000) - - # JSON format - dmp <- xgb.dump(bst.Tree, dump_format = "json") - expect_length(dmp, 1) - if (!flag_32bit) - expect_length(grep('nodeid', strsplit(dmp, '\n')[[1]]), 188) -}) - -test_that("xgb.dump works for gblinear", { - expect_length(xgb.dump(bst.GLM), 14) - # also make sure that it works properly for a sparse model where some coefficients - # are 0 from setting large L1 regularization: - bst.GLM.sp <- xgboost(data = sparse_matrix, label = label, eta = 1, nthread = 2, nrounds = 1, - alpha=2, objective = "binary:logistic", booster = "gblinear") - d.sp <- xgb.dump(bst.GLM.sp) - expect_length(d.sp, 14) - expect_gt(sum(d.sp == "0"), 0) - - # JSON format - dmp <- xgb.dump(bst.GLM.sp, dump_format = "json") - expect_length(dmp, 1) - expect_length(grep('\\d', strsplit(dmp, '\n')[[1]]), 11) -}) - -test_that("predict leafs works", { - # no error for gbtree - expect_error(pred_leaf <- predict(bst.Tree, sparse_matrix, predleaf = TRUE), regexp = NA) - expect_equal(dim(pred_leaf), c(nrow(sparse_matrix), nrounds)) - # error for gblinear - expect_error(predict(bst.GLM, sparse_matrix, predleaf = TRUE)) -}) - -test_that("predict feature contributions works", { - # gbtree binary classifier - expect_error(pred_contr <- predict(bst.Tree, sparse_matrix, predcontrib = TRUE), regexp = NA) - expect_equal(dim(pred_contr), c(nrow(sparse_matrix), ncol(sparse_matrix) + 1)) - expect_equal(colnames(pred_contr), c(colnames(sparse_matrix), "BIAS")) - pred <- predict(bst.Tree, sparse_matrix, outputmargin = TRUE) - expect_lt(max(abs(rowSums(pred_contr) - pred)), 1e-5) - # must work with data that has no column names - X <- sparse_matrix - colnames(X) <- NULL - expect_error(pred_contr_ <- predict(bst.Tree, X, predcontrib = TRUE), regexp = NA) - expect_equal(pred_contr, pred_contr_, check.attributes = FALSE, - tolerance = float_tolerance) - - # gbtree binary classifier (approximate method) - expect_error(pred_contr <- predict(bst.Tree, sparse_matrix, predcontrib = TRUE, approxcontrib = TRUE), regexp = NA) - expect_equal(dim(pred_contr), c(nrow(sparse_matrix), ncol(sparse_matrix) + 1)) - expect_equal(colnames(pred_contr), c(colnames(sparse_matrix), "BIAS")) - pred <- predict(bst.Tree, sparse_matrix, outputmargin = TRUE) - expect_lt(max(abs(rowSums(pred_contr) - pred)), 1e-5) - - # gblinear binary classifier - expect_error(pred_contr <- predict(bst.GLM, sparse_matrix, predcontrib = TRUE), regexp = NA) - expect_equal(dim(pred_contr), c(nrow(sparse_matrix), ncol(sparse_matrix) + 1)) - expect_equal(colnames(pred_contr), c(colnames(sparse_matrix), "BIAS")) - pred <- predict(bst.GLM, sparse_matrix, outputmargin = TRUE) - expect_lt(max(abs(rowSums(pred_contr) - pred)), 1e-5) - # manual calculation of linear terms - coefs <- xgb.dump(bst.GLM)[-c(1,2,4)] %>% as.numeric - coefs <- c(coefs[-1], coefs[1]) # intercept must be the last - pred_contr_manual <- sweep(cbind(sparse_matrix, 1), 2, coefs, FUN="*") - expect_equal(as.numeric(pred_contr), as.numeric(pred_contr_manual), 
- tolerance = float_tolerance) - - # gbtree multiclass - pred <- predict(mbst.Tree, as.matrix(iris[, -5]), outputmargin = TRUE, reshape = TRUE) - pred_contr <- predict(mbst.Tree, as.matrix(iris[, -5]), predcontrib = TRUE) - expect_is(pred_contr, "list") - expect_length(pred_contr, 3) - for (g in seq_along(pred_contr)) { - expect_equal(colnames(pred_contr[[g]]), c(colnames(iris[, -5]), "BIAS")) - expect_lt(max(abs(rowSums(pred_contr[[g]]) - pred[, g])), 1e-5) - } - - # gblinear multiclass (set base_score = 0, which is base margin in multiclass) - pred <- predict(mbst.GLM, as.matrix(iris[, -5]), outputmargin = TRUE, reshape = TRUE) - pred_contr <- predict(mbst.GLM, as.matrix(iris[, -5]), predcontrib = TRUE) - expect_length(pred_contr, 3) - coefs_all <- xgb.dump(mbst.GLM)[-c(1,2,6)] %>% as.numeric %>% matrix(ncol = 3, byrow = TRUE) - for (g in seq_along(pred_contr)) { - expect_equal(colnames(pred_contr[[g]]), c(colnames(iris[, -5]), "BIAS")) - expect_lt(max(abs(rowSums(pred_contr[[g]]) - pred[, g])), float_tolerance) - # manual calculation of linear terms - coefs <- c(coefs_all[-1, g], coefs_all[1, g]) # intercept needs to be the last - pred_contr_manual <- sweep(as.matrix(cbind(iris[,-5], 1)), 2, coefs, FUN="*") - expect_equal(as.numeric(pred_contr[[g]]), as.numeric(pred_contr_manual), - tolerance = float_tolerance) - } -}) - -test_that("SHAPs sum to predictions, with or without DART", { - d <- cbind( - x1 = rnorm(100), - x2 = rnorm(100), - x3 = rnorm(100)) - y <- d[,"x1"] + d[,"x2"]^2 + - ifelse(d[,"x3"] > .5, d[,"x3"]^2, 2^d[,"x3"]) + - rnorm(100) - nrounds <- 30 - - for (booster in list("gbtree", "dart")) { - fit <- xgboost( - params = c( - list( - booster = booster, - objective = "reg:squarederror", - eval_metric = "rmse"), - if (booster == "dart") - list(rate_drop = .01, one_drop = T)), - data = d, - label = y, - nrounds = nrounds) - - pr <- function(...) - predict(fit, newdata = d, ...) 
- pred <- pr() - shap <- pr(predcontrib = T) - shapi <- pr(predinteraction = T) - tol = 1e-5 - - expect_equal(rowSums(shap), pred, tol = tol) - expect_equal(apply(shapi, 1, sum), pred, tol = tol) - for (i in 1 : nrow(d)) - for (f in list(rowSums, colSums)) - expect_equal(f(shapi[i,,]), shap[i,], tol = tol) - } -}) - -test_that("xgb-attribute functionality", { - val <- "my attribute value" - list.val <- list(my_attr=val, a=123, b='ok') - list.ch <- list.val[order(names(list.val))] - list.ch <- lapply(list.ch, as.character) - # note: iter is 0-index in xgb attributes - list.default <- list(niter = as.character(nrounds - 1)) - list.ch <- c(list.ch, list.default) - # proper input: - expect_error(xgb.attr(bst.Tree, NULL)) - expect_error(xgb.attr(val, val)) - # set & get: - expect_null(xgb.attr(bst.Tree, "asdf")) - expect_equal(xgb.attributes(bst.Tree), list.default) - xgb.attr(bst.Tree, "my_attr") <- val - expect_equal(xgb.attr(bst.Tree, "my_attr"), val) - xgb.attributes(bst.Tree) <- list.val - expect_equal(xgb.attributes(bst.Tree), list.ch) - # serializing: - xgb.save(bst.Tree, 'xgb.model') - bst <- xgb.load('xgb.model') - if (file.exists('xgb.model')) file.remove('xgb.model') - expect_equal(xgb.attr(bst, "my_attr"), val) - expect_equal(xgb.attributes(bst), list.ch) - # deletion: - xgb.attr(bst, "my_attr") <- NULL - expect_null(xgb.attr(bst, "my_attr")) - expect_equal(xgb.attributes(bst), list.ch[c("a", "b", "niter")]) - xgb.attributes(bst) <- list(a=NULL, b=NULL) - expect_equal(xgb.attributes(bst), list.default) - xgb.attributes(bst) <- list(niter=NULL) - expect_null(xgb.attributes(bst)) -}) - -if (grepl('Windows', Sys.info()[['sysname']]) || - grepl('Linux', Sys.info()[['sysname']]) || - grepl('Darwin', Sys.info()[['sysname']])) { - test_that("xgb-attribute numeric precision", { - # check that lossless conversion works with 17 digits - # numeric -> character -> numeric - X <- 10^runif(100, -20, 20) - if (capabilities('long.double')) { - X2X <- as.numeric(format(X, digits = 17)) - expect_identical(X, X2X) - } - # retrieved attributes to be the same as written - for (x in X) { - xgb.attr(bst.Tree, "x") <- x - expect_equal(as.numeric(xgb.attr(bst.Tree, "x")), x, tolerance = float_tolerance) - xgb.attributes(bst.Tree) <- list(a = "A", b = x) - expect_equal(as.numeric(xgb.attr(bst.Tree, "b")), x, tolerance = float_tolerance) - } - }) -} - -test_that("xgb.Booster serializing as R object works", { - saveRDS(bst.Tree, 'xgb.model.rds') - bst <- readRDS('xgb.model.rds') - if (file.exists('xgb.model.rds')) file.remove('xgb.model.rds') - dtrain <- xgb.DMatrix(sparse_matrix, label = label) - expect_equal(predict(bst.Tree, dtrain), predict(bst, dtrain), tolerance = float_tolerance) - expect_equal(xgb.dump(bst.Tree), xgb.dump(bst)) - xgb.save(bst, 'xgb.model') - if (file.exists('xgb.model')) file.remove('xgb.model') - nil_ptr <- new("externalptr") - class(nil_ptr) <- "xgb.Booster.handle" - expect_true(identical(bst$handle, nil_ptr)) - bst <- xgb.Booster.complete(bst) - expect_true(!identical(bst$handle, nil_ptr)) - expect_equal(predict(bst.Tree, dtrain), predict(bst, dtrain), tolerance = float_tolerance) -}) - -test_that("xgb.model.dt.tree works with and without feature names", { - names.dt.trees <- c("Tree", "Node", "ID", "Feature", "Split", "Yes", "No", "Missing", "Quality", "Cover") - dt.tree <- xgb.model.dt.tree(feature_names = feature.names, model = bst.Tree) - expect_equal(names.dt.trees, names(dt.tree)) - if (!flag_32bit) - expect_equal(dim(dt.tree), c(188, 10)) - expect_output(str(dt.tree), 
'Feature.*\\"Age\\"') - - dt.tree.0 <- xgb.model.dt.tree(model = bst.Tree) - expect_equal(dt.tree, dt.tree.0) - - # when model contains no feature names: - bst.Tree.x <- bst.Tree - bst.Tree.x$feature_names <- NULL - dt.tree.x <- xgb.model.dt.tree(model = bst.Tree.x) - expect_output(str(dt.tree.x), 'Feature.*\\"3\\"') - expect_equal(dt.tree[, -4, with=FALSE], dt.tree.x[, -4, with=FALSE]) - - # using integer node ID instead of character - dt.tree.int <- xgb.model.dt.tree(model = bst.Tree, use_int_id = TRUE) - expect_equal(as.integer(tstrsplit(dt.tree$Yes, '-')[[2]]), dt.tree.int$Yes) - expect_equal(as.integer(tstrsplit(dt.tree$No, '-')[[2]]), dt.tree.int$No) - expect_equal(as.integer(tstrsplit(dt.tree$Missing, '-')[[2]]), dt.tree.int$Missing) -}) - -test_that("xgb.model.dt.tree throws error for gblinear", { - expect_error(xgb.model.dt.tree(model = bst.GLM)) -}) - -test_that("xgb.importance works with and without feature names", { - importance.Tree <- xgb.importance(feature_names = feature.names, model = bst.Tree) - if (!flag_32bit) - expect_equal(dim(importance.Tree), c(7, 4)) - expect_equal(colnames(importance.Tree), c("Feature", "Gain", "Cover", "Frequency")) - expect_output(str(importance.Tree), 'Feature.*\\"Age\\"') - - importance.Tree.0 <- xgb.importance(model = bst.Tree) - expect_equal(importance.Tree, importance.Tree.0, tolerance = float_tolerance) - - # when model contains no feature names: - bst.Tree.x <- bst.Tree - bst.Tree.x$feature_names <- NULL - importance.Tree.x <- xgb.importance(model = bst.Tree) - expect_equal(importance.Tree[, -1, with=FALSE], importance.Tree.x[, -1, with=FALSE], - tolerance = float_tolerance) - - imp2plot <- xgb.plot.importance(importance_matrix = importance.Tree) - expect_equal(colnames(imp2plot), c("Feature", "Gain", "Cover", "Frequency", "Importance")) - xgb.ggplot.importance(importance_matrix = importance.Tree) - - # for multiclass - imp.Tree <- xgb.importance(model = mbst.Tree) - expect_equal(dim(imp.Tree), c(4, 4)) - xgb.importance(model = mbst.Tree, trees = seq(from=0, by=nclass, length.out=nrounds)) -}) - -test_that("xgb.importance works with GLM model", { - importance.GLM <- xgb.importance(feature_names = feature.names, model = bst.GLM) - expect_equal(dim(importance.GLM), c(10, 2)) - expect_equal(colnames(importance.GLM), c("Feature", "Weight")) - xgb.importance(model = bst.GLM) - imp2plot <- xgb.plot.importance(importance.GLM) - expect_equal(colnames(imp2plot), c("Feature", "Weight", "Importance")) - xgb.ggplot.importance(importance.GLM) - - # for multiclass - imp.GLM <- xgb.importance(model = mbst.GLM) - expect_equal(dim(imp.GLM), c(12, 3)) - expect_equal(imp.GLM$Class, rep(0:2, each=4)) -}) - -test_that("xgb.model.dt.tree and xgb.importance work with a single split model", { - bst1 <- xgboost(data = sparse_matrix, label = label, max_depth = 1, - eta = 1, nthread = 2, nrounds = 1, verbose = 0, - objective = "binary:logistic") - expect_error(dt <- xgb.model.dt.tree(model = bst1), regexp = NA) # no error - expect_equal(nrow(dt), 3) - expect_error(imp <- xgb.importance(model = bst1), regexp = NA) # no error - expect_equal(nrow(imp), 1) - expect_equal(imp$Gain, 1) -}) - -test_that("xgb.plot.tree works with and without feature names", { - xgb.plot.tree(feature_names = feature.names, model = bst.Tree) - xgb.plot.tree(model = bst.Tree) -}) - -test_that("xgb.plot.multi.trees works with and without feature names", { - xgb.plot.multi.trees(model = bst.Tree, feature_names = feature.names, features_keep = 3) - xgb.plot.multi.trees(model = bst.Tree, 
features_keep = 3)
-})
-
-test_that("xgb.plot.deepness works", {
-  d2p <- xgb.plot.deepness(model = bst.Tree)
-  expect_equal(colnames(d2p), c("ID", "Tree", "Depth", "Cover", "Weight"))
-  xgb.plot.deepness(model = bst.Tree, which = "med.depth")
-  xgb.ggplot.deepness(model = bst.Tree)
-})
-
-test_that("xgb.plot.shap works", {
-  sh <- xgb.plot.shap(data = sparse_matrix, model = bst.Tree, top_n = 2, col = 4)
-  expect_equal(names(sh), c("data", "shap_contrib"))
-  expect_equal(NCOL(sh$data), 2)
-  expect_equal(NCOL(sh$shap_contrib), 2)
-})
-
-test_that("check.deprecation works", {
-  ttt <- function(a = NULL, DUMMY=NULL, ...) {
-    check.deprecation(...)
-    as.list((environment()))
-  }
-  res <- ttt(a = 1, DUMMY = 2, z = 3)
-  expect_equal(res, list(a = 1, DUMMY = 2))
-  expect_warning(
-    res <- ttt(a = 1, dummy = 22, z = 3)
-    , "\'dummy\' is deprecated")
-  expect_equal(res, list(a = 1, DUMMY = 22))
-  expect_warning(
-    res <- ttt(a = 1, dumm = 22, z = 3)
-    , "\'dumm\' was partially matched to \'dummy\'")
-  expect_equal(res, list(a = 1, DUMMY = 22))
-})
diff --git a/ml-xgboost/R-package/tests/testthat/test_interaction_constraints.R b/ml-xgboost/R-package/tests/testthat/test_interaction_constraints.R
deleted file mode 100644
index 9a3ddf4..0000000
--- a/ml-xgboost/R-package/tests/testthat/test_interaction_constraints.R
+++ /dev/null
@@ -1,55 +0,0 @@
-require(xgboost)
-
-context("interaction constraints")
-
-set.seed(1024)
-x1 <- rnorm(1000, 1)
-x2 <- rnorm(1000, 1)
-x3 <- sample(c(1,2,3), size=1000, replace=TRUE)
-y <- x1 + x2 + x3 + x1*x2*x3 + rnorm(1000, 0.001) + 3*sin(x1)
-train <- matrix(c(x1,x2,x3), ncol = 3)
-
-test_that("interaction constraints for regression", {
-  # Fit a model that only allows interaction between x1 and x2
-  bst <- xgboost(data = train, label = y, max_depth = 3,
-                 eta = 0.1, nthread = 2, nrounds = 100, verbose = 0,
-                 interaction_constraints = list(c(0,1)))
-
-  # Set all observations to have the same x3 values, then increment
-  # by the same amount
-  preds <- lapply(c(1,2,3), function(x){
-    tmat <- matrix(c(x1,x2,rep(x,1000)), ncol=3)
-    return(predict(bst, tmat))
-  })
-
-  # Check that incrementing x3 has the same effect on all observations,
-  # since x3 is constrained to be independent of x1 and x2
-  # and all observations start off from the same x3 value
-  diff1 <- preds[[2]] - preds[[1]]
-  test1 <- all(abs(diff1 - diff1[1]) < 1e-4)
-
-  diff2 <- preds[[3]] - preds[[2]]
-  test2 <- all(abs(diff2 - diff2[1]) < 1e-4)
-
-  expect_true({
-    test1 & test2
-  }, "Interaction Constraint Satisfied")
-})
-
-test_that("interaction constraints scientific representation", {
-  rows <- 10
-  ## When a number exceeds 1e5, the R paste function uses scientific representation.
- ## See: https://github.com/dmlc/xgboost/issues/5179 - cols <- 1e5+10 - - d <- matrix(rexp(rows, rate=.1), nrow=rows, ncol=cols) - y <- rnorm(rows) - - dtrain <- xgb.DMatrix(data=d, info = list(label=y)) - inc <- list(c(seq.int(from = 0, to = cols, by = 1))) - - with_inc <- xgb.train(data=dtrain, tree_method='hist', - interaction_constraints=inc, nrounds=10) - without_inc <- xgb.train(data=dtrain, tree_method='hist', nrounds=10) - expect_equal(xgb.save.raw(with_inc), xgb.save.raw(without_inc)) -}) diff --git a/ml-xgboost/R-package/tests/testthat/test_interactions.R b/ml-xgboost/R-package/tests/testthat/test_interactions.R deleted file mode 100644 index 20ee90c..0000000 --- a/ml-xgboost/R-package/tests/testthat/test_interactions.R +++ /dev/null @@ -1,141 +0,0 @@ -context('Test prediction of feature interactions') - -require(xgboost) -require(magrittr) - -set.seed(123) - -test_that("predict feature interactions works", { - # simulate some binary data and a linear outcome with an interaction term - N <- 1000 - P <- 5 - X <- matrix(rbinom(N * P, 1, 0.5), ncol=P, dimnames = list(NULL, letters[1:P])) - # center the data (as contributions are computed WRT feature means) - X <- scale(X, scale=FALSE) - - # outcome without any interactions, without any noise: - f <- function(x) 2 * x[, 1] - 3 * x[, 2] - # outcome with interactions, without noise: - f_int <- function(x) f(x) + 2 * x[, 2] * x[, 3] - # outcome with interactions, with noise: - #f_int_noise <- function(x) f_int(x) + rnorm(N, 0, 0.3) - - y <- f_int(X) - - dm <- xgb.DMatrix(X, label = y) - param <- list(eta=0.1, max_depth=4, base_score=mean(y), lambda=0, nthread=2) - b <- xgb.train(param, dm, 100) - - pred = predict(b, dm, outputmargin=TRUE) - - # SHAP contributions: - cont <- predict(b, dm, predcontrib=TRUE) - expect_equal(dim(cont), c(N, P+1)) - # make sure for each row they add up to marginal predictions - max(abs(rowSums(cont) - pred)) %>% expect_lt(0.001) - # Hand-construct the 'ground truth' feature contributions: - gt_cont <- cbind( - 2. * X[, 1], - -3. * X[, 2] + 1. * X[, 2] * X[, 3], # attribute a HALF of the interaction term to feature #2 - 1. * X[, 2] * X[, 3] # and another HALF of the interaction term to feature #3 - ) - gt_cont <- cbind(gt_cont, matrix(0, nrow=N, ncol=P + 1 - 3)) - # These should be relatively close: - expect_lt(max(abs(cont - gt_cont)), 0.05) - - - # SHAP interaction contributions: - intr <- predict(b, dm, predinteraction=TRUE) - expect_equal(dim(intr), c(N, P+1, P+1)) - # check assigned colnames - cn <- c(letters[1:P], "BIAS") - expect_equal(dimnames(intr), list(NULL, cn, cn)) - - # check the symmetry - max(abs(aperm(intr, c(1,3,2)) - intr)) %>% expect_lt(0.00001) - - # sums WRT columns must be close to feature contributions - max(abs(apply(intr, c(1,2), sum) - cont)) %>% expect_lt(0.00001) - - # diagonal terms for features 3,4,5 must be close to zero - Reduce(max, sapply(3:P, function(i) max(abs(intr[, i, i])))) %>% expect_lt(0.05) - - # BIAS must have no interactions - max(abs(intr[, 1:P, P+1])) %>% expect_lt(0.00001) - - # interactions other than 2 x 3 must be close to zero - intr23 <- intr - intr23[,2,3] <- 0 - Reduce(max, sapply(1:P, function(i) max(abs(intr23[, i, (i+1):(P+1)])))) %>% expect_lt(0.05) - - # Construct the 'ground truth' contributions of interactions directly from the linear terms: - gt_intr <- array(0, c(N, P+1, P+1)) - gt_intr[,2,3] <- 1. 
* X[, 2] * X[, 3] # attribute a HALF of the interaction term to each symmetric element - gt_intr[,3,2] <- gt_intr[, 2, 3] - # merge-in the diagonal based on 'ground truth' feature contributions - intr_diag = gt_cont - apply(gt_intr, c(1,2), sum) - for(j in seq_len(P)) { - gt_intr[,j,j] = intr_diag[,j] - } - # These should be relatively close: - expect_lt(max(abs(intr - gt_intr)), 0.1) -}) - -test_that("SHAP contribution values are not NAN", { - d <- data.frame( - x1 = c(-2.3, 1.4, 5.9, 2, 2.5, 0.3, -3.6, -0.2, 0.5, -2.8, -4.6, 3.3, -1.2, - -1.1, -2.3, 0.4, -1.5, -0.2, -1, 3.7), - x2 = c(291.179171, 269.198331, 289.942097, 283.191669, 269.673332, - 294.158346, 287.255835, 291.530838, 285.899586, 269.290833, - 268.649586, 291.530841, 280.074593, 269.484168, 293.94042, - 294.327506, 296.20709, 295.441669, 283.16792, 270.227085), - y = c(9, 15, 5.7, 9.2, 22.4, 5, 9, 3.2, 7.2, 13.1, 7.8, 16.9, 6.5, 22.1, - 5.3, 10.4, 11.1, 13.9, 11, 20.5), - fold = c(2, 2, 2, 1, 2, 2, 1, 2, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2)) - - ivs <- c("x1", "x2") - - fit <- xgboost( - verbose = 0, - params = list( - objective = "reg:squarederror", - eval_metric = "rmse"), - data = as.matrix(subset(d, fold == 2)[, ivs]), - label = subset(d, fold == 2)$y, - nthread = 1, - nrounds = 3) - - shaps <- as.data.frame(predict(fit, - newdata = as.matrix(subset(d, fold == 1)[, ivs]), - predcontrib = T)) - result <- cbind(shaps, sum = rowSums(shaps), pred = predict(fit, - newdata = as.matrix(subset(d, fold == 1)[, ivs]))) - - expect_true(identical(TRUE, all.equal(result$sum, result$pred, tol = 1e-6))) -}) - - -test_that("multiclass feature interactions work", { - dm <- xgb.DMatrix(as.matrix(iris[,-5]), label=as.numeric(iris$Species)-1) - param <- list(eta=0.1, max_depth=4, objective='multi:softprob', num_class=3) - b <- xgb.train(param, dm, 40) - pred = predict(b, dm, outputmargin=TRUE) %>% array(c(3, 150)) %>% t - - # SHAP contributions: - cont <- predict(b, dm, predcontrib=TRUE) - expect_length(cont, 3) - # rewrap them as a 3d array - cont <- unlist(cont) %>% array(c(150, 5, 3)) - # make sure for each row they add up to marginal predictions - max(abs(apply(cont, c(1,3), sum) - pred)) %>% expect_lt(0.001) - - # SHAP interaction contributions: - intr <- predict(b, dm, predinteraction=TRUE) - expect_length(intr, 3) - # rewrap them as a 4d array - intr <- unlist(intr) %>% array(c(150, 5, 5, 3)) %>% aperm(c(4, 1, 2, 3)) # [grp, row, col, col] - # check the symmetry - max(abs(aperm(intr, c(1,2,4,3)) - intr)) %>% expect_lt(0.00001) - # sums WRT columns must be close to feature contributions - max(abs(apply(intr, c(1,2,3), sum) - aperm(cont, c(3,1,2)))) %>% expect_lt(0.00001) -}) diff --git a/ml-xgboost/R-package/tests/testthat/test_lint.R b/ml-xgboost/R-package/tests/testthat/test_lint.R deleted file mode 100644 index 2f2a07d..0000000 --- a/ml-xgboost/R-package/tests/testthat/test_lint.R +++ /dev/null @@ -1,27 +0,0 @@ -context("Code is of high quality and lint free") -test_that("Code Lint", { - skip_on_cran() - skip_on_travis() - skip_if_not_installed("lintr") - my_linters <- list( - absolute_paths_linter=lintr::absolute_paths_linter, - assignment_linter=lintr::assignment_linter, - closed_curly_linter=lintr::closed_curly_linter, - commas_linter=lintr::commas_linter, - # commented_code_linter=lintr::commented_code_linter, - infix_spaces_linter=lintr::infix_spaces_linter, - line_length_linter=lintr::line_length_linter, - no_tab_linter=lintr::no_tab_linter, - object_usage_linter=lintr::object_usage_linter, - # 
snake_case_linter=lintr::snake_case_linter,
-    # multiple_dots_linter=lintr::multiple_dots_linter,
-    object_length_linter=lintr::object_length_linter,
-    open_curly_linter=lintr::open_curly_linter,
-    # single_quotes_linter=lintr::single_quotes_linter,
-    spaces_inside_linter=lintr::spaces_inside_linter,
-    spaces_left_parentheses_linter=lintr::spaces_left_parentheses_linter,
-    trailing_blank_lines_linter=lintr::trailing_blank_lines_linter,
-    trailing_whitespace_linter=lintr::trailing_whitespace_linter
-  )
-  # lintr::expect_lint_free(linters=my_linters) # uncomment this if you want to check code quality
-})
diff --git a/ml-xgboost/R-package/tests/testthat/test_monotone.R b/ml-xgboost/R-package/tests/testthat/test_monotone.R
deleted file mode 100644
index 9991e91..0000000
--- a/ml-xgboost/R-package/tests/testthat/test_monotone.R
+++ /dev/null
@@ -1,24 +0,0 @@
-require(xgboost)
-
-context("monotone constraints")
-
-set.seed(1024)
-x = rnorm(1000, 10)
-y = -1*x + rnorm(1000, 0.001) + 3*sin(x)
-train = matrix(x, ncol = 1)
-
-
-test_that("monotone constraints for regression", {
-  bst = xgboost(data = train, label = y, max_depth = 2,
-                eta = 0.1, nthread = 2, nrounds = 100, verbose = 0,
-                monotone_constraints = -1)
-
-  pred = predict(bst, train)
-
-  ind = order(train[,1])
-  pred.ord = pred[ind]
-  expect_true({
-    !any(diff(pred.ord) > 0)
-  }, "Monotone Constraint Satisfied")
-
-})
diff --git a/ml-xgboost/R-package/tests/testthat/test_parameter_exposure.R b/ml-xgboost/R-package/tests/testthat/test_parameter_exposure.R
deleted file mode 100644
index 1a0dcb3..0000000
--- a/ml-xgboost/R-package/tests/testthat/test_parameter_exposure.R
+++ /dev/null
@@ -1,30 +0,0 @@
-context('Test model params and call are exposed to R')
-
-require(xgboost)
-
-data(agaricus.train, package='xgboost')
-data(agaricus.test, package='xgboost')
-
-dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
-dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
-
-bst <- xgboost(data = dtrain,
-               max_depth = 2,
-               eta = 1,
-               nrounds = 10,
-               nthread = 1,
-               verbose = 0,
-               objective = "binary:logistic")
-
-test_that("call is exposed to R", {
-  expect_false(is.null(bst$call))
-  expect_is(bst$call, "call")
-})
-
-test_that("params is exposed to R", {
-  model_params <- bst$params
-  expect_is(model_params, "list")
-  expect_equal(model_params$eta, 1)
-  expect_equal(model_params$max_depth, 2)
-  expect_equal(model_params$objective, "binary:logistic")
-})
diff --git a/ml-xgboost/R-package/tests/testthat/test_poisson_regression.R b/ml-xgboost/R-package/tests/testthat/test_poisson_regression.R
deleted file mode 100644
index a48f2fc..0000000
--- a/ml-xgboost/R-package/tests/testthat/test_poisson_regression.R
+++ /dev/null
@@ -1,14 +0,0 @@
-context('Test poisson regression model')
-
-require(xgboost)
-set.seed(1994)
-
-test_that("poisson regression works", {
-  data(mtcars)
-  bst <- xgboost(data = as.matrix(mtcars[,-11]), label = mtcars[,11],
-                 objective = 'count:poisson', nrounds=10, verbose=0)
-  expect_equal(class(bst), "xgb.Booster")
-  pred <- predict(bst, as.matrix(mtcars[, -11]))
-  expect_equal(length(pred), 32)
-  expect_lt(sqrt(mean( (pred - mtcars[,11])^2 )), 1.2)
-})
diff --git a/ml-xgboost/R-package/tests/testthat/test_update.R b/ml-xgboost/R-package/tests/testthat/test_update.R
deleted file mode 100644
index fa48c91..0000000
--- a/ml-xgboost/R-package/tests/testthat/test_update.R
+++ /dev/null
@@ -1,107 +0,0 @@
-require(xgboost)
-
-context("update trees in an existing model")
-
-data(agaricus.train, package = 
'xgboost') -data(agaricus.test, package = 'xgboost') -dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label) -dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label) - -# Disable flaky tests for 32-bit Windows. -# See https://github.com/dmlc/xgboost/issues/3720 -win32_flag = .Platform$OS.type == "windows" && .Machine$sizeof.pointer != 8 - -test_that("updating the model works", { - watchlist = list(train = dtrain, test = dtest) - - # no-subsampling - p1 <- list(objective = "binary:logistic", max_depth = 2, eta = 0.05, nthread = 2) - set.seed(11) - bst1 <- xgb.train(p1, dtrain, nrounds = 10, watchlist, verbose = 0) - tr1 <- xgb.model.dt.tree(model = bst1) - - # with subsampling - p2 <- modifyList(p1, list(subsample = 0.1)) - set.seed(11) - bst2 <- xgb.train(p2, dtrain, nrounds = 10, watchlist, verbose = 0) - tr2 <- xgb.model.dt.tree(model = bst2) - - # the same no-subsampling boosting with an extra 'refresh' updater: - p1r <- modifyList(p1, list(updater = 'grow_colmaker,prune,refresh', refresh_leaf = FALSE)) - set.seed(11) - bst1r <- xgb.train(p1r, dtrain, nrounds = 10, watchlist, verbose = 0) - tr1r <- xgb.model.dt.tree(model = bst1r) - # all should be the same when no subsampling - expect_equal(bst1$evaluation_log, bst1r$evaluation_log) - if (!win32_flag) { - expect_equal(tr1, tr1r, tolerance = 0.00001, check.attributes = FALSE) - } - - # the same boosting with subsampling with an extra 'refresh' updater: - p2r <- modifyList(p2, list(updater = 'grow_colmaker,prune,refresh', refresh_leaf = FALSE)) - set.seed(11) - bst2r <- xgb.train(p2r, dtrain, nrounds = 10, watchlist, verbose = 0) - tr2r <- xgb.model.dt.tree(model = bst2r) - # should be the same evaluation but different gains and larger cover - expect_equal(bst2$evaluation_log, bst2r$evaluation_log) - if (!win32_flag) { - expect_equal(tr2[Feature == 'Leaf']$Quality, tr2r[Feature == 'Leaf']$Quality) - } - expect_gt(sum(abs(tr2[Feature != 'Leaf']$Quality - tr2r[Feature != 'Leaf']$Quality)), 100) - expect_gt(sum(tr2r$Cover) / sum(tr2$Cover), 1.5) - - # process type 'update' for no-subsampling model, refreshing the tree stats AND leaves from training data: - p1u <- modifyList(p1, list(process_type = 'update', updater = 'refresh', refresh_leaf = TRUE)) - bst1u <- xgb.train(p1u, dtrain, nrounds = 10, watchlist, verbose = 0, xgb_model = bst1) - tr1u <- xgb.model.dt.tree(model = bst1u) - # all should be the same when no subsampling - expect_equal(bst1$evaluation_log, bst1u$evaluation_log) - expect_equal(tr1, tr1u, tolerance = 0.00001, check.attributes = FALSE) - - # process type 'update' for model with subsampling, refreshing only the tree stats from training data: - p2u <- modifyList(p2, list(process_type = 'update', updater = 'refresh', refresh_leaf = FALSE)) - bst2u <- xgb.train(p2u, dtrain, nrounds = 10, watchlist, verbose = 0, xgb_model = bst2) - tr2u <- xgb.model.dt.tree(model = bst2u) - # should be the same evaluation but different gains and larger cover - expect_equal(bst2$evaluation_log, bst2u$evaluation_log) - expect_equal(tr2[Feature == 'Leaf']$Quality, tr2u[Feature == 'Leaf']$Quality) - expect_gt(sum(abs(tr2[Feature != 'Leaf']$Quality - tr2u[Feature != 'Leaf']$Quality)), 100) - expect_gt(sum(tr2u$Cover) / sum(tr2$Cover), 1.5) - # the results should be the same as for the model with an extra 'refresh' updater - expect_equal(bst2r$evaluation_log, bst2u$evaluation_log) - if (!win32_flag) { - expect_equal(tr2r, tr2u, tolerance = 0.00001, check.attributes = FALSE) - } - - # process type 'update' for 
no-subsampling model, refreshing only the tree stats from TEST data:
-  p1ut <- modifyList(p1, list(process_type = 'update', updater = 'refresh', refresh_leaf = FALSE))
-  bst1ut <- xgb.train(p1ut, dtest, nrounds = 10, watchlist, verbose = 0, xgb_model = bst1)
-  tr1ut <- xgb.model.dt.tree(model = bst1ut)
-  # should be the same evaluations but different gains and smaller cover (test data is smaller)
-  expect_equal(bst1$evaluation_log, bst1ut$evaluation_log)
-  expect_equal(tr1[Feature == 'Leaf']$Quality, tr1ut[Feature == 'Leaf']$Quality)
-  expect_gt(sum(abs(tr1[Feature != 'Leaf']$Quality - tr1ut[Feature != 'Leaf']$Quality)), 100)
-  expect_lt(sum(tr1ut$Cover) / sum(tr1$Cover), 0.5)
-})
-
-test_that("updating works for multiclass & multitree", {
-  dtr <- xgb.DMatrix(as.matrix(iris[, -5]), label = as.numeric(iris$Species) - 1)
-  watchlist <- list(train = dtr)
-  p0 <- list(max_depth = 2, eta = 0.5, nthread = 2, subsample = 0.6,
-             objective = "multi:softprob", num_class = 3, num_parallel_tree = 2,
-             base_score = 0)
-  set.seed(121)
-  bst0 <- xgb.train(p0, dtr, 5, watchlist, verbose = 0)
-  tr0 <- xgb.model.dt.tree(model = bst0)
-
-  # run update process for an original model with subsampling
-  p0u <- modifyList(p0, list(process_type='update', updater='refresh', refresh_leaf=FALSE))
-  bst0u <- xgb.train(p0u, dtr, nrounds = bst0$niter, watchlist, xgb_model = bst0, verbose = 0)
-  tr0u <- xgb.model.dt.tree(model = bst0u)
-
-  # should be the same evaluation but different gains and larger cover
-  expect_equal(bst0$evaluation_log, bst0u$evaluation_log)
-  expect_equal(tr0[Feature == 'Leaf']$Quality, tr0u[Feature == 'Leaf']$Quality)
-  expect_gt(sum(abs(tr0[Feature != 'Leaf']$Quality - tr0u[Feature != 'Leaf']$Quality)), 100)
-  expect_gt(sum(tr0u$Cover) / sum(tr0$Cover), 1.5)
-})
diff --git a/ml-xgboost/R-package/vignettes/discoverYourData.Rmd b/ml-xgboost/R-package/vignettes/discoverYourData.Rmd
deleted file mode 100644
index 67b7340..0000000
--- a/ml-xgboost/R-package/vignettes/discoverYourData.Rmd
+++ /dev/null
@@ -1,338 +0,0 @@
----
-title: "Understand your dataset with Xgboost"
-output:
-  rmarkdown::html_vignette:
-    css: vignette.css
-    number_sections: yes
-    toc: yes
-author: Tianqi Chen, Tong He, Michaël Benesty, Yuan Tang
-vignette: >
-  %\VignetteIndexEntry{Discover your data}
-  %\VignetteEngine{knitr::rmarkdown}
-  \usepackage[utf8]{inputenc}
----
-
-Understand your dataset with XGBoost
-====================================
-
-Introduction
-------------
-
-The purpose of this vignette is to show you how to use **Xgboost** to discover and understand your own dataset better.
-
-This vignette is not about predicting anything (see [Xgboost presentation](https://github.com/dmlc/xgboost/blob/master/R-package/vignettes/xgboostPresentation.Rmd)). We will explain how to use **Xgboost** to highlight the *link* between the *features* of your data and the *outcome*.
-
-Package loading:
-
-```{r libLoading, results='hold', message=F, warning=F}
-require(xgboost)
-require(Matrix)
-require(data.table)
-if (!require('vcd')) install.packages('vcd')
-```
-
-> The **VCD** package is used only for one of its embedded datasets.
-
-Preparation of the dataset
---------------------------
-
-### Numeric vs. categorical variables
-
-
-**Xgboost** manages only `numeric` vectors.
-
-What should you do when you have *categorical* data?
-
-A *categorical* variable has a fixed number of different values. For instance, if a variable called *Colour* can have only one of these three values, *red*, *blue* or *green*, then *Colour* is a *categorical* variable.
-
-> In **R**, a *categorical* variable is called a `factor`.
->
-> Type `?factor` in the console for more information.
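-
-For example, using the built-in `iris` dataset (a minimal sketch, independent of the data used below):
-
-```{r, eval=FALSE}
-# Species in the built-in iris data is a factor:
-class(iris$Species)   # "factor"
-levels(iris$Species)  # its fixed set of possible values
-```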
-To answer the question above, we will convert *categorical* variables to `numeric` ones.
-
-### Conversion from categorical to numeric variables
-
-#### Looking at the raw data
-
-In this vignette we will see how to transform a *dense* `data.frame` (*dense* = few zeroes in the matrix) with *categorical* variables to a very *sparse* matrix (*sparse* = lots of zeroes in the matrix) of `numeric` features.
-
-The method we are going to see is usually called [one-hot encoding](http://en.wikipedia.org/wiki/One-hot).
-
-The first step is to load the `Arthritis` dataset into memory and wrap it with the `data.table` package.
-
-```{r, results='hide'}
-data(Arthritis)
-df <- data.table(Arthritis, keep.rownames = F)
-```
-
-> `data.table` is 100% compliant with **R** `data.frame` but its syntax is more consistent and its performance for large datasets is [best in class](http://stackoverflow.com/questions/21435339/data-table-vs-dplyr-can-one-do-something-well-the-other-cant-or-does-poorly) (`dplyr` from **R** and `Pandas` from **Python** [included](https://github.com/Rdatatable/data.table/wiki/Benchmarks-%3A-Grouping)). Some parts of the **Xgboost** **R** package use `data.table`.
-
-The first thing we want to do is to have a look at the first few lines of the `data.table`:
-
-```{r}
-head(df)
-```
-
-Now we will check the format of each column.
-
-```{r}
-str(df)
-```
-
-Two columns have `factor` type, and one has `ordinal` type.
-
-> An `ordinal` variable:
->
-> * can take a limited number of values (like a `factor`);
-> * these values are ordered (unlike a `factor`). Here the ordered values are: `Marked > Some > None`
-
-#### Creation of new features based on old ones
-
-We will add some new *categorical* features to see if they help.
-
-##### Grouping per 10 years
-
-For the first feature, we create groups of age by rounding the real age.
-
-Note that we transform it to a `factor` so the algorithm treats these age groups as independent values.
-
-Therefore, 20 is not closer to 30 than to 60. To make it short, the distance between ages is lost in this transformation.
-
-```{r}
-head(df[,AgeDiscret := as.factor(round(Age/10,0))])
-```
-
-##### Random split into two groups
-
-The following is an even stronger simplification of the real age, with an arbitrary split at 30 years old. We choose this value **based on nothing**. We will see later whether simplifying the information based on arbitrary values is a good strategy (you may already have an idea of how well it will work...).
-
-```{r}
-head(df[,AgeCat:= as.factor(ifelse(Age > 30, "Old", "Young"))])
-```
-
-##### Risks in adding correlated features
-
-These new features are highly correlated with the `Age` feature because they are simple transformations of it.
-
-For many machine learning algorithms, using correlated features is not a good idea. It may sometimes make predictions less accurate, and most of the time it makes interpretation of the model almost impossible. GLMs, for instance, assume that the features are uncorrelated.
-
-Fortunately, decision tree algorithms (including boosted trees) are very robust to such features, so we do not need to do anything to manage this situation.
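-
-For illustration, a minimal sketch of how you could verify this correlation yourself (treating the factor level codes as numeric scores):
-
-```{r, eval=FALSE}
-# both engineered columns are near-deterministic functions of Age
-cor(df$Age, as.numeric(df$AgeDiscret))
-cor(df$Age, as.numeric(df$AgeCat == "Old"))
-```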
-
-##### Cleaning data
-
-We remove the ID column, as there is nothing to learn from this feature (it would just add some noise).
-
-```{r, results='hide'}
-df[,ID:=NULL]
-```
-
-We will list the different values for the column `Treatment`:
-
-```{r}
-levels(df[,Treatment])
-```
-
-
-#### Encoding categorical features
-
-In the next step, we will transform the categorical data into dummy variables.
-Several encoding methods exist, e.g., [one-hot encoding](http://en.wikipedia.org/wiki/One-hot) is a common approach.
-We will use the [dummy contrast coding](http://www.ats.ucla.edu/stat/r/library/contrast_coding.htm#dummy), which is popular because it produces a "full rank" encoding (also see [this blog post by Max Kuhn](http://appliedpredictivemodeling.com/blog/2013/10/23/the-basics-of-encoding-categorical-data-for-predictive-models)).
-
-The purpose is to transform each value of each *categorical* feature into a *binary* feature `{0, 1}`.
-
-For example, the column `Treatment` will be replaced by two columns, `TreatmentPlacebo` and `TreatmentTreated`. Each of them will be *binary*. Therefore, an observation which had the value `Placebo` in column `Treatment` before the transformation will, after the transformation, have the value `1` in the new column `TreatmentPlacebo` and the value `0` in the new column `TreatmentTreated`. The column `TreatmentPlacebo` will disappear during the contrast encoding, as it is absorbed into a common constant intercept column.
-
-The column `Improved` is excluded because it will be our `label` column, the one we want to predict.
-
-```{r, warning=FALSE,message=FALSE}
-sparse_matrix <- sparse.model.matrix(Improved ~ ., data = df)[,-1]
-head(sparse_matrix)
-```
-
-> The formula `Improved ~ .` used above means: transform all *categorical* features except the column `Improved` to binary values. The `-1` column selection removes the intercept column, which is full of `1` (this column is generated by the conversion). For more information, you can type `?sparse.model.matrix` in the console.
-
-Create the output `numeric` vector (not as a sparse `Matrix`):
-
-```{r}
-output_vector = df[,Improved] == "Marked"
-```
-
-1. set the `Y` vector to `0`;
-2. set `Y` to `1` for rows where `Improved == Marked` is `TRUE`;
-3. return the `Y` vector.
-
-Build the model
----------------
-
-The code below is very standard. For more information, you can look at the documentation of the `xgboost` function (or at the vignette [Xgboost presentation](https://github.com/dmlc/xgboost/blob/master/R-package/vignettes/xgboostPresentation.Rmd)).
-
-```{r}
-bst <- xgboost(data = sparse_matrix, label = output_vector, max_depth = 4,
-               eta = 1, nthread = 2, nrounds = 10,objective = "binary:logistic")
-
-```
-
-You can see `train-error: 0.XXXXX` lines in the output, one per boosting round. Each line shows how well the model explains your data; lower is better.
-
-A model which fits the training data too well may [overfit](http://en.wikipedia.org/wiki/Overfitting) (meaning it copies the past too closely and won't be that good at predicting the future).
-
-> Here you can see the numbers decrease until line 7 and then increase.
->
-> It probably means we are overfitting. To fix that, I should reduce the number of rounds to `nrounds = 4`. I will leave things as they are because I don't really care for the purpose of this example :-) A less arbitrary way to pick the number of rounds is sketched below.
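-
-A minimal sketch of that idea, assuming the `xgb.cv` interface of this package version (it accepts `data`/`label` directly and records a `best_iteration` when early stopping is used):
-
-```{r, eval=FALSE}
-# 5-fold cross-validation; stop once the test error has not improved for 3 rounds
-cv <- xgb.cv(data = sparse_matrix, label = output_vector, nfold = 5,
-             max_depth = 4, eta = 1, nthread = 2, nrounds = 10,
-             objective = "binary:logistic", early_stopping_rounds = 3, verbose = 0)
-cv$best_iteration
-```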
-
-Feature importance
-------------------
-
-## Measure feature importance
-
-
-### Build the feature importance data.table
-
-Remember, each binary column corresponds to a single value of one of the *categorical* features.
-
-```{r}
-importance <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst)
-head(importance)
-```
-
-> The column `Gain` provides the information we are looking for.
->
-> As you can see, features are ranked by `Gain`.
-
-`Gain` is the improvement in accuracy brought by a feature to the branches it is on. The idea is that before adding a new split on a feature X to a branch, there were some misclassified elements; after adding the split on this feature, there are two new branches, and each of these branches is more accurate (one branch saying: if your observation is on this branch then it should be classified as `1`, and the other branch saying the exact opposite).
-
-`Cover` measures the relative quantity of observations concerned by a feature.
-
-`Frequency` is a simpler way to measure `Gain`. It just counts the number of times a feature is used in all generated trees. You should not use it (unless you know why you want to use it).
-
-#### Improvement in the interpretability of the feature importance data.table
-
-We can go deeper in the analysis of the model. In the `data.table` above, we have discovered which features count when predicting whether the illness will go away or not. But we don't yet know the role of these features. For instance, one of the questions we may want to answer would be: does receiving a placebo treatment help to recover from the illness?
-
-One simple solution is to count the co-occurrences of a feature and a class of the classification.
-
-For that purpose, we will execute the same function as above, but using two more parameters, `data` and `label`.
-
-```{r}
-importanceRaw <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst, data = sparse_matrix, label = output_vector)
-
-# Cleaning for better display
-importanceClean <- importanceRaw[,`:=`(Cover=NULL, Frequency=NULL)]
-
-head(importanceClean)
-```
-
-> In the table above, we have removed two columns that are not needed and selected only the first lines.
-
-The first thing you notice is the new column `Split`. It is the split applied to the feature on a branch of one of the trees. Each split is present, so a feature can appear several times in this table. Here we can see the feature `Age` is used several times with different splits.
-
-How is the split applied to count the co-occurrences? It is always `<`. For instance, in the second line, we measure the number of persons under 61.5 years with the illness gone after the treatment.
-
-The two other new columns are `RealCover` and `RealCover %`. The first column measures the number of observations in the dataset where the split is respected and the label is marked as `1`. The second column is the percentage of the whole population that `RealCover` represents.
-
-Therefore, according to our findings, getting a placebo doesn't seem to help, but being younger than 61 years may help (which seems logical).
-
-> You may wonder how to interpret the `< 1.00001` on the first line. Basically, in a sparse `Matrix`, there is no `0`; therefore, looking for one-hot-encoded categorical observations validating the rule `< 1.00001` is just like looking for `1` for this feature.
-
-### Plotting the feature importance
-
-
-All these things are nice, but it would be even better to plot the results.
-
-```{r, fig.width=8, fig.height=5, fig.align='center'}
-xgb.plot.importance(importance_matrix = importance)
-```
-
-Features have automatically been divided into 2 clusters: the interesting features... and the others.
-
-> Depending on the dataset and the learning parameters, you may have more than two clusters. The default is to limit them to `10`, but you can increase this limit. Look at the function documentation for more information.
-
-According to the plot above, the most important features in this dataset for predicting whether the treatment will work are:
-
-* the Age;
-* having received a placebo or not;
-* the sex comes third, but it is already in the group of not-so-interesting features;
-* then come our generated features (AgeDiscret). We can see that their contribution is very low.
-
-### Do these results make sense?
-
-
-Let's compute a **Chi2** (chi-squared) statistic between each of these features and the label.
-
-A higher **Chi2** statistic means a stronger association with the label.
-
-```{r, warning=FALSE, message=FALSE}
-c2 <- chisq.test(df$Age, output_vector)
-print(c2)
-```
-
-The Pearson chi-squared statistic between Age and the illness disappearing is **`r round(c2$statistic, 2 )`**.
-
-```{r, warning=FALSE, message=FALSE}
-c2 <- chisq.test(df$AgeDiscret, output_vector)
-print(c2)
-```
-
-Our first simplification of Age gives a chi-squared statistic of **`r round(c2$statistic, 2)`**.
-
-```{r, warning=FALSE, message=FALSE}
-c2 <- chisq.test(df$AgeCat, output_vector)
-print(c2)
-```
-
-The perfectly arbitrary split I made between young and old at 30 years old has a low statistic of **`r round(c2$statistic, 2)`**. It's a result we might expect: maybe in my mind being over 30 years old means being old (I am 32 and starting to feel old, which may explain it), but for the illness we are studying, the vulnerable age is not the same.
-
-Moral: don't let your *gut* lower the quality of your model.
-
-The expression *data science* contains the word *science* for a reason :-)
-
-Conclusion
-----------
-
-As you can see, in general *destroying information by simplifying it won't improve your model*. The **Chi2** comparison just demonstrated that.
-
-But in more complex cases, creating a new feature from an existing one that makes the link with the outcome more obvious may help the algorithm and improve the model.
-
-The case studied here is not complex enough to show that. Check the [Kaggle website](http://www.kaggle.com/) for some challenging datasets. However, it almost always gets worse when you add arbitrary rules.
-
-Moreover, you can notice that even though we have added some useless new features highly correlated with other features, the boosted tree algorithm was still able to choose the best one, which in this case is the Age.
-
-A linear model may not be that smart in this scenario.
-
-Special Note: What about Random Forests™?
------------------------------------------
-
-As you may know, the [Random Forests™](http://en.wikipedia.org/wiki/Random_forest) algorithm is a cousin of boosting; both are part of the [ensemble learning](http://en.wikipedia.org/wiki/Ensemble_learning) family.
-
-Both train several decision trees for one dataset. The *main* difference is that in Random Forests™ the trees are independent, while in boosting the tree `N+1` focuses its learning on the loss (<=> what has not been well modeled by tree `N`).
-
-This difference has an impact on a corner case in feature importance analysis: *correlated features*.
-
-Imagine two perfectly correlated features, feature `A` and feature `B`. For one specific tree, if the algorithm needs one of them, it will choose randomly (this is true in both boosting and Random Forests™).
-
-However, in Random Forests™ this random choice is made for each tree, because each tree is independent of the others.
Therefore, approximately, and depending on your parameters, 50% of the trees will choose feature `A` and the other 50% will choose feature `B`. So the *importance* of the information contained in `A` and `B` (which is the same, because they are perfectly correlated) is diluted between `A` and `B`. So you won't easily know that this information is important for predicting what you want to predict! It is even worse when you have 10 correlated features...
-
-In boosting, when a specific link between feature and outcome has been learned by the algorithm, it will try not to refocus on it (that is what happens in theory; reality is not always that simple). Therefore, all the importance will be on feature `A` or on feature `B` (but not on both). You will know that one feature has an important role in the link between the observations and the label. It is still up to you to search for the features correlated with the one detected as important, if you need to know all of them.
-
-If you want to try the Random Forests™ algorithm, you can tweak the Xgboost parameters!
-
-For instance, to compute a model with 1000 trees, with a 0.5 factor on sampling rows and columns:
-
-```{r, warning=FALSE, message=FALSE}
-data(agaricus.train, package='xgboost')
-data(agaricus.test, package='xgboost')
-train <- agaricus.train
-test <- agaricus.test
-
-#Random Forest™ - 1000 trees
-bst <- xgboost(data = train$data, label = train$label, max_depth = 4, num_parallel_tree = 1000, subsample = 0.5, colsample_bytree =0.5, nrounds = 1, objective = "binary:logistic")
-
-#Boosting - 3 rounds
-bst <- xgboost(data = train$data, label = train$label, max_depth = 4, nrounds = 3, objective = "binary:logistic")
-```
-
-> Note that for the Random Forest™ model the parameter `nrounds` is set to `1`.
-
-> [**Random Forests™**](https://www.stat.berkeley.edu/~breiman/RandomForests/cc_papers.htm) is a trademark of Leo Breiman and Adele Cutler and is licensed exclusively to Salford Systems for the commercial release of the software.
diff --git a/ml-xgboost/R-package/vignettes/vignette.css b/ml-xgboost/R-package/vignettes/vignette.css
deleted file mode 100644
index 59dfcd8..0000000
--- a/ml-xgboost/R-package/vignettes/vignette.css
+++ /dev/null
@@ -1,225 +0,0 @@
-body {
-  margin: 0 auto;
-  background-color: white;
-
-/* --------- FONT FAMILY --------
-  following are some optional font families.
Usually a family - is safer to choose than a specific font, - which may not be on the users computer */ -/ font-family:Georgia, Palatino, serif; - font-family: "Open Sans", "Book Antiqua", Palatino, serif; -/ font-family:Arial, Helvetica, sans-serif; -/ font-family:Tahoma, Verdana, Geneva, sans-serif; -/ font-family:Courier, monospace; -/ font-family:"Times New Roman", Times, serif; - -/* -------------- COLOR OPTIONS ------------ - following are additional color options for base font - you could uncomment another one to easily change the base color - or add one to a specific element style below */ - color: #333333; /* dark gray not black */ -/ color: #000000; /* black */ -/ color: #666666; /* medium gray black */ -/ color: #E3E3E3; /* very light gray */ -/ color: white; - - line-height: 100%; - max-width: 800px; - padding: 10px; - font-size: 17px; - text-align: justify; - text-justify: inter-word; -} - - -p { - line-height: 150%; -/ max-width: 540px; - max-width: 960px; - margin-bottom: 5px; - font-weight: 400; -/ color: #333333 -} - - -h1, h2, h3, h4, h5, h6 { - font-weight: 400; - margin-top: 35px; - margin-bottom: 15px; - padding-top: 10px; -} - -h1 { - margin-top: 70px; - color: #606AAA; - font-size:230%; - font-variant:small-caps; - padding-bottom:20px; - width:100%; - border-bottom:1px solid #606AAA; -} - -h2 { - font-size:160%; -} - -h3 { - font-size:130%; -} - -h4 { - font-size:120%; - font-variant:small-caps; -} - -h5 { - font-size:120%; -} - -h6 { - font-size:120%; - font-variant:small-caps; -} - -a { - color: #606AAA; - margin: 0; - padding: 0; - vertical-align: baseline; -} - -a:hover { - text-decoration: blink; - color: green; -} - -a:visited { - color: gray; -} - -ul, ol { - padding: 0; - margin: 0px 0px 0px 50px; -} -ul { - list-style-type: square; - list-style-position: inside; - -} - -li { - line-height:150% -} - -li ul, li ul { - margin-left: 24px; -} - -pre { - padding: 0px 10px; - max-width: 800px; - white-space: pre-wrap; -} - -code { - font-family: Consolas, Monaco, Andale Mono, monospace, courrier new; - line-height: 1.5; - font-size: 15px; - background: #F8F8F8; - border-radius: 4px; - padding: 5px; - display: inline-block; - max-width: 800px; - white-space: pre-wrap; -} - - -li code, p code { - background: #CDCDCD; - color: #606AAA; - padding: 0px 5px 0px 5px; -} - -code.r, code.cpp { - display: block; - word-wrap: break-word; - border: 1px solid #606AAA; -} - -aside { - display: block; - float: right; - width: 390px; -} - -blockquote { - border-left:.5em solid #606AAA; - background: #F8F8F8; - padding: 0em 1em 0em 1em; - margin-left:10px; - max-width: 500px; -} - -blockquote cite { - line-height:10px; - color:#bfbfbf; -} - -blockquote cite:before { - /content: '\2014 \00A0'; -} - -blockquote p, blockquote li { - color: #666; -} -hr { -/ width: 540px; - text-align: left; - margin: 0 auto 0 0; - color: #999; -} - - -/* table */ - -table { - width: 100%; - border-top: 1px solid #919699; - border-left: 1px solid #919699; - border-spacing: 0; -} - -table th { - padding: 4px 8px 4px 8px; - text-align: center; - color: white; - background: #606AAA; - border-bottom: 1px solid #919699; - border-right: 1px solid #919699; -} -table th p { - font-weight: bold; - margin-bottom: 0px; -} - -table td { - padding: 8px; - vertical-align: top; - border-bottom: 1px solid #919699; - border-right: 1px solid #919699; -} - -table td:last-child { - /background: lightgray; - text-align: right; -} - -table td p { - margin-bottom: 0px; -} -table td p + p { - margin-top: 5px; -} -table td p + 
p + p {
-  margin-top: 5px;
-}
diff --git a/ml-xgboost/R-package/vignettes/xgboost.Rnw b/ml-xgboost/R-package/vignettes/xgboost.Rnw
deleted file mode 100644
index dfbb2f1..0000000
--- a/ml-xgboost/R-package/vignettes/xgboost.Rnw
+++ /dev/null
@@ -1,222 +0,0 @@
-\documentclass{article}
-\RequirePackage{url}
-\usepackage{hyperref}
-\RequirePackage{amsmath}
-\RequirePackage{natbib}
-\RequirePackage[a4paper,lmargin={1.25in},rmargin={1.25in},tmargin={1in},bmargin={1in}]{geometry}
-
-\makeatletter
-% \VignetteIndexEntry{xgboost: eXtreme Gradient Boosting}
-%\VignetteKeywords{xgboost, gbm, gradient boosting machines}
-%\VignettePackage{xgboost}
-% \VignetteEngine{knitr::knitr}
-\makeatother
-
-\begin{document}
-%\SweaveOpts{concordance=TRUE}
-
-<<>>=
-if (require('knitr')) opts_chunk$set(fig.width = 5, fig.height = 5, fig.align = 'center', tidy = FALSE, warning = FALSE, cache = TRUE)
-@
-
-%
-<<>>=
-xgboost.version <- packageDescription("xgboost")$Version
-
-@
-%
-
- \begin{center}
- \vspace*{6\baselineskip}
- \rule{\textwidth}{1.6pt}\vspace*{-\baselineskip}\vspace*{2pt}
- \rule{\textwidth}{0.4pt}\\[2\baselineskip]
- {\LARGE \textbf{xgboost: eXtreme Gradient Boosting}}\\[1.2\baselineskip]
- \rule{\textwidth}{0.4pt}\vspace*{-\baselineskip}\vspace{3.2pt}
- \rule{\textwidth}{1.6pt}\\[2\baselineskip]
- {\Large Tianqi Chen, Tong He}\\[\baselineskip]
- {\large Package Version: \Sexpr{xgboost.version}}\\[\baselineskip]
- {\large \today}\par
- \vfill
- \end{center}
-
-\thispagestyle{empty}
-
-\clearpage
-
-\setcounter{page}{1}
-
-\section{Introduction}
-
-This is an introductory document to using the \verb@xgboost@ package in R.
-
-\verb@xgboost@ is short for the eXtreme Gradient Boosting package. It is an efficient
- and scalable implementation of the gradient boosting framework \citep{friedman2001greedy} \citep{friedman2000additive}.
-The package includes an efficient linear model solver and a tree learning algorithm.
-It supports various objective functions, including regression, classification
-and ranking. The package is made to be extensible, so that users can also easily define their own objectives. It has several features:
-\begin{enumerate}
-  \item{Speed: }{\verb@xgboost@ can automatically do parallel computation on
-  Windows and Linux, with OpenMP. It is generally over 10 times faster than
-  \verb@gbm@.}
-  \item{Input Type: }{\verb@xgboost@ takes several types of input data:}
-  \begin{itemize}
-    \item{Dense Matrix: }{R's dense matrix, i.e. \verb@matrix@}
-    \item{Sparse Matrix: }{R's sparse matrix \verb@Matrix::dgCMatrix@}
-    \item{Data File: }{Local data files}
-    \item{xgb.DMatrix: }{\verb@xgboost@'s own class. Recommended.}
-  \end{itemize}
-  \item{Sparsity: }{\verb@xgboost@ accepts sparse input for both tree booster
-  and linear booster, and is optimized for sparse input.}
-  \item{Customization: }{\verb@xgboost@ supports customized objective functions
-  and evaluation functions.}
-  \item{Performance: }{\verb@xgboost@ has better performance on several different
-  datasets.}
-\end{enumerate}
-
-
-\section{Example with Mushroom data}
-
-In this section, we will illustrate some common usage of \verb@xgboost@. The
-Mushroom data is cited from the UCI Machine Learning Repository.
\citep{Bache+Lichman:2013}
-
-<<>>=
-library(xgboost)
-data(agaricus.train, package='xgboost')
-data(agaricus.test, package='xgboost')
-train <- agaricus.train
-test <- agaricus.test
-bst <- xgboost(data = train$data, label = train$label, max_depth = 2, eta = 1,
-               nrounds = 2, objective = "binary:logistic")
-xgb.save(bst, 'model.save')
-bst = xgb.load('model.save')
-pred <- predict(bst, test$data)
-@
-
-\verb@xgboost@ is the main function to train a \verb@Booster@, i.e. a model.
-\verb@predict@ does prediction with the model.
-
-Here we can save the model to a binary local file and load it back when needed.
-We can't inspect the trees inside that binary file. However, we have another function to save the
-model in plain text.
-<<>>=
-xgb.dump(bst, 'model.dump')
-@
-
-The output looks like
-
-\begin{verbatim}
-booster[0]:
-0:[f28<1.00001] yes=1,no=2,missing=2
-  1:[f108<1.00001] yes=3,no=4,missing=4
-    3:leaf=1.85965
-    4:leaf=-1.94071
-  2:[f55<1.00001] yes=5,no=6,missing=6
-    5:leaf=-1.70044
-    6:leaf=1.71218
-booster[1]:
-0:[f59<1.00001] yes=1,no=2,missing=2
-  1:leaf=-6.23624
-  2:[f28<1.00001] yes=3,no=4,missing=4
-    3:leaf=-0.96853
-    4:leaf=0.784718
-\end{verbatim}
-
-It is important to know \verb@xgboost@'s own data type: \verb@xgb.DMatrix@.
-It speeds up \verb@xgboost@, and is needed for advanced features such as
-training from an initial prediction value, or weighted training instances.
-
-We can use \verb@xgb.DMatrix@ to construct an \verb@xgb.DMatrix@ object:
-<<>>=
-dtrain <- xgb.DMatrix(train$data, label = train$label)
-class(dtrain)
-head(getinfo(dtrain,'label'))
-@
-
-We can also save the matrix to a binary file, and then load it back simply with
-\verb@xgb.DMatrix@:
-<<>>=
-xgb.DMatrix.save(dtrain, 'xgb.DMatrix')
-dtrain = xgb.DMatrix('xgb.DMatrix')
-@
-
-\section{Advanced Examples}
-
-The function \verb@xgboost@ is a simple function with fewer parameters, in order
-to be R-friendly. The core training function is wrapped in \verb@xgb.train@. It is more flexible than \verb@xgboost@, but it requires users to read the documentation a bit more carefully.
-
-\verb@xgb.train@ only accepts an \verb@xgb.DMatrix@ object as its input, while it supports advanced features such as custom objective and evaluation functions.
-
-<<>>=
-logregobj <- function(preds, dtrain) {
-  labels <- getinfo(dtrain, "label")
-  preds <- 1/(1 + exp(-preds))
-  grad <- preds - labels
-  hess <- preds * (1 - preds)
-  return(list(grad = grad, hess = hess))
-}
-
-evalerror <- function(preds, dtrain) {
-  labels <- getinfo(dtrain, "label")
-  err <- sqrt(mean((preds-labels)^2))
-  return(list(metric = "MSE", value = err))
-}
-
-dtest <- xgb.DMatrix(test$data, label = test$label)
-watchlist <- list(eval = dtest, train = dtrain)
-param <- list(max_depth = 2, eta = 1, silent = 1)
-
-bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, logregobj, evalerror, maximize = FALSE)
-@
-
-The gradient and the second-order gradient (hessian) are required as the output of a customized
-objective function.
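-
-For reference, here is a brief sketch of the standard derivation behind the \verb@logregobj@ function above (an addition for clarity; it simply restates the textbook result). With raw prediction $\hat{y}$, label $y$ and $p = 1/(1+e^{-\hat{y}})$, the per-instance logistic loss is $\ell = -\left[y\log p + (1-y)\log(1-p)\right]$, and
-\[
-\frac{\partial \ell}{\partial \hat{y}} = p - y,
-\qquad
-\frac{\partial^2 \ell}{\partial \hat{y}^2} = p\,(1-p),
-\]
-which are exactly \verb@grad@ and \verb@hess@ in the code.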
-
-We also have \verb@slice@ for row extraction. It is useful in
-cross-validation.
-
-For a walkthrough demo, please see \verb@R-package/demo/@ for further
-details.
-
-\section{The Higgs Boson competition}
-
-We have made a demo for \href{http://www.kaggle.com/c/higgs-boson}{the Higgs
-Boson Machine Learning Challenge}.
-
-Here are the instructions to make a submission:
-\begin{enumerate}
-  \item Download the \href{http://www.kaggle.com/c/higgs-boson/data}{datasets}
-  and extract them to \verb@data/@.
-  \item Run scripts under \verb@xgboost/demo/kaggle-higgs/@:
-  \href{https://github.com/tqchen/xgboost/blob/master/demo/kaggle-higgs/higgs-train.R}{higgs-train.R}
-  and \href{https://github.com/tqchen/xgboost/blob/master/demo/kaggle-higgs/higgs-pred.R}{higgs-pred.R}.
-  The computation will take less than a minute on an Intel i7.
-  \item Go to the \href{http://www.kaggle.com/c/higgs-boson/submissions/attach}{submission page}
-  and submit your result.
-\end{enumerate}
-
-We provide \href{https://github.com/tqchen/xgboost/blob/master/demo/kaggle-higgs/speedtest.R}{a script}
-to compare the time cost on the higgs dataset with \verb@gbm@ and \verb@xgboost@.
-The training set contains 350000 records and 30 features.
-
-\verb@xgboost@ can automatically do parallel computation. On a machine with an Intel
-i7-4700MQ and 24GB of memory, we found that \verb@xgboost@ costs about 35 seconds, which is about 20 times faster
-than \verb@gbm@. When we limited \verb@xgboost@ to use only one thread, it was
-still about two times faster than \verb@gbm@.
-
-Meanwhile, the result from \verb@xgboost@ reaches
-\href{http://www.kaggle.com/c/higgs-boson/details/evaluation}{3.60@AMS} with a
-single model. This result stands in the
-\href{http://www.kaggle.com/c/higgs-boson/leaderboard}{top 30\%} of the
-competition.
-
-\bibliographystyle{jss}
-\nocite{*} % list uncited references
-\bibliography{xgboost}
-
-\end{document}
-
-<<>>=
-file.remove("xgb.DMatrix")
-file.remove("model.dump")
-file.remove("model.save")
-@
diff --git a/ml-xgboost/R-package/vignettes/xgboost.bib b/ml-xgboost/R-package/vignettes/xgboost.bib
deleted file mode 100644
index f21bdae..0000000
--- a/ml-xgboost/R-package/vignettes/xgboost.bib
+++ /dev/null
@@ -1,30 +0,0 @@
-@article{friedman2001greedy,
-  title={Greedy function approximation: a gradient boosting machine},
-  author={Friedman, Jerome H},
-  journal={Annals of Statistics},
-  pages={1189--1232},
-  year={2001},
-  publisher={JSTOR}
-}
-
-@article{friedman2000additive,
-  title={Additive logistic regression: a statistical view of boosting (with discussion and a rejoinder by the authors)},
-  author={Friedman, Jerome and Hastie, Trevor and Tibshirani, Robert and others},
-  journal={The Annals of Statistics},
-  volume={28},
-  number={2},
-  pages={337--407},
-  year={2000},
-  publisher={Institute of Mathematical Statistics}
-}
-
-
-@misc{Bache+Lichman:2013,
-  author = "K. Bache and M. Lichman",
-  year = "2013",
-  title = "{UCI} Machine Learning Repository",
-  url = "http://archive.ics.uci.edu/ml",
-  institution = "University of California, Irvine, School of Information and Computer Sciences"
-}
-
diff --git a/ml-xgboost/R-package/vignettes/xgboostPresentation.Rmd b/ml-xgboost/R-package/vignettes/xgboostPresentation.Rmd
deleted file mode 100644
index 6d1bab7..0000000
--- a/ml-xgboost/R-package/vignettes/xgboostPresentation.Rmd
+++ /dev/null
@@ -1,428 +0,0 @@
----
-title: "Xgboost presentation"
-output:
-  rmarkdown::html_vignette:
-    css: vignette.css
-    number_sections: yes
-    toc: yes
-bibliography: xgboost.bib
-author: Tianqi Chen, Tong He, Michaël Benesty
-vignette: >
-  %\VignetteIndexEntry{Xgboost presentation}
-  %\VignetteEngine{knitr::rmarkdown}
-  \usepackage[utf8]{inputenc}
----
-
-XGBoost R Tutorial
-==================
-
-## Introduction
-
-
-**Xgboost** is short for the e**X**treme **G**radient **Boost**ing package.
-
-The purpose of this vignette is to show you how to use **Xgboost** to build a model and make predictions.
-
-It is an efficient and scalable implementation of the gradient boosting framework of @friedman2000additive and @friedman2001greedy. Two solvers are included:
-
-- *linear* model;
-- *tree learning* algorithm.
-
-It supports various objective functions, including *regression*, *classification* and *ranking*. The package is made to be extensible, so users can also easily define their own objective functions.
-
-It has been [used](https://github.com/dmlc/xgboost) to win several [Kaggle](http://www.kaggle.com) competitions.
-
-It has several features:
-
-* Speed: it can automatically do parallel computation on *Windows* and *Linux*, with *OpenMP*. It is generally over 10 times faster than the classical `gbm`.
-* Input Type: it takes several types of input data:
-    * *Dense* Matrix: *R*'s *dense* matrix, i.e. `matrix`;
    * *Sparse* Matrix: *R*'s *sparse* matrix, i.e. `Matrix::dgCMatrix`;
    * Data File: local data files;
    * `xgb.DMatrix`: its own class (recommended).
-* Sparsity: it accepts *sparse* input for both *tree booster* and *linear booster*, and is optimized for *sparse* input;
-* Customization: it supports customized objective functions and evaluation functions.
-
-## Installation
-
-
-### Github version
-
-
-For the weekly updated version (highly recommended), install from *Github*:
-
-```{r installGithub, eval=FALSE}
-install.packages("drat", repos="https://cran.rstudio.com")
-drat:::addRepo("dmlc")
-install.packages("xgboost", repos="http://dmlc.ml/drat/", type = "source")
-```
-
-> *Windows* users will need to install [Rtools](https://cran.r-project.org/bin/windows/Rtools/) first.
-
-### CRAN version
-
-
-Version 0.4-2 is on CRAN, and you can install it with:
-
-```{r, eval=FALSE}
-install.packages("xgboost")
-```
-
-Formerly available versions can be obtained from the CRAN [archive](https://cran.r-project.org/src/contrib/Archive/xgboost).
-
-## Learning
-
-
-For the purpose of this tutorial we will load the **XGBoost** package.
-
-```{r libLoading, results='hold', message=F, warning=F}
-require(xgboost)
-```
-
-### Dataset presentation
-
-
-In this example, we are aiming to predict whether a mushroom can be eaten or not (like in many tutorials, the example data are just what you would use in your everyday life :-).
-
-The mushroom data is taken from the UCI Machine Learning Repository [@Bache+Lichman:2013].
-
-### Dataset loading
-
-
-We will load the `agaricus` datasets embedded with the package and link them to variables.
-
-The datasets are already split into:
-
-* `train`: will be used to build the model;
-* `test`: will be used to assess the quality of our model.
-
-Why *split* the dataset in two parts?
-
-In the first part we will build our model. In the second part we will want to test it and assess its quality. Without dividing the dataset, we would test the model on data which the algorithm has already seen.
-
-```{r datasetLoading, results='hold', message=F, warning=F}
-data(agaricus.train, package='xgboost')
-data(agaricus.test, package='xgboost')
-train <- agaricus.train
-test <- agaricus.test
-```
-
-> In the real world, it would be up to you to make this division between `train` and `test` data. How to do it is beyond the scope of this article; however, the `caret` package may [help](http://topepo.github.io/caret/data-splitting.html).
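-
-As a rough illustration of such a division, here is a minimal sketch of a
-random 75/25 split in base R (the ratio and seed are arbitrary choices made up
-for this example; `caret` offers more robust tooling):
-
-```{r, eval=FALSE}
-set.seed(42)                               # for reproducibility
-n <- nrow(agaricus.train$data)             # number of observations
-idx <- sample(n, size = floor(0.75 * n))   # indices of the training part
-train_part <- list(data  = agaricus.train$data[idx, ],
-                   label = agaricus.train$label[idx])
-test_part  <- list(data  = agaricus.train$data[-idx, ],
-                   label = agaricus.train$label[-idx])
-```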
-
-Each variable is a `list` containing two things, `label` and `data`:
-
-```{r dataList, message=F, warning=F}
-str(train)
-```
-
-`label` is the outcome of our dataset, i.e. the binary *classification* target we will try to predict.
-
-Let's discover the dimensionality of our datasets.
-
-```{r dataSize, message=F, warning=F}
-dim(train$data)
-dim(test$data)
-```
-
-This dataset is kept very small so that the **R** package does not become too heavy; however, **XGBoost** is built to manage huge datasets very efficiently.
-
-As seen below, the `data` are stored in a `dgCMatrix`, which is a *sparse* matrix, and the `label` is a `numeric` vector (`{0,1}`):
-
-```{r dataClass, message=F, warning=F}
-class(train$data)[1]
-class(train$label)
-```
-
-### Basic Training using XGBoost
-
-
-This step is the most critical part of the process for the quality of our model.
-
-#### Basic training
-
-We are using the `train` data. As explained above, both `data` and `label` are stored in a `list`.
-
-In a *sparse* matrix, cells containing `0` are not stored in memory. Therefore, in a dataset mainly made of `0`s, memory size is reduced. Such datasets are very common.
-
-We will train a decision tree model using the following parameters:
-
-* `objective = "binary:logistic"`: we will train a binary classification model;
-* `max_depth = 2`: the trees won't be deep, because our case is very simple;
-* `nthread = 2`: the number of CPU threads we are going to use;
-* `nrounds = 2`: there will be two passes on the data, the second one will enhance the model by further reducing the difference between ground truth and prediction.
-
-```{r trainingSparse, message=F, warning=F}
-bstSparse <- xgboost(data = train$data, label = train$label, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
-```
-
-> The more complex the relationship between your features and your `label` is, the more passes you need.
-
-#### Parameter variations
-
-##### Dense matrix
-
-Alternatively, you can put your dataset in a *dense* matrix, i.e. a basic **R** matrix.
-
-```{r trainingDense, message=F, warning=F}
-bstDense <- xgboost(data = as.matrix(train$data), label = train$label, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
-```
-
-##### xgb.DMatrix
-
-**XGBoost** offers a way to group `data` and `label` in an `xgb.DMatrix`. You can even add other metadata to it (a small sketch follows the next chunk). This will be useful for the more advanced features we will discover later.
-
-```{r trainingDmatrix, message=F, warning=F}
-dtrain <- xgb.DMatrix(data = train$data, label = train$label)
-bstDMatrix <- xgboost(data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
-```
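-
-For instance, one piece of metadata an `xgb.DMatrix` can carry is a vector of
-per-instance weights. A minimal sketch (the uniform weights are made up purely
-for illustration):
-
-```{r, eval=FALSE}
-w <- rep(1, nrow(train$data))       # hypothetical weights, one per instance
-setinfo(dtrain, "weight", w)        # attach them to the DMatrix
-head(getinfo(dtrain, "weight"))     # read them back
-```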
-
-##### Verbose option
-
-**XGBoost** has several features to help you view how the learning progresses internally. The purpose is to help you set the best parameters, which is the key to good model quality.
-
-One of the simplest ways to see the training progress is to set the `verbose` option (see below for more advanced techniques).
-
-```{r trainingVerbose0, message=T, warning=F}
-# verbose = 0, no message
-bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 0)
-```
-
-```{r trainingVerbose1, message=T, warning=F}
-# verbose = 1, print evaluation metric
-bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 1)
-```
-
-```{r trainingVerbose2, message=T, warning=F}
-# verbose = 2, also print information about tree
-bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 2)
-```
-
-## Basic prediction using XGBoost
-
-
-## Perform the prediction
-
-
-The purpose of the model we have built is to classify new data. As explained before, we will use the `test` dataset for this step.
-
-```{r predicting, message=F, warning=F}
-pred <- predict(bst, test$data)
-
-# size of the prediction vector
-print(length(pred))
-
-# preview the first predictions
-print(head(pred))
-```
-
-These numbers don't look like *binary classification* `{0,1}`. We need to perform a simple transformation before we can use these results.
-
-## Transform the regression into a binary classification
-
-
-Under the hood, **XGBoost** performs a *regression*: it uses the `label` vector to build its *regression* model.
-
-How can we use a *regression* model to perform a binary classification?
-
-If we think about the meaning of a regression applied to our data, the numbers we get are probabilities that a datum will be classified as `1`. Therefore, we will set the rule that if this probability for a specific datum is `> 0.5` then the observation is classified as `1` (and `0` otherwise).
-
-```{r predictingTest, message=F, warning=F}
-prediction <- as.numeric(pred > 0.5)
-print(head(prediction))
-```
-
-## Measuring model performance
-
-
-To measure the model performance, we will compute a simple metric, the *average error*.
-
-```{r predictingAverageError, message=F, warning=F}
-err <- mean(as.numeric(pred > 0.5) != test$label)
-print(paste("test-error=", err))
-```
-
-> Note that the algorithm has not seen the `test` data during the model construction.
-
-Steps explanation:
-
-1. `as.numeric(pred > 0.5)` applies our rule that when the probability (<=> regression <=> prediction) is `> 0.5` the observation is classified as `1`, and `0` otherwise;
-2. `probabilityVectorPreviouslyComputed != test$label` computes the vector of errors between true labels and computed probabilities;
-3. `mean(vectorOfErrors)` computes the *average error* itself.
-
-The most important thing to remember is that **to do a classification, you just do a regression to the** `label` **and then apply a threshold**.
-
-*Multiclass* classification works in a similar way.
-
-This metric is **`r round(err, 2)`** and is pretty low: our yummy mushroom model works well!
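-
-Beyond a single average-error number, a confusion table shows which class the
-errors come from. A minimal sketch in base R (nothing here is part of the
-package API):
-
-```{r, eval=FALSE}
-# cross-tabulate thresholded predictions against the true labels
-table(predicted = as.numeric(pred > 0.5), actual = test$label)
-```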
-
-## Advanced features
-
-
-Most of the features below have been implemented to help you improve your model by offering a better understanding of its content.
-
-
-### Dataset preparation
-
-
-For the following advanced features, we need to put data in `xgb.DMatrix` as explained above.
-
-```{r DMatrix, message=F, warning=F}
-dtrain <- xgb.DMatrix(data = train$data, label=train$label)
-dtest <- xgb.DMatrix(data = test$data, label=test$label)
-```
-
-### Measure learning progress with xgb.train
-
-
-Both the `xgboost` (simple) and `xgb.train` (advanced) functions train models.
-
-One of the special features of `xgb.train` is the capacity to follow the progress of the learning after each round. Because of the way boosting works, there is a point where having too many rounds leads to overfitting. You can see this feature as a cousin of the cross-validation method. The following techniques will help you avoid overfitting and optimize training time by stopping it as soon as possible (see the early-stopping sketch at the end of this section).
-
-One way to measure progress in the learning of a model is to provide **XGBoost** with a second dataset that is already classified. Therefore it can learn on the first dataset and test its model on the second one. Some metrics are measured after each round during the learning.
-
-> In some way it is similar to what we have done above with the average error. The main difference is that above it was after building the model, and now it is during the construction that we measure errors.
-
-For the purpose of this example, we use the `watchlist` parameter. It is a list of `xgb.DMatrix` objects, each of them tagged with a name.
-
-```{r watchlist, message=F, warning=F}
-watchlist <- list(train=dtrain, test=dtest)
-
-bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nthread = 2, nrounds=2, watchlist=watchlist, objective = "binary:logistic")
-```
-
-**XGBoost** has computed at each round the same average error metric as seen above (we set `nrounds` to 2, that is why we have two lines). Obviously, the `train-error` number is related to the training dataset (the one the algorithm learns from) and the `test-error` number to the test dataset.
-
-The training- and test-error metrics are very similar, and in some way, it makes sense: what we have learned from the training dataset matches the observations from the test dataset.
-
-If you do not get such results with your own dataset, you should think about how you divided it into training and test sets. Maybe there is something to fix. Again, the `caret` package may [help](http://topepo.github.io/caret/data-splitting.html).
-
-For a better understanding of the learning progression, you may want to have some specific metric or even use multiple evaluation metrics.
-
-```{r watchlist2, message=F, warning=F}
-bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nthread = 2, nrounds=2, watchlist=watchlist, eval_metric = "error", eval_metric = "logloss", objective = "binary:logistic")
-```
-
-> `eval_metric` allows us to monitor two new metrics for each round, `logloss` and `error`.
-
-### Linear boosting
-
-
-Until now, all the learning we have performed was based on boosting trees. **XGBoost** implements a second algorithm, based on linear boosting. The only difference from the previous command is the `booster = "gblinear"` parameter (and the removal of the `eta` parameter).
-
-```{r linearBoosting, message=F, warning=F}
-bst <- xgb.train(data=dtrain, booster = "gblinear", max_depth=2, nthread = 2, nrounds=2, watchlist=watchlist, eval_metric = "error", eval_metric = "logloss", objective = "binary:logistic")
-```
-
-In this specific case, *linear boosting* gets slightly better performance metrics than the decision-tree-based algorithm.
-
-In simple cases this will happen because there is nothing better than a linear algorithm to catch a linear link. However, decision trees are much better at catching a non-linear link between predictors and outcome. Because there is no silver bullet, we advise you to check both algorithms with your own datasets to get an idea of what to use.
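-
-One common way to "stop as soon as possible", as mentioned above, is the
-`early_stopping_rounds` parameter of `xgb.train`. A minimal sketch (the round
-counts are arbitrary, and on such a tiny dataset early stopping may never
-trigger):
-
-```{r, eval=FALSE}
-# stop if the metric on the last watchlist entry has not improved for 3 rounds
-bst_es <- xgb.train(data = dtrain, max_depth = 2, eta = 1, nthread = 2,
-                    nrounds = 50, watchlist = watchlist,
-                    early_stopping_rounds = 3,
-                    objective = "binary:logistic")
-print(bst_es$best_iteration)   # round that achieved the best metric
-```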
-
-### Manipulating xgb.DMatrix
-
-
-#### Save / Load
-
-Like models, an `xgb.DMatrix` object (which groups a dataset and its outcome) can also be saved, using the `xgb.DMatrix.save` function.
-
-```{r DMatrixSave, message=F, warning=F}
-xgb.DMatrix.save(dtrain, "dtrain.buffer")
-# to load it in, simply call xgb.DMatrix
-dtrain2 <- xgb.DMatrix("dtrain.buffer")
-bst <- xgb.train(data=dtrain2, max_depth=2, eta=1, nthread = 2, nrounds=2, watchlist=watchlist, objective = "binary:logistic")
-```
-
-```{r DMatrixDel, include=FALSE}
-file.remove("dtrain.buffer")
-```
-
-#### Information extraction
-
-Information can be extracted from an `xgb.DMatrix` using the `getinfo` function. Hereafter we will extract the `label` data.
-
-```{r getinfo, message=F, warning=F}
-label = getinfo(dtest, "label")
-pred <- predict(bst, dtest)
-err <- as.numeric(sum(as.integer(pred > 0.5) != label))/length(label)
-print(paste("test-error=", err))
-```
-
-### View feature importance/influence from the learnt model
-
-
-Feature importance is similar to the R gbm package's relative influence (rel.inf).
-
-```
-importance_matrix <- xgb.importance(model = bst)
-print(importance_matrix)
-xgb.plot.importance(importance_matrix = importance_matrix)
-```
-
-#### View the trees from a model
-
-
-You can dump the tree you learned into a text file using `xgb.dump`.
-
-```{r dump, message=T, warning=F}
-xgb.dump(bst, with_stats = TRUE)
-```
-
-You can plot the trees from your model using `xgb.plot.tree`:
-
-```
-xgb.plot.tree(model = bst)
-```
-
-> If you provide a path to the `fname` parameter, you can save the trees to your hard drive.
-
-#### Save and load models
-
-
-Maybe your dataset is big and it takes time to train a model on it? Maybe you are not a big fan of losing time redoing the same task again and again? In these cases, you will want to save your model and load it when required.
-
-Fortunately for you, **XGBoost** implements such functions.
-
-```{r saveModel, message=F, warning=F}
-# save model to binary local file
-xgb.save(bst, "xgboost.model")
-```
-
-> The `xgb.save` function should return `r TRUE` if everything goes well, and crashes otherwise.
-
-An interesting test to see how identical our saved model is to the original one would be to compare the two predictions.
-
-```{r loadModel, message=F, warning=F}
-# load binary model to R
-bst2 <- xgb.load("xgboost.model")
-pred2 <- predict(bst2, test$data)
-
-# And now the test
-print(paste("sum(abs(pred2-pred))=", sum(abs(pred2-pred))))
-```
-
-```{r clean, include=FALSE}
-# delete the created model
-file.remove("./xgboost.model")
-```
-
-> Is the result `0`? We are good!
-
-In some very specific cases, like when you want to pilot **XGBoost** from the `caret` package, you will want to save the model as an *R* binary vector. See below how to do it.
-
-```{r saveLoadRBinVectorModel, message=F, warning=F}
-# save model to R's raw vector
-rawVec <- xgb.serialize(bst)
-
-# print class
-print(class(rawVec))
-
-# load binary model to R
-bst3 <- xgb.load(rawVec)
-pred3 <- predict(bst3, test$data)
-
-# pred3 should be identical to pred
-print(paste("sum(abs(pred3-pred))=", sum(abs(pred3-pred))))
-```
-
-> Again `0`? It seems that `XGBoost` works pretty well!
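-
-The same training pattern extends to cross-validation via `xgb.cv`, which
-trains on `nfold` splits of the data and reports watchlist-style metrics per
-round. A minimal sketch (the fold and round counts are arbitrary):
-
-```{r, eval=FALSE}
-cv <- xgb.cv(data = dtrain, max_depth = 2, eta = 1, nthread = 2,
-             nrounds = 10, nfold = 5, metrics = "error",
-             objective = "binary:logistic")
-print(cv$evaluation_log)   # per-round mean/std of train and test error
-```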
-
-## References
diff --git a/ml-xgboost/R-package/vignettes/xgboostfromJSON.Rmd b/ml-xgboost/R-package/vignettes/xgboostfromJSON.Rmd
deleted file mode 100644
index 492f3a7..0000000
--- a/ml-xgboost/R-package/vignettes/xgboostfromJSON.Rmd
+++ /dev/null
@@ -1,189 +0,0 @@
----
-title: "XGBoost from JSON"
-output:
-  rmarkdown::html_vignette:
-    number_sections: yes
-    toc: yes
-author: Roland Stevenson
-vignette: >
-  %\VignetteIndexEntry{XGBoost from JSON}
-  %\VignetteEngine{knitr::rmarkdown}
-  \usepackage[utf8]{inputenc}
----
-
-XGBoost from JSON
-=================
-
-## Introduction
-
-The purpose of this vignette is to show you how to correctly load and work with an **Xgboost** model that has been dumped to JSON. **Xgboost** internally converts all data to [32-bit floats](https://en.wikipedia.org/wiki/Single-precision_floating-point_format), and the values dumped to JSON are decimal representations of these values. When working with a model that has been parsed from a JSON file, care must be taken to correctly treat:
-
-- the input data, which should be converted to 32-bit floats;
-- any 32-bit floats that were stored in JSON as decimal representations;
-- any calculations, which must be done with 32-bit mathematical operators.
-
-## Setup
-
-For the purpose of this tutorial we will load the xgboost, jsonlite, and float packages. We'll also set `digits=22` in our options in case we want to inspect many digits of our results.
-
-```{r}
-require(xgboost)
-require(jsonlite)
-require(float)
-options(digits=22)
-```
-
-We will create a toy binary logistic model based on the example first provided [here](https://github.com/dmlc/xgboost/issues/3960), so that we can easily understand the structure of the dumped JSON model object. This will allow us to understand where discrepancies can occur and how they should be handled.
-
-```{r}
-dates <- c(20180130, 20180130, 20180130,
-           20180130, 20180130, 20180130,
-           20180131, 20180131, 20180131,
-           20180131, 20180131, 20180131,
-           20180131, 20180131, 20180131,
-           20180134, 20180134, 20180134)
-
-labels <- c(1, 1, 1,
-            1, 1, 1,
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0)
-
-data <- data.frame(dates = dates, labels=labels)
-
-bst <- xgboost(
-  data = as.matrix(data$dates),
-  label = labels,
-  nthread = 2,
-  nrounds = 1,
-  objective = "binary:logistic",
-  missing = NA,
-  max_depth = 1
-)
-```
-
-## Comparing results
-We will now dump the model to JSON and attempt to illustrate a variety of issues that can arise, and how to properly deal with them.
-
-First let's dump the model to JSON:
-
-```{r}
-bst_json <- xgb.dump(bst, with_stats = FALSE, dump_format='json')
-bst_from_json <- fromJSON(bst_json, simplifyDataFrame = FALSE)
-node <- bst_from_json[[1]]
-cat(bst_json)
-```
-
-The tree JSON shown by the above code chunk tells us that if the data is less than 20180132, the tree will output the value in the first leaf. Otherwise it will output the value in the second leaf. Let's try to reproduce this manually with the data we have and confirm that it matches the model predictions we've already calculated.
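-
-Before doing so, it is worth seeing why the threshold is 20180132 rather than a
-value between 20180130 and 20180131. A quick sketch with the float package
-(`fl` casts to a 32-bit float, `dbl` casts back to a double):
-
-```{r, eval=FALSE}
-dbl(fl(20180131))   # not representable as a 32-bit float; rounds to 20180132
-dbl(fl(20180130))   # representable exactly at this magnitude
-```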
-
-```{r}
-bst_preds_logodds <- predict(bst,as.matrix(data$dates), outputmargin = TRUE)
-
-# calculate the logodds values using the JSON representation
-bst_from_json_logodds <- ifelse(data$dates<node$split_condition,
-                                as.numeric(node$children[[1]]$leaf),
-                                as.numeric(node$children[[2]]$leaf))
-
-# test that values are equal
-bst_preds_logodds == bst_from_json_logodds
-```
-
-None of the values match exactly. Why not?
-
-> When working with imported JSON, all data must be converted to 32-bit floats
-
-To explain this, let's repeat the comparison and round to two decimals:
-
-```{r}
-round(bst_preds_logodds,2) == round(bst_from_json_logodds,2)
-```
-
-If we round to two decimals, we see that only the elements related to data values of `20180131` don't agree. If we convert the data to floats, they agree:
-
-```{r}
-# now convert the dates to floats first
-bst_from_json_logodds <- ifelse(fl(data$dates)<node$split_condition,
-                                as.numeric(node$children[[1]]$leaf),
-                                as.numeric(node$children[[2]]$leaf))
-
-# test that the rounded values are equal
-round(bst_preds_logodds,2) == round(bst_from_json_logodds,2)
-```
-
-> All JSON parameters stored as floats must be converted to floats.
-
-Let's now say we do care about numbers past the first two decimals.
-
-```{r}
-# test that values are equal
-bst_preds_logodds == bst_from_json_logodds
-```
-
-None are exactly equal. What happened? Although we've converted the data to 32-bit floats, we also need to convert the JSON parameters to 32-bit floats. Let's do this:
-
-```{r}
-# now convert the dates to floats first
-bst_from_json_logodds <- ifelse(fl(data$dates)<fl(node$split_condition),
-                                as.numeric(fl(node$children[[1]]$leaf)),
-                                as.numeric(fl(node$children[[2]]$leaf)))
-
-# test that values are equal
-bst_preds_logodds == bst_from_json_logodds
-```
-
-> Always use 32-bit numbers and operators
-
-We were able to get the log-odds to agree, so now let's manually calculate the sigmoid of the log-odds. This should agree with the xgboost predictions.
-
-```{r}
-bst_preds <- predict(bst,as.matrix(data$dates))
-
-# calculate the predictions casting doubles to floats
-bst_from_json_preds <- ifelse(fl(data$dates)<fl(node$split_condition),
-                              as.numeric(1/(1+exp(-1*fl(node$children[[1]]$leaf)))),
-                              as.numeric(1/(1+exp(-1*fl(node$children[[2]]$leaf)))))
-
-# test that values are equal
-bst_preds == bst_from_json_preds
-```
diff --git a/ml-xgboost/README.md b/ml-xgboost/README.md
deleted file mode 100644
--- a/ml-xgboost/README.md
+++ /dev/null
-eXtreme Gradient Boosting
-===========
-[![Build Status](https://xgboost-ci.net/job/xgboost/job/master/badge/icon?style=plastic)](https://xgboost-ci.net/blue/organizations/jenkins/xgboost/activity)
-[![Build Status](https://img.shields.io/travis/dmlc/xgboost.svg?label=build&logo=travis&branch=master)](https://travis-ci.org/dmlc/xgboost)
-[![Build Status](https://ci.appveyor.com/api/projects/status/5ypa8vaed6kpmli8?svg=true)](https://ci.appveyor.com/project/tqchen/xgboost)
-[![Documentation Status](https://readthedocs.org/projects/xgboost/badge/?version=latest)](https://xgboost.readthedocs.org)
-[![GitHub license](http://dmlc.github.io/img/apache2.svg)](./LICENSE)
-[![CRAN Status Badge](http://www.r-pkg.org/badges/version/xgboost)](http://cran.r-project.org/web/packages/xgboost)
-[![PyPI version](https://badge.fury.io/py/xgboost.svg)](https://pypi.python.org/pypi/xgboost/)
-[![Optuna](https://img.shields.io/badge/Optuna-integrated-blue)](https://optuna.org)
-
-[Community](https://xgboost.ai/community) |
-[Documentation](https://xgboost.readthedocs.org) |
-[Resources](demo/README.md) |
-[Contributors](CONTRIBUTORS.md) |
-[Release Notes](NEWS.md)
-
-XGBoost is an optimized distributed gradient boosting library designed to be highly ***efficient***, ***flexible*** and ***portable***.
-It implements machine learning algorithms under the [Gradient Boosting](https://en.wikipedia.org/wiki/Gradient_boosting) framework.
-XGBoost provides parallel tree boosting (also known as GBDT or GBM) that solves many data science problems in a fast and accurate way.
-The same code runs on major distributed environments (Kubernetes, Hadoop, SGE, MPI, Dask) and can solve problems beyond billions of examples.
-
-License
--------
-© Contributors, 2019. Licensed under an [Apache-2](https://github.com/dmlc/xgboost/blob/master/LICENSE) license.
-
-Contribute to XGBoost
----------------------
-XGBoost has been developed and used by a group of active community members.
Your help is very valuable to make the package better for everyone. -Checkout the [Community Page](https://xgboost.ai/community). - -Reference ---------- -- Tianqi Chen and Carlos Guestrin. [XGBoost: A Scalable Tree Boosting System](http://arxiv.org/abs/1603.02754). In 22nd SIGKDD Conference on Knowledge Discovery and Data Mining, 2016 -- XGBoost originates from research project at University of Washington. - -Sponsors --------- -Become a sponsor and get a logo here. See details at [Sponsoring the XGBoost Project](https://xgboost.ai/sponsors). The funds are used to defray the cost of continuous integration and testing infrastructure (https://xgboost-ci.net). - -## Open Source Collective sponsors -[![Backers on Open Collective](https://opencollective.com/xgboost/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/xgboost/sponsors/badge.svg)](#sponsors) - -### Sponsors -[[Become a sponsor](https://opencollective.com/xgboost#sponsor)] - - -NVIDIA - - - - - - - - - - -### Backers -[[Become a backer](https://opencollective.com/xgboost#backer)] - - - -## Other sponsors -The sponsors in this list are donating cloud hours in lieu of cash donation. - -Amazon Web Services diff --git a/ml-xgboost/amalgamation/dmlc-minimum0.cc b/ml-xgboost/amalgamation/dmlc-minimum0.cc deleted file mode 100644 index d8594aa..0000000 --- a/ml-xgboost/amalgamation/dmlc-minimum0.cc +++ /dev/null @@ -1,16 +0,0 @@ -/*! - * Copyright 2015 by Contributors. - * \brief Mininum DMLC library Amalgamation, used for easy plugin of dmlc lib. - * Normally this is not needed. - */ -#include "../dmlc-core/src/io/line_split.cc" -#include "../dmlc-core/src/io/recordio_split.cc" -#include "../dmlc-core/src/io/input_split_base.cc" -#include "../dmlc-core/src/io/local_filesys.cc" -#include "../dmlc-core/src/io/filesys.cc" -#include "../dmlc-core/src/io/indexed_recordio_split.cc" -#include "../dmlc-core/src/data.cc" -#include "../dmlc-core/src/io.cc" -#include "../dmlc-core/src/recordio.cc" - - diff --git a/ml-xgboost/amalgamation/xgboost-all0.cc b/ml-xgboost/amalgamation/xgboost-all0.cc deleted file mode 100644 index 5f7e75f..0000000 --- a/ml-xgboost/amalgamation/xgboost-all0.cc +++ /dev/null @@ -1,83 +0,0 @@ -/*! - * Copyright 2015-2019 by Contributors. - * \brief XGBoost Amalgamation. - * This offers an alternative way to compile the entire library from this single file. - * - * Example usage command. - * - $(CXX) -std=c++0x -fopenmp -o -shared libxgboost.so xgboost-all0.cc -ldmlc -lrabit - * - * \author Tianqi Chen. 
- */ - -// metrics -#include "../src/metric/metric.cc" -#include "../src/metric/elementwise_metric.cc" -#include "../src/metric/multiclass_metric.cc" -#include "../src/metric/rank_metric.cc" -#include "../src/metric/survival_metric.cc" - -// objectives -#include "../src/objective/objective.cc" -#include "../src/objective/regression_obj.cc" -#include "../src/objective/multiclass_obj.cc" -#include "../src/objective/rank_obj.cc" -#include "../src/objective/hinge.cc" -#include "../src/objective/aft_obj.cc" - -// gbms -#include "../src/gbm/gbm.cc" -#include "../src/gbm/gbtree.cc" -#include "../src/gbm/gbtree_model.cc" -#include "../src/gbm/gblinear.cc" -#include "../src/gbm/gblinear_model.cc" - -// data -#include "../src/data/data.cc" -#include "../src/data/simple_dmatrix.cc" -#include "../src/data/sparse_page_raw_format.cc" -#include "../src/data/ellpack_page.cc" -#include "../src/data/ellpack_page_source.cc" - -// prediction -#include "../src/predictor/predictor.cc" -#include "../src/predictor/cpu_predictor.cc" - -#if DMLC_ENABLE_STD_THREAD -#include "../src/data/sparse_page_dmatrix.cc" -#endif - -// trees -#include "../src/tree/param.cc" -#include "../src/tree/split_evaluator.cc" -#include "../src/tree/tree_model.cc" -#include "../src/tree/tree_updater.cc" -#include "../src/tree/updater_colmaker.cc" -#include "../src/tree/updater_quantile_hist.cc" -#include "../src/tree/updater_prune.cc" -#include "../src/tree/updater_refresh.cc" -#include "../src/tree/updater_sync.cc" -#include "../src/tree/updater_histmaker.cc" -#include "../src/tree/updater_skmaker.cc" -#include "../src/tree/constraints.cc" - -// linear -#include "../src/linear/linear_updater.cc" -#include "../src/linear/updater_coordinate.cc" -#include "../src/linear/updater_shotgun.cc" - -// global -#include "../src/learner.cc" -#include "../src/logging.cc" -#include "../src/common/common.cc" -#include "../src/common/timer.cc" -#include "../src/common/host_device_vector.cc" -#include "../src/common/hist_util.cc" -#include "../src/common/json.cc" -#include "../src/common/io.cc" -#include "../src/common/survival_util.cc" -#include "../src/common/probability_distribution.cc" -#include "../src/common/version.cc" - -// c_api -#include "../src/c_api/c_api.cc" -#include "../src/c_api/c_api_error.cc" diff --git a/ml-xgboost/appveyor.yml b/ml-xgboost/appveyor.yml deleted file mode 100644 index 68933db..0000000 --- a/ml-xgboost/appveyor.yml +++ /dev/null @@ -1,133 +0,0 @@ -environment: - R_ARCH: x64 - USE_RTOOLS: true - matrix: - - target: msvc - ver: 2015 - generator: "Visual Studio 14 2015 Win64" - configuration: Debug - - target: msvc - ver: 2015 - generator: "Visual Studio 14 2015 Win64" - configuration: Release - - target: mingw - generator: "Unix Makefiles" - - target: jvm - - target: rmsvc - ver: 2015 - generator: "Visual Studio 14 2015 Win64" - configuration: Release - - target: rmingw - generator: "Unix Makefiles" - -#matrix: -# fast_finish: true - -platform: - - x64 - -install: - - git submodule update --init --recursive - # MinGW - - set PATH=C:\msys64\mingw64\bin;C:\msys64\usr\bin;%PATH% - - gcc -v - - ls -l C:\ - # Miniconda3 - - call C:\Miniconda3-x64\Scripts\activate.bat - - conda info - - where python - - python --version - # do python build for mingw and one of the msvc jobs - - set DO_PYTHON=off - - if /i "%target%" == "mingw" set DO_PYTHON=on - - if /i "%target%_%ver%_%configuration%" == "msvc_2015_Release" set DO_PYTHON=on - - if /i "%DO_PYTHON%" == "on" ( - conda config --set always_yes true && - conda update -q conda && - 
conda install -y numpy scipy pandas matplotlib pytest scikit-learn graphviz python-graphviz - ) - - set PATH=C:\Miniconda3-x64\Library\bin\graphviz;%PATH% - # R: based on https://github.com/krlmlr/r-appveyor - - ps: | - if($env:target -eq 'rmingw' -or $env:target -eq 'rmsvc') { - #$ErrorActionPreference = "Stop" - Invoke-WebRequest https://raw.githubusercontent.com/krlmlr/r-appveyor/master/scripts/appveyor-tool.ps1 -OutFile "$Env:TEMP\appveyor-tool.ps1" - Import-Module "$Env:TEMP\appveyor-tool.ps1" - Bootstrap - $BINARY_DEPS = "c('XML','igraph')" - cmd.exe /c "R.exe -q -e ""install.packages($BINARY_DEPS, repos='$CRAN', type='win.binary')"" 2>&1" - $DEPS = "c('data.table','magrittr','stringi','ggplot2','DiagrammeR','Ckmeans.1d.dp','vcd','testthat','lintr','knitr','rmarkdown')" - cmd.exe /c "R.exe -q -e ""install.packages($DEPS, repos='$CRAN', type='both')"" 2>&1" - } - -build_script: - - cd %APPVEYOR_BUILD_FOLDER% - - if /i "%target%" == "msvc" ( - mkdir build_msvc%ver% && - cd build_msvc%ver% && - cmake .. -G"%generator%" -DCMAKE_CONFIGURATION_TYPES="Release;Debug;" && - msbuild xgboost.sln - ) - - if /i "%target%" == "mingw" ( - mkdir build_mingw && - cd build_mingw && - cmake .. -G"%generator%" && - make -j2 - ) - # Python package - - if /i "%DO_PYTHON%" == "on" ( - cd %APPVEYOR_BUILD_FOLDER%\python-package && - python setup.py install && - mkdir wheel && - python setup.py bdist_wheel --universal --plat-name win-amd64 -d wheel - ) - # R package: make + mingw standard CRAN packaging (only x64 for now) - - if /i "%target%" == "rmingw" ( - make Rbuild && - ls -l && - R.exe CMD INSTALL xgboost*.tar.gz - ) - # R package: cmake + VC2015 - - if /i "%target%" == "rmsvc" ( - mkdir build_rmsvc%ver% && - cd build_rmsvc%ver% && - cmake .. -G"%generator%" -DCMAKE_CONFIGURATION_TYPES="Release" -DR_LIB=ON && - cmake --build . --target install --config Release - ) - - if /i "%target%" == "jvm" cd jvm-packages && mvn test -pl :boostkit-xgboost4j_2.11 - -test_script: - - cd %APPVEYOR_BUILD_FOLDER% - - if /i "%DO_PYTHON%" == "on" python -m pytest tests/python - # mingw R package: run the R check (which includes unit tests), and also keep the built binary package - - if /i "%target%" == "rmingw" ( - set _R_CHECK_CRAN_INCOMING_=FALSE&& - set _R_CHECK_FORCE_SUGGESTS_=FALSE&& - R.exe CMD check xgboost*.tar.gz --no-manual --no-build-vignettes --as-cran --install-args=--build - ) - # MSVC R package: run only the unit tests - - if /i "%target%" == "rmsvc" ( - cd build_rmsvc%ver%\R-package && - R.exe -q -e "library(testthat); setwd('tests'); source('testthat.R')" - ) - -on_failure: - # keep the whole output of R check - - if /i "%target%" == "rmingw" ( - 7z a failure.zip *.Rcheck\* && - appveyor PushArtifact failure.zip - ) - -artifacts: - # log from R check - - path: '*.Rcheck\**\*.log' - name: Logs - # source R-package - - path: '\xgboost_*.tar.gz' - name: Bits - # binary R-package - - path: '**\xgboost_*.zip' - name: Bits - # binary Python wheel package - - path: '**\*.whl' - name: Bits diff --git a/ml-xgboost/cmake/Doc.cmake b/ml-xgboost/cmake/Doc.cmake deleted file mode 100644 index 2ffa005..0000000 --- a/ml-xgboost/cmake/Doc.cmake +++ /dev/null @@ -1,16 +0,0 @@ -function (run_doxygen) - find_package(Doxygen REQUIRED) - - if (NOT DOXYGEN_DOT_FOUND) - message(FATAL_ERROR "Command `dot` not found. 
Please install graphviz.")
-  endif (NOT DOXYGEN_DOT_FOUND)
-
-  configure_file(
-    ${xgboost_SOURCE_DIR}/doc/Doxyfile.in
-    ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile @ONLY)
-  add_custom_target( doc_doxygen ALL
-    COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
-    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
-    COMMENT "Generate C APIs documentation."
-    VERBATIM)
-endfunction (run_doxygen)
diff --git a/ml-xgboost/cmake/FindPrefetchIntrinsics.cmake b/ml-xgboost/cmake/FindPrefetchIntrinsics.cmake
deleted file mode 100644
index b00ff57..0000000
--- a/ml-xgboost/cmake/FindPrefetchIntrinsics.cmake
+++ /dev/null
@@ -1,22 +0,0 @@
-function (find_prefetch_intrinsics)
-  include(CheckCXXSourceCompiles)
-  check_cxx_source_compiles("
-    #include <xmmintrin.h>
-    int main() {
-      char data = 0;
-      const char* address = &data;
-      _mm_prefetch(address, _MM_HINT_NTA);
-      return 0;
-    }
-  " XGBOOST_MM_PREFETCH_PRESENT)
-  check_cxx_source_compiles("
-    int main() {
-      char data = 0;
-      const char* address = &data;
-      __builtin_prefetch(address, 0, 0);
-      return 0;
-    }
-  " XGBOOST_BUILTIN_PREFETCH_PRESENT)
-  set(XGBOOST_MM_PREFETCH_PRESENT ${XGBOOST_MM_PREFETCH_PRESENT} PARENT_SCOPE)
-  set(XGBOOST_BUILTIN_PREFETCH_PRESENT ${XGBOOST_BUILTIN_PREFETCH_PRESENT} PARENT_SCOPE)
-endfunction (find_prefetch_intrinsics)
diff --git a/ml-xgboost/cmake/Python_version.in b/ml-xgboost/cmake/Python_version.in
deleted file mode 100644
index c55458e..0000000
--- a/ml-xgboost/cmake/Python_version.in
+++ /dev/null
@@ -1 +0,0 @@
-@xgboost_VERSION_MAJOR@.@xgboost_VERSION_MINOR@.@xgboost_VERSION_PATCH@
diff --git a/ml-xgboost/cmake/Sanitizer.cmake b/ml-xgboost/cmake/Sanitizer.cmake
deleted file mode 100644
index c1afb14..0000000
--- a/ml-xgboost/cmake/Sanitizer.cmake
+++ /dev/null
@@ -1,63 +0,0 @@
-# Set appropriate compiler and linker flags for sanitizers.
-#
-# Usage of this module:
-#  enable_sanitizers("address;leak")
-
-# Add flags
-macro(enable_sanitizer sanitizer)
-  if(${sanitizer} MATCHES "address")
-    find_package(ASan REQUIRED)
-    set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=address")
-    link_libraries(${ASan_LIBRARY})
-
-  elseif(${sanitizer} MATCHES "thread")
-    find_package(TSan REQUIRED)
-    set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=thread")
-    link_libraries(${TSan_LIBRARY})
-
-  elseif(${sanitizer} MATCHES "leak")
-    find_package(LSan REQUIRED)
-    set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=leak")
-    link_libraries(${LSan_LIBRARY})
-
-  elseif(${sanitizer} MATCHES "undefined")
-    find_package(UBSan REQUIRED)
-    set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=undefined -fno-sanitize-recover=undefined")
-    link_libraries(${UBSan_LIBRARY})
-
-  else()
-    message(FATAL_ERROR "Sanitizer ${sanitizer} not supported.")
-  endif()
-endmacro()
-
-macro(enable_sanitizers SANITIZERS)
-  # Check sanitizers compatibility.
-  # Ideally, we should use if(san IN_LIST SANITIZERS) ... endif()
-  # But I haven't figured out how to make it work.
- foreach ( _san ${SANITIZERS} ) - string(TOLOWER ${_san} _san) - if (_san MATCHES "thread") - if (${_use_other_sanitizers}) - message(FATAL_ERROR - "thread sanitizer is not compatible with ${_san} sanitizer.") - endif() - set(_use_thread_sanitizer 1) - else () - if (${_use_thread_sanitizer}) - message(FATAL_ERROR - "${_san} sanitizer is not compatible with thread sanitizer.") - endif() - set(_use_other_sanitizers 1) - endif() - endforeach() - - message("Sanitizers: ${SANITIZERS}") - - foreach( _san ${SANITIZERS} ) - string(TOLOWER ${_san} _san) - enable_sanitizer(${_san}) - endforeach() - message("Sanitizers compile flags: ${SAN_COMPILE_FLAGS}") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_COMPILE_FLAGS}") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_COMPILE_FLAGS}") -endmacro() diff --git a/ml-xgboost/cmake/Utils.cmake b/ml-xgboost/cmake/Utils.cmake deleted file mode 100644 index 4a9e63a..0000000 --- a/ml-xgboost/cmake/Utils.cmake +++ /dev/null @@ -1,143 +0,0 @@ -# Automatically set source group based on folder -function(auto_source_group SOURCES) - - foreach(FILE ${SOURCES}) - get_filename_component(PARENT_DIR "${FILE}" PATH) - - # skip src or include and changes /'s to \\'s - string(REPLACE "${CMAKE_CURRENT_LIST_DIR}" "" GROUP "${PARENT_DIR}") - string(REPLACE "/" "\\\\" GROUP "${GROUP}") - string(REGEX REPLACE "^\\\\" "" GROUP "${GROUP}") - - source_group("${GROUP}" FILES "${FILE}") - endforeach() -endfunction(auto_source_group) - -# Force static runtime for MSVC -function(msvc_use_static_runtime) - if(MSVC) - set(variables - CMAKE_C_FLAGS_DEBUG - CMAKE_C_FLAGS_MINSIZEREL - CMAKE_C_FLAGS_RELEASE - CMAKE_C_FLAGS_RELWITHDEBINFO - CMAKE_CXX_FLAGS_DEBUG - CMAKE_CXX_FLAGS_MINSIZEREL - CMAKE_CXX_FLAGS_RELEASE - CMAKE_CXX_FLAGS_RELWITHDEBINFO - ) - foreach(variable ${variables}) - if(${variable} MATCHES "/MD") - string(REGEX REPLACE "/MD" "/MT" ${variable} "${${variable}}") - set(${variable} "${${variable}}" PARENT_SCOPE) - endif() - endforeach() - set(variables - CMAKE_CUDA_FLAGS - CMAKE_CUDA_FLAGS_DEBUG - CMAKE_CUDA_FLAGS_MINSIZEREL - CMAKE_CUDA_FLAGS_RELEASE - CMAKE_CUDA_FLAGS_RELWITHDEBINFO - ) - foreach(variable ${variables}) - if(${variable} MATCHES "-MD") - string(REGEX REPLACE "-MD" "-MT" ${variable} "${${variable}}") - set(${variable} "${${variable}}" PARENT_SCOPE) - endif() - if(${variable} MATCHES "/MD") - string(REGEX REPLACE "/MD" "/MT" ${variable} "${${variable}}") - set(${variable} "${${variable}}" PARENT_SCOPE) - endif() - endforeach() - endif() -endfunction(msvc_use_static_runtime) - -# Set output directory of target, ignoring debug or release -function(set_output_directory target dir) - set_target_properties(${target} PROPERTIES - RUNTIME_OUTPUT_DIRECTORY ${dir} - RUNTIME_OUTPUT_DIRECTORY_DEBUG ${dir} - RUNTIME_OUTPUT_DIRECTORY_RELEASE ${dir} - RUNTIME_OUTPUT_DIRECTORY_RELWITHDEBINFO ${dir} - RUNTIME_OUTPUT_DIRECTORY_MINSIZEREL ${dir} - LIBRARY_OUTPUT_DIRECTORY ${dir} - LIBRARY_OUTPUT_DIRECTORY_DEBUG ${dir} - LIBRARY_OUTPUT_DIRECTORY_RELEASE ${dir} - LIBRARY_OUTPUT_DIRECTORY_RELWITHDEBINFO ${dir} - LIBRARY_OUTPUT_DIRECTORY_MINSIZEREL ${dir} - ARCHIVE_OUTPUT_DIRECTORY ${dir} - ARCHIVE_OUTPUT_DIRECTORY_DEBUG ${dir} - ARCHIVE_OUTPUT_DIRECTORY_RELEASE ${dir} - ARCHIVE_OUTPUT_DIRECTORY_RELWITHDEBINFO ${dir} - ARCHIVE_OUTPUT_DIRECTORY_MINSIZEREL ${dir} - ) -endfunction(set_output_directory) - -# Set a default build type to release if none was specified -function(set_default_configuration_release) - if(CMAKE_CONFIGURATION_TYPES STREQUAL "Debug;Release;MinSizeRel;RelWithDebInfo") 
# multiconfig generator? - set(CMAKE_CONFIGURATION_TYPES Release CACHE STRING "" FORCE) - elseif(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES) - message(STATUS "Setting build type to 'Release' as none was specified.") - set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build." FORCE ) - endif() -endfunction(set_default_configuration_release) - -# Generate nvcc compiler flags given a list of architectures -# Also generates PTX for the most recent architecture for forwards compatibility -function(format_gencode_flags flags out) - if(CMAKE_CUDA_COMPILER_VERSION MATCHES "^([0-9]+\\.[0-9]+)") - set(CUDA_VERSION "${CMAKE_MATCH_1}") - endif() - # Set up architecture flags - if(NOT flags) - if(CUDA_VERSION VERSION_GREATER_EQUAL "10.0") - set(flags "35;50;52;60;61;70;75") - elseif(CUDA_VERSION VERSION_GREATER_EQUAL "9.0") - set(flags "35;50;52;60;61;70") - else() - set(flags "35;50;52;60;61") - endif() - endif() - # Generate SASS - foreach(ver ${flags}) - set(${out} "${${out}}--generate-code=arch=compute_${ver},code=sm_${ver};") - endforeach() - # Generate PTX for last architecture - list(GET flags -1 ver) - set(${out} "${${out}}--generate-code=arch=compute_${ver},code=compute_${ver};") - - set(${out} "${${out}}" PARENT_SCOPE) -endfunction(format_gencode_flags flags) - -# Assembles the R-package files in build_dir; -# if necessary, installs the main R package dependencies; -# runs R CMD INSTALL. -function(setup_rpackage_install_target rlib_target build_dir) - # backup cmake_install.cmake - install(CODE "file(COPY \"${build_dir}/R-package/cmake_install.cmake\" -DESTINATION \"${build_dir}/bak\")") - - install(CODE "file(REMOVE_RECURSE \"${build_dir}/R-package\")") - install( - DIRECTORY "${xgboost_SOURCE_DIR}/R-package" - DESTINATION "${build_dir}" - REGEX "src/*" EXCLUDE - REGEX "R-package/configure" EXCLUDE - ) - install(TARGETS ${rlib_target} - LIBRARY DESTINATION "${build_dir}/R-package/src/" - RUNTIME DESTINATION "${build_dir}/R-package/src/") - install(CODE "file(WRITE \"${build_dir}/R-package/src/Makevars\" \"all:\")") - install(CODE "file(WRITE \"${build_dir}/R-package/src/Makevars.win\" \"all:\")") - set(XGB_DEPS_SCRIPT - "deps = setdiff(c('data.table', 'magrittr', 'stringi'), rownames(installed.packages()));\ - if(length(deps)>0) install.packages(deps, repo = 'https://cloud.r-project.org/')") - install(CODE "execute_process(COMMAND \"${LIBR_EXECUTABLE}\" \"-q\" \"-e\" \"${XGB_DEPS_SCRIPT}\")") - install(CODE "execute_process(COMMAND \"${LIBR_EXECUTABLE}\" CMD INSTALL\ - \"--no-multiarch\" \"--build\" \"${build_dir}/R-package\")") - - # restore cmake_install.cmake - install(CODE "file(RENAME \"${build_dir}/bak/cmake_install.cmake\" - \"${build_dir}/R-package/cmake_install.cmake\")") -endfunction(setup_rpackage_install_target) diff --git a/ml-xgboost/cmake/Version.cmake b/ml-xgboost/cmake/Version.cmake deleted file mode 100644 index f38ce3c..0000000 --- a/ml-xgboost/cmake/Version.cmake +++ /dev/null @@ -1,9 +0,0 @@ -function (write_version) - message(STATUS "xgboost VERSION: ${xgboost_VERSION}") - configure_file( - ${xgboost_SOURCE_DIR}/cmake/version_config.h.in - ${xgboost_SOURCE_DIR}/include/xgboost/version_config.h @ONLY) - configure_file( - ${xgboost_SOURCE_DIR}/cmake/Python_version.in - ${xgboost_SOURCE_DIR}/python-package/xgboost/VERSION @ONLY) -endfunction (write_version) diff --git a/ml-xgboost/cmake/modules/FindASan.cmake b/ml-xgboost/cmake/modules/FindASan.cmake deleted file mode 100644 index e7b2738..0000000 --- a/ml-xgboost/cmake/modules/FindASan.cmake +++ 
/dev/null @@ -1,13 +0,0 @@ -set(ASan_LIB_NAME ASan) - -find_library(ASan_LIBRARY - NAMES libasan.so libasan.so.5 libasan.so.4 libasan.so.3 libasan.so.2 libasan.so.1 libasan.so.0 - PATHS ${SANITIZER_PATH} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib) - -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(ASan DEFAULT_MSG - ASan_LIBRARY) - -mark_as_advanced( - ASan_LIBRARY - ASan_LIB_NAME) diff --git a/ml-xgboost/cmake/modules/FindLSan.cmake b/ml-xgboost/cmake/modules/FindLSan.cmake deleted file mode 100644 index 3f68fb0..0000000 --- a/ml-xgboost/cmake/modules/FindLSan.cmake +++ /dev/null @@ -1,13 +0,0 @@ -set(LSan_LIB_NAME lsan) - -find_library(LSan_LIBRARY - NAMES liblsan.so liblsan.so.0 liblsan.so.0.0.0 - PATHS ${SANITIZER_PATH} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib) - -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(LSan DEFAULT_MSG - LSan_LIBRARY) - -mark_as_advanced( - LSan_LIBRARY - LSan_LIB_NAME) diff --git a/ml-xgboost/cmake/modules/FindLibR.cmake b/ml-xgboost/cmake/modules/FindLibR.cmake deleted file mode 100644 index 0110439..0000000 --- a/ml-xgboost/cmake/modules/FindLibR.cmake +++ /dev/null @@ -1,183 +0,0 @@ -# CMake module for R -# Borrows ideas from RStudio's FindLibR.cmake -# -# Defines the following: -# LIBR_FOUND -# LIBR_HOME -# LIBR_EXECUTABLE -# LIBR_INCLUDE_DIRS -# LIBR_LIB_DIR -# LIBR_CORE_LIBRARY -# and a cmake function to create R.lib for MSVC -# -# The following could be provided by user through cmake's -D options: -# LIBR_EXECUTABLE (for unix and win) -# R_VERSION (for win) -# R_ARCH (for win 64 when want 32 bit build) -# -# TODO: -# - someone to verify OSX detection, -# - possibly, add OSX detection based on current R in PATH or LIBR_EXECUTABLE -# - improve registry-based R_HOME detection in Windows (from a set of R_VERSION's) - - -# Windows users might want to change this to their R version: -if(NOT R_VERSION) - set(R_VERSION "3.4.1") -endif() -if(NOT R_ARCH) - if("${CMAKE_SIZEOF_VOID_P}" STREQUAL "4") - set(R_ARCH "i386") - else() - set(R_ARCH "x64") - endif() -endif() - - -# Creates R.lib and R.def in the build directory for linking with MSVC -function(create_rlib_for_msvc) - # various checks and warnings - if(NOT WIN32 OR NOT MSVC) - message(FATAL_ERROR "create_rlib_for_msvc() can only be used with MSVC") - endif() - if(NOT EXISTS "${LIBR_LIB_DIR}") - message(FATAL_ERROR "LIBR_LIB_DIR was not set!") - endif() - find_program(GENDEF_EXE gendef) - find_program(DLLTOOL_EXE dlltool) - if(NOT GENDEF_EXE OR NOT DLLTOOL_EXE) - message(FATAL_ERROR "\nEither gendef.exe or dlltool.exe not found!\ - \nDo you have Rtools installed with its MinGW's bin/ in PATH?") - endif() - # extract symbols from R.dll into R.def and R.lib import library - execute_process(COMMAND ${GENDEF_EXE} - "-" "${LIBR_LIB_DIR}/R.dll" - OUTPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/R.def") - execute_process(COMMAND ${DLLTOOL_EXE} - "--input-def" "${CMAKE_CURRENT_BINARY_DIR}/R.def" - "--output-lib" "${CMAKE_CURRENT_BINARY_DIR}/R.lib") -endfunction(create_rlib_for_msvc) - - -# detection for OSX -if(APPLE) - - find_library(LIBR_LIBRARIES R) - - if(LIBR_LIBRARIES MATCHES ".*\\.framework") - set(LIBR_HOME "${LIBR_LIBRARIES}/Resources" CACHE PATH "R home directory") - set(LIBR_INCLUDE_DIRS "${LIBR_HOME}/include" CACHE PATH "R include directory") - set(LIBR_EXECUTABLE "${LIBR_HOME}/R" CACHE PATH "R executable") - set(LIBR_LIB_DIR "${LIBR_HOME}/lib" CACHE PATH "R lib directory") - else() - 
get_filename_component(_LIBR_LIBRARIES "${LIBR_LIBRARIES}" REALPATH) - get_filename_component(_LIBR_LIBRARIES_DIR "${_LIBR_LIBRARIES}" DIRECTORY) - set(LIBR_EXECUTABLE "${_LIBR_LIBRARIES_DIR}/../bin/R") - execute_process( - COMMAND ${LIBR_EXECUTABLE} "--slave" "--vanilla" "-e" "cat(R.home())" - OUTPUT_VARIABLE LIBR_HOME) - set(LIBR_HOME ${LIBR_HOME} CACHE PATH "R home directory") - set(LIBR_INCLUDE_DIRS "${LIBR_HOME}/include" CACHE PATH "R include directory") - set(LIBR_LIB_DIR "${LIBR_HOME}/lib" CACHE PATH "R lib directory") - endif() - -# detection for UNIX & Win32 -else() - - # attempt to find R executable - if(NOT LIBR_EXECUTABLE) - find_program(LIBR_EXECUTABLE NAMES R R.exe) - endif() - - if(UNIX) - - if(NOT LIBR_EXECUTABLE) - message(FATAL_ERROR "Unable to locate R executable.\ - \nEither add its location to PATH or provide it through the LIBR_EXECUTABLE cmake variable") - endif() - - # ask R for the home path - execute_process( - COMMAND ${LIBR_EXECUTABLE} "--slave" "--vanilla" "-e" "cat(R.home())" - OUTPUT_VARIABLE LIBR_HOME - ) - # ask R for the include dir - execute_process( - COMMAND ${LIBR_EXECUTABLE} "--slave" "--vanilla" "-e" "cat(R.home('include'))" - OUTPUT_VARIABLE LIBR_INCLUDE_DIRS - ) - # ask R for the lib dir - execute_process( - COMMAND ${LIBR_EXECUTABLE} "--slave" "--vanilla" "-e" "cat(R.home('lib'))" - OUTPUT_VARIABLE LIBR_LIB_DIR - ) - - # Windows - else() - # ask R for R_HOME - if(LIBR_EXECUTABLE) - execute_process( - COMMAND ${LIBR_EXECUTABLE} "--slave" "--no-save" "-e" "cat(normalizePath(R.home(),winslash='/'))" - OUTPUT_VARIABLE LIBR_HOME) - endif() - # if R executable not available, query R_HOME path from registry - if(NOT LIBR_HOME) - get_filename_component(LIBR_HOME - "[HKEY_LOCAL_MACHINE\\SOFTWARE\\R-core\\R\\${R_VERSION};InstallPath]" - ABSOLUTE) - if(NOT LIBR_HOME) - message(FATAL_ERROR "\nUnable to locate R executable.\ - \nEither add its location to PATH or provide it through the LIBR_EXECUTABLE cmake variable") - endif() - endif() - # set exe location based on R_ARCH - if(NOT LIBR_EXECUTABLE) - set(LIBR_EXECUTABLE "${LIBR_HOME}/bin/${R_ARCH}/R.exe") - endif() - # set other R paths based on home path - set(LIBR_INCLUDE_DIRS "${LIBR_HOME}/include") - set(LIBR_LIB_DIR "${LIBR_HOME}/bin/${R_ARCH}") - -message(STATUS "LIBR_HOME [${LIBR_HOME}]") -message(STATUS "LIBR_EXECUTABLE [${LIBR_EXECUTABLE}]") -message(STATUS "LIBR_INCLUDE_DIRS [${LIBR_INCLUDE_DIRS}]") -message(STATUS "LIBR_LIB_DIR [${LIBR_LIB_DIR}]") -message(STATUS "LIBR_CORE_LIBRARY [${LIBR_CORE_LIBRARY}]") - - endif() - -endif() - -if(WIN32 AND MSVC) - # create a local R.lib import library for R.dll if it doesn't exist - if(NOT EXISTS "${CMAKE_CURRENT_BINARY_DIR}/R.lib") - create_rlib_for_msvc() - endif() -endif() - -# look for the core R library -find_library(LIBR_CORE_LIBRARY NAMES R - HINTS "${CMAKE_CURRENT_BINARY_DIR}" "${LIBR_LIB_DIR}" "${LIBR_HOME}/bin" "${LIBR_LIBRARIES}") -if(LIBR_CORE_LIBRARY-NOTFOUND) - message(STATUS "Could not find R core shared library.") -endif() - -set(LIBR_HOME ${LIBR_HOME} CACHE PATH "R home directory") -set(LIBR_EXECUTABLE ${LIBR_EXECUTABLE} CACHE PATH "R executable") -set(LIBR_INCLUDE_DIRS ${LIBR_INCLUDE_DIRS} CACHE PATH "R include directory") -set(LIBR_LIB_DIR ${LIBR_LIB_DIR} CACHE PATH "R shared libraries directory") -set(LIBR_CORE_LIBRARY ${LIBR_CORE_LIBRARY} CACHE PATH "R core shared library") - -# define find requirements -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(LibR DEFAULT_MSG - LIBR_HOME - LIBR_EXECUTABLE - 
LIBR_INCLUDE_DIRS - LIBR_LIB_DIR - LIBR_CORE_LIBRARY -) - -if(LIBR_FOUND) - message(STATUS "Found R: ${LIBR_EXECUTABLE}") -endif() diff --git a/ml-xgboost/cmake/modules/FindNVML.cmake b/ml-xgboost/cmake/modules/FindNVML.cmake deleted file mode 100644 index a4bed00..0000000 --- a/ml-xgboost/cmake/modules/FindNVML.cmake +++ /dev/null @@ -1,23 +0,0 @@ -if (NVML_LIBRARY) - unset(NVML_LIBRARY CACHE) -endif(NVML_LIBRARY) - -set(NVML_LIB_NAME nvml) - -find_path(NVML_INCLUDE_DIR - NAMES nvml.h - PATHS ${CUDA_HOME}/include ${CUDA_INCLUDE} /usr/local/cuda/include) - -find_library(NVML_LIBRARY - NAMES nvidia-ml) - -message(STATUS "Using nvml library: ${NVML_LIBRARY}") - -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(NVML DEFAULT_MSG - NVML_INCLUDE_DIR NVML_LIBRARY) - -mark_as_advanced( - NVML_INCLUDE_DIR - NVML_LIBRARY -) diff --git a/ml-xgboost/cmake/modules/FindNccl.cmake b/ml-xgboost/cmake/modules/FindNccl.cmake deleted file mode 100644 index 643c45f..0000000 --- a/ml-xgboost/cmake/modules/FindNccl.cmake +++ /dev/null @@ -1,65 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Tries to find NCCL headers and libraries. -# -# Usage of this module as follows: -# -# find_package(NCCL) -# -# Variables used by this module, they can change the default behaviour and need -# to be set before calling find_package: -# -# NCCL_ROOT - When set, this path is inspected instead of standard library -# locations as the root of the NCCL installation. -# The environment variable NCCL_ROOT overrides this veriable. -# -# This module defines -# Nccl_FOUND, whether nccl has been found -# NCCL_INCLUDE_DIR, directory containing header -# NCCL_LIBRARY, directory containing nccl library -# NCCL_LIB_NAME, nccl library name -# -# This module assumes that the user has already called find_package(CUDA) - -if (NCCL_LIBRARY) - # Don't cache NCCL_LIBRARY to enable switching between static and shared. 
- unset(NCCL_LIBRARY CACHE) -endif() - -if (BUILD_WITH_SHARED_NCCL) - # libnccl.so - set(NCCL_LIB_NAME nccl) -else () - # libnccl_static.a - set(NCCL_LIB_NAME nccl_static) -endif (BUILD_WITH_SHARED_NCCL) - -find_path(NCCL_INCLUDE_DIR - NAMES nccl.h - PATHS $ENV{NCCL_ROOT}/include ${NCCL_ROOT}/include) - -find_library(NCCL_LIBRARY - NAMES ${NCCL_LIB_NAME} - PATHS $ENV{NCCL_ROOT}/lib/ ${NCCL_ROOT}/lib) - -message(STATUS "Using nccl library: ${NCCL_LIBRARY}") - -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(Nccl DEFAULT_MSG - NCCL_INCLUDE_DIR NCCL_LIBRARY) - -mark_as_advanced( - NCCL_INCLUDE_DIR - NCCL_LIBRARY -) diff --git a/ml-xgboost/cmake/modules/FindTSan.cmake b/ml-xgboost/cmake/modules/FindTSan.cmake deleted file mode 100644 index aa01802..0000000 --- a/ml-xgboost/cmake/modules/FindTSan.cmake +++ /dev/null @@ -1,13 +0,0 @@ -set(TSan_LIB_NAME tsan) - -find_library(TSan_LIBRARY - NAMES libtsan.so libtsan.so.0 libtsan.so.0.0.0 - PATHS ${SANITIZER_PATH} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib) - -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(TSan DEFAULT_MSG - TSan_LIBRARY) - -mark_as_advanced( - TSan_LIBRARY - TSan_LIB_NAME) diff --git a/ml-xgboost/cmake/modules/FindUBSan.cmake b/ml-xgboost/cmake/modules/FindUBSan.cmake deleted file mode 100644 index e1b72eb..0000000 --- a/ml-xgboost/cmake/modules/FindUBSan.cmake +++ /dev/null @@ -1,13 +0,0 @@ -set(UBSan_LIB_NAME UBSan) - -find_library(UBSan_LIBRARY - NAMES libubsan.so libubsan.so.5 libubsan.so.4 libubsan.so.3 libubsan.so.2 libubsan.so.1 libubsan.so.0 - PATHS ${SANITIZER_PATH} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib) - -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(UBSan DEFAULT_MSG - UBSan_LIBRARY) - -mark_as_advanced( - UBSan_LIBRARY - UBSan_LIB_NAME) diff --git a/ml-xgboost/cmake/version_config.h.in b/ml-xgboost/cmake/version_config.h.in deleted file mode 100644 index dfde79a..0000000 --- a/ml-xgboost/cmake/version_config.h.in +++ /dev/null @@ -1,11 +0,0 @@ -/*! 
- * Copyright 2019 XGBoost contributors - */ -#ifndef XGBOOST_VERSION_CONFIG_H_ -#define XGBOOST_VERSION_CONFIG_H_ - -#define XGBOOST_VER_MAJOR @xgboost_VERSION_MAJOR@ -#define XGBOOST_VER_MINOR @xgboost_VERSION_MINOR@ -#define XGBOOST_VER_PATCH @xgboost_VERSION_PATCH@ - -#endif // XGBOOST_VERSION_CONFIG_H_ diff --git a/ml-xgboost/cmake/xgboost-config.cmake.in b/ml-xgboost/cmake/xgboost-config.cmake.in deleted file mode 100644 index 6a155f0..0000000 --- a/ml-xgboost/cmake/xgboost-config.cmake.in +++ /dev/null @@ -1,5 +0,0 @@ -@PACKAGE_INIT@ - -if(NOT TARGET xgboost::xgboost) - include(${CMAKE_CURRENT_LIST_DIR}/XGBoostTargets.cmake) -endif() diff --git a/ml-xgboost/cub/.cproject b/ml-xgboost/cub/.cproject deleted file mode 100644 index 5e970a7..0000000 --- a/ml-xgboost/cub/.cproject +++ /dev/null @@ -1,1211 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/ml-xgboost/cub/CHANGE_LOG.TXT b/ml-xgboost/cub/CHANGE_LOG.TXT deleted file mode 100644 index 837dcc2..0000000 --- a/ml-xgboost/cub/CHANGE_LOG.TXT +++ /dev/null @@ -1,346 +0,0 @@ -1.7.0 06/07/2017 - - Compatible with CUDA9 and SM7.x (Volta) independent thread scheduling - - API change: remove cub::WarpAll() and cub::WarpAny(). 
-   These functions served to emulate __all and __any functionality for SM1.x
-   devices, which did not have those operations. However, the SM1.x devices are
-   now deprecated in CUDA, and the interfaces of these two functions are now
-   lacking the lane-mask needed for collectives to run on Volta SMs having
-   independent thread scheduling.
- - Bug fixes:
-   - Issue #86 Incorrect results with ReduceByKey
-
-//-----------------------------------------------------------------------------
-
-1.6.4 12/06/2016
- - Updated sm_5x, sm_6x tuning policies for radix sorting (3.5B and 3.4B
-   32b keys/s on TitanX and GTX 1080, respectively)
- - Bug fixes:
-   - Restore fence work-around for scan (reduce-by-key, etc.) hangs
-     in CUDA 8.5
-   - Issue 65: DeviceSegmentedRadixSort should allow inputs to have
-     pointer-to-const type
-   - Mollify Clang device-side warnings
-   - Remove outdated VC project files
-
-//-----------------------------------------------------------------------------
-
-1.6.3 11/20/2016
- - API change: BlockLoad and BlockStore are now templated by the local
-   data type, instead of the Iterator type. This allows for output iterators
-   having \p void as their \p value_type (e.g., discard iterators).
- - Updated GP100 tuning policies for radix sorting (6.2B 32b keys/s)
- - Bug fixes:
-   - Issue #74: WarpReduce executes reduction operator for out-of-bounds items
-   - Issue #72 (cub::InequalityWrapper::operator() should be non-const)
-   - Issue #71 (KeyValuePair won't work if Key has non-trivial ctor)
-   - Issue #70 1.5.3 breaks BlockScan API. Retroactively reversioned
-     from v1.5.3 -> v1.6 to appropriately indicate API change.
-   - Issue #69 cub::BlockStore::Store doesn't compile if OutputIteratorT::value_type != T
-   - Issue #68 (cub::TilePrefixCallbackOp::WarpReduce doesn't permit ptx
-     arch specialization)
-   - Improved support for Win32 platforms (warnings, alignment, etc)
-
-//-----------------------------------------------------------------------------
-
-1.6.2 (was 1.5.5) 10/25/2016
- - Updated Pascal tuning policies for radix sorting
- - Bug fixes:
-   - Fix for arm64 compilation of caching allocator
-
-//-----------------------------------------------------------------------------
-
-1.6.1 (was 1.5.4) 10/14/2016
- - Bug fixes:
-   - Fix for radix sorting bug introduced by scan refactorization
-
-//-----------------------------------------------------------------------------
-
-1.6.0 (was 1.5.3) 10/11/2016
- - API change: Device/block/warp-wide exclusive scans have been revised to now
-   accept an "initial value" (instead of an "identity value") for seeding the
-   computation with an arbitrary prefix.
- - API change: Device-wide reductions and scans can now have input sequence
-   types that are different from output sequence types (as long as they are
-   coercible)
- - Reduce repository size (move doxygen binary to doc repository)
- - Minor reductions in block-scan instruction count
- - Bug fixes:
-   - Issue #55: warning in cub/device/dispatch/dispatch_reduce_by_key.cuh
-   - Issue #59: cub::DeviceScan::ExclusiveSum can't prefix sum of float into double
-   - Issue #58: Infinite loop in cub::CachingDeviceAllocator::NearestPowerOf
-   - Issue #47: Caching allocator needs to clean up cuda error upon successful retry
-   - Issue #46: Very high amount of needed memory from the cub::DeviceHistogram::HistogramEven routine
-   - Issue #45: Caching Device Allocator fails with debug output enabled
-   - Fix for generic-type reduce-by-key warpscan (sm3.x and newer)
-
-//-----------------------------------------------------------------------------
-
-1.5.2 03/21/2016
- - Improved medium-size scan performance for sm5x (Maxwell)
- - Refactored caching allocator for device memory
-   - Spends less time locked
-   - Failure to allocate a block from the runtime will retry once after
-     freeing cached allocations
-   - Now respects max-bin (issue where blocks in excess of max-bin were
-     still being retained in free cache)
-   - Uses C++11 mutex when available
- - Bug fixes:
-   - Fix for generic-type reduce-by-key warpscan (sm3.x and newer)
-
-//-----------------------------------------------------------------------------
-
-1.5.1 12/28/2015
- - Bug fixes:
-   - Fix for incorrect DeviceRadixSort output for some small problems on
-     Maxwell SM52 architectures
-   - Fix for macro redefinition warnings when compiling with Thrust sort
-
-//-----------------------------------------------------------------------------
-
-1.5.0 12/14/2015
- - New Features:
-   - Added new segmented device-wide operations for device-wide sort and
-     reduction primitives.
- - Bug fixes:
-   - Fix for Git Issue 36 (Compilation error with GCC 4.8.4 nvcc 7.0.27) and
-     Forums thread (ThreadLoad generates compiler errors when loading from
-     pointer-to-const)
-   - Fix for Git Issue 29 (DeviceRadixSort::SortKeys yields compiler errors)
-   - Fix for Git Issue 26 (CUDA error: misaligned address after
-     cub::DeviceRadixSort::SortKeys())
-   - Fix for incorrect results or crashes on 0-length problems, e.g., Git Issue 25
-     (Floating point exception (core dumped) during cub::DeviceRadixSort::SortKeys)
-   - Fix for CUDA 7.5 issues on SM 5.2 with SHFL-based warp-scan and warp-reduction
-     on non-primitive data types (e.g., user-defined structs)
-   - Fix for small radix sorting problems where 0 temporary bytes were required
-     and user code was invoking malloc(0) on some systems where that returns
-     NULL. (The implementation assumed it was being asked for the storage size
-     again and did not run the sort.)
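The 1.6.0 entry above changed device-wide exclusive scans to accept an arbitrary initial value rather than an identity. A minimal sketch of that calling convention follows; the helper function, the `cub::Sum()` operator choice, and the seed value 42 are illustrative assumptions, and error checking is omitted:

```C++
#include <cub/cub.cuh>

// Illustrative sketch of the post-1.6.0 exclusive-scan interface: the scan
// is seeded with an arbitrary initial value (42 here), not an identity.
void ExclusiveScanWithInit(int *d_in, int *d_out, int num_items)
{
    void  *d_temp_storage = NULL;
    size_t temp_storage_bytes = 0;
    // First call computes the required temporary storage size...
    cub::DeviceScan::ExclusiveScan(d_temp_storage, temp_storage_bytes,
                                   d_in, d_out, cub::Sum(), 42, num_items);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    // ...second call performs the scan.
    cub::DeviceScan::ExclusiveScan(d_temp_storage, temp_storage_bytes,
                                   d_in, d_out, cub::Sum(), 42, num_items);
    cudaFree(d_temp_storage);
}
```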
-
-//-----------------------------------------------------------------------------
-
-1.4.1 04/13/2015
- - Bug fixes:
-   - Fixes for CUDA 7.0 issues with SHFL-based warp-scan and warp-reduction
-     on non-primitive data types (e.g., user-defined structs)
-   - Fixes for minor CUDA 7.0 performance regressions in cub::DeviceScan,
-     DeviceReduceByKey
-   - Fixes to allow cub::DeviceRadixSort and cub::BlockRadixSort on bool types
-   - Remove requirement for callers to define the CUB_CDP macro when invoking
-     CUB device-wide routines using CUDA dynamic parallelism
-   - Fix for headers not being included in the proper order (or missing includes)
-     for some block-wide functions
-
-//-----------------------------------------------------------------------------
-
-1.4.0 03/18/2015
- - New Features:
-   - Support and performance tuning for new Maxwell GPU architectures
-   - Updated cub::DeviceHistogram implementation that provides the same
-     "histogram-even" and "histogram-range" functionality as IPP/NPP.
-     Provides extremely fast and, perhaps more importantly, very uniform
-     performance response across diverse real-world datasets, including
-     pathological (homogeneous) sample distributions (resilience)
-   - New cub::DeviceSpmv methods for multiplying sparse matrices by dense
-     vectors, load-balanced using a merge-based parallel decomposition.
-   - New cub::DeviceRadixSort sorting entry-points that always return the
-     sorted output into the specified buffer (as opposed to the
-     cub::DoubleBuffer in which it could end up in either buffer)
-   - New cub::DeviceRunLengthEncode::NonTrivialRuns for finding the starting
-     offsets and lengths of all non-trivial runs (i.e., length > 1) of keys in
-     a given sequence. (Useful for top-down partitioning algorithms like
-     MSD sorting of very-large keys.)
-
-//-----------------------------------------------------------------------------
-
-1.3.2 07/28/2014
- - Bug fixes:
-   - Fix for cub::DeviceReduce where reductions of small problems (small
-     enough to only dispatch a single threadblock) would run in the default
-     stream (stream zero) regardless of whether an alternate stream was
-     specified.
-
-//-----------------------------------------------------------------------------
-
-1.3.1 05/23/2014
- - Bug fixes:
-   - Workaround for a benign WAW race warning reported by cuda-memcheck
-     in BlockScan specialized for BLOCK_SCAN_WARP_SCANS algorithm.
-   - Fix for bug in DeviceRadixSort where the algorithm may sort more
-     key bits than the caller specified (up to the nearest radix digit).
-   - Fix for ~3% DeviceRadixSort performance regression on Kepler and
-     Fermi that was introduced in v1.3.0.
-
-//-----------------------------------------------------------------------------
-
-1.3.0 05/12/2014
- - New features:
-   - CUB's collective (block-wide, warp-wide) primitives underwent a minor
-     interface refactoring:
-     - To provide the appropriate support for multidimensional thread blocks,
-       the interfaces for collective classes are now template-parameterized
-       by X, Y, and Z block dimensions (with BLOCK_DIM_Y and BLOCK_DIM_Z being
-       optional, and BLOCK_DIM_X replacing BLOCK_THREADS). Furthermore, the
-       constructors that accept remapped linear thread-identifiers have been
-       removed: all primitives now assume a row-major thread-ranking for
-       multidimensional thread blocks.
-     - To allow the host program (compiled by the host-pass) to accurately
-       determine the device-specific storage requirements for a given
-       collective (compiled for each device-pass), the interfaces for
-       collective classes are now (optionally) template-parameterized by the
-       desired PTX compute capability. This is useful when aliasing collective
-       storage to shared memory that has been allocated dynamically by the
-       host at the kernel call site.
-   - Most CUB programs having typical 1D usage should not require any changes
-     to accommodate these updates.
-   - Added new "combination" WarpScan methods for efficiently computing
-     both inclusive and exclusive prefix scans (and sums).
- - Bug fixes:
-   - Fixed bug in cub::WarpScan (which affected cub::BlockScan and
-     cub::DeviceScan) where incorrect results (e.g., NAN) would often be
-     returned when parameterized for floating-point types (fp32, fp64).
-   - Workaround-fix for ptxas error when compiling with the -G flag on Linux
-     (for debug instrumentation)
-   - Misc. workaround-fixes for certain scan scenarios (using custom scan
-     operators) where code compiled for SM1x is run on newer GPUs of higher
-     compute-capability: the compiler could not tell which memory space was
-     being used for collective operations and was mistakenly using global ops
-     instead of shared ops.
-
-//-----------------------------------------------------------------------------
-
-1.2.3 04/01/2014
- - Bug fixes:
-   - Fixed access violation bug in DeviceReduce::ReduceByKey for non-primitive value types
-   - Fixed code-snippet bug in ArgIndexInputIteratorT documentation
-
-//-----------------------------------------------------------------------------
-
-1.2.2 03/03/2014
- - New features:
-   - Added MS VC++ project solutions for device-wide and block-wide examples
- - Performance:
-   - Added a third algorithmic variant of cub::BlockReduce for improved
-     performance when using commutative operators (e.g., numeric addition)
- - Bug fixes:
-   - Fixed bug where inclusion of Thrust headers in a certain order prevented
-     CUB device-wide primitives from working properly
-
-//-----------------------------------------------------------------------------
-
-1.2.0 02/25/2014
- - New features:
-   - Added device-wide reduce-by-key (DeviceReduce::ReduceByKey, DeviceReduce::RunLengthEncode)
- - Performance
-   - Improved DeviceScan, DeviceSelect, DevicePartition performance
- - Documentation and testing:
-   - Compatible with CUDA 6.0
-   - Added performance-portability plots for many device-wide primitives to doc
-   - Update doc and tests to reflect iterator (in)compatibilities with CUDA 5.0
-     (and older) and Thrust 1.6 (and older).
- - Bug fixes
-   - Revised the operation of temporary tile status bookkeeping for DeviceScan
-     (and similar) to be safe for current code run on future platforms (now
-     uses proper fences)
-   - Fixed DeviceScan bug where Win32 alignment disagreements between host and
-     device regarding user-defined data types would corrupt tile status
-   - Fixed BlockScan bug where certain exclusive scans on custom data types for
-     the BLOCK_SCAN_WARP_SCANS variant would return incorrect results for the
-     first thread in the block
-   - Added workaround for TexRefInputIteratorT to work with CUDA 6.0
-
-//-----------------------------------------------------------------------------
-
-1.1.1 12/11/2013
- - New features:
-   - Added TexObjInputIteratorT, TexRefInputIteratorT, CacheModifiedInputIteratorT,
-     and CacheModifiedOutputIterator types for loading & storing arbitrary types
-     through the cache hierarchy.
-     Compatible with Thrust API.
-   - Added descending sorting to DeviceRadixSort and BlockRadixSort
-   - Added min, max, arg-min, and arg-max to DeviceReduce
-   - Added DeviceSelect (select-unique, select-if, and select-flagged)
-   - Added DevicePartition (partition-if, partition-flagged)
-   - Added generic cub::ShuffleUp(), cub::ShuffleDown(), and cub::ShuffleIndex()
-     for warp-wide communication of arbitrary data types (SM3x+)
-   - Added cub::MaxSmOccupancy() for accurately determining SM occupancy
-     for any given kernel function pointer
- - Performance
-   - Improved DeviceScan and DeviceRadixSort performance for older
-     architectures (SM10-SM30)
- - Interface changes:
-   - Refactored block-wide I/O (BlockLoad and BlockStore), removing
-     cache-modifiers from their interfaces. The CacheModifiedInputIteratorT
-     and CacheModifiedOutputIterator should now be used with BlockLoad and
-     BlockStore to effect that behavior.
-   - Rename device-wide "stream_synchronous" param to "debug_synchronous"
-     to avoid confusion about usage
- - Documentation and testing:
-   - Added simple examples of device-wide methods
-   - Improved doxygen documentation and example snippets
-   - Improved test coverage to include up to 21,000 kernel variants and
-     851,000 unit tests (per architecture, per platform)
- - Bug fixes
-   - Fixed misc DeviceScan, BlockScan, DeviceReduce, and BlockReduce bugs when
-     operating on non-primitive types for older architectures SM10-SM13
-   - Fixed DeviceScan / WarpReduction bug: SHFL-based segmented reduction
-     producing incorrect results for multi-word types (size > 4B) on Linux
-   - Fixed BlockScan bug: For warpscan-based scans, not all threads in the
-     first warp were entering the prefix callback functor
-   - Fixed DeviceRadixSort bug: race condition with key-value pairs for
-     pre-SM35 architectures
-   - Fixed DeviceRadixSort bug: incorrect bitfield-extract behavior with
-     long keys on 64bit Linux
-   - Fixed BlockDiscontinuity bug: compilation error for types other than
-     int32/uint32
-   - CDP (device-callable) versions of device-wide methods now report the same
-     temporary storage allocation size requirement as their host-callable
-     counterparts
-
-//-----------------------------------------------------------------------------
-
-1.0.2 08/23/2013
- - Corrections to code snippet examples for BlockLoad, BlockStore, and BlockDiscontinuity
- - Cleaned up unnecessary/missing header includes. You can now safely #include
-   a specific .cuh (instead of cub.cuh)
- - Bug/compilation fixes for BlockHistogram
-
-//-----------------------------------------------------------------------------
-
-1.0.1 08/08/2013
- - New collective interface idiom (specialize::construct::invoke).
- - Added best-in-class DeviceRadixSort. Implements short-circuiting for
-   homogeneous digit passes.
- - Added best-in-class DeviceScan. Implements single-pass "adaptive-lookback" strategy.
- - Significantly improved documentation (with example code snippets)
- - More extensive regression test suite for aggressively testing collective variants
- - Allow non-trivially-constructed types (previously unions had prevented
-   aliasing temporary storage of those types)
- - Improved support for Kepler SHFL (collective ops now use SHFL for types larger than 32b)
- - Better code generation for 64-bit addressing within BlockLoad/BlockStore
- - DeviceHistogram now supports histograms of arbitrary bins
- - Misc. fixes
fixes - - Workarounds for SM10 codegen issues in uncommonly-used WarpScan/Reduce specializations - - Updates to accommodate CUDA 5.5 dynamic parallelism - - -//----------------------------------------------------------------------------- - -0.9.4 05/07/2013 - - - Fixed compilation errors for SM10-SM13 - - Fixed compilation errors for some WarpScan entrypoints on SM30+ - - Added block-wide histogram (BlockHistogram256) - - Added device-wide histogram (DeviceHistogram256) - - Added new BlockScan algorithm variant BLOCK_SCAN_RAKING_MEMOIZE, which - trades more register consumption for less shared memory I/O) - - Updates to BlockRadixRank to use BlockScan (which improves performance - on Kepler due to SHFL instruction) - - Allow types other than C++ primitives to be used in WarpScan::*Sum methods - if they only have operator + overloaded. (Previously they also required - to support assignment from int(0).) - - Update BlockReduce's BLOCK_REDUCE_WARP_REDUCTIONS algorithm to work even - when block size is not an even multiple of warp size - - Added work management utility descriptors (GridQueue, GridEvenShare) - - Refactoring of DeviceAllocator interface and CachingDeviceAllocator - implementation - - Misc. documentation updates and corrections. - -//----------------------------------------------------------------------------- - -0.9.2 04/04/2013 - - - Added WarpReduce. WarpReduce uses the SHFL instruction when applicable. - BlockReduce now uses this WarpReduce instead of implementing its own. - - Misc. fixes for 64-bit Linux compilation warnings and errors. - - Misc. documentation updates and corrections. - -//----------------------------------------------------------------------------- - -0.9.1 03/09/2013 - - - Fix for ambiguity in BlockScan::Reduce() between generic reduction and - summation. Summation entrypoints are now called ::Sum(), similar to the - convention in BlockScan. - - Small edits to mainpage documentation and download tracking - -//----------------------------------------------------------------------------- - -0.9.0 03/07/2013 - - - Intial "preview" release. CUB is the first durable, high-performance library - of cooperative block-level, warp-level, and thread-level primitives for CUDA - kernel programming. More primitives and examples coming soon! - \ No newline at end of file diff --git a/ml-xgboost/cub/LICENSE.TXT b/ml-xgboost/cub/LICENSE.TXT deleted file mode 100644 index 9ba3b78..0000000 --- a/ml-xgboost/cub/LICENSE.TXT +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2010-2011, Duane Merrill. All rights reserved. -Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the NVIDIA CORPORATION nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/ml-xgboost/cub/README.md b/ml-xgboost/cub/README.md deleted file mode 100644 index 1ffcf84..0000000 --- a/ml-xgboost/cub/README.md +++ /dev/null @@ -1,128 +0,0 @@ -
-<h3>About CUB</h3>
-
-Current release: v1.6.4 (12/06/2016)
-
-We recommend the [CUB Project Website](http://nvlabs.github.com/cub) and the [cub-users discussion forum](http://groups.google.com/group/cub-users) for further information and examples.
-
-CUB provides state-of-the-art, reusable software components for every layer
-of the CUDA programming model:
-- [Device-wide primitives](https://nvlabs.github.com/cub/group___device_module.html)
-  - Sort, prefix scan, reduction, histogram, etc.
-  - Compatible with CUDA dynamic parallelism
-- [Block-wide "collective" primitives](https://nvlabs.github.com/cub/group___block_module.html)
-  - I/O, sort, prefix scan, reduction, histogram, etc.
-  - Compatible with arbitrary thread block sizes and types
-- [Warp-wide "collective" primitives](https://nvlabs.github.com/cub/group___warp_module.html)
-  - Warp-wide prefix scan, reduction, etc.
-  - Safe and architecture-specific
-- [Thread and resource utilities](https://nvlabs.github.com/cub/group___thread_module.html)
-  - PTX intrinsics, device reflection, texture-caching iterators, caching memory allocators, etc.
-
-![Orientation of collective primitives within the CUDA software stack](http://nvlabs.github.com/cub/cub_overview.png)
-
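To make the device-wide layer listed above concrete before the block-level example that follows, here is a minimal sketch of a device-wide reduction. The two-phase temporary-storage idiom is standard CUB usage, but the helper function and its argument names are illustrative assumptions, and error checking is omitted:

```C++
#include <cub/cub.cuh>

// Illustrative sketch: sum num_items ints from d_in into *d_sum using the
// device-wide layer. CUB's two-phase idiom: size the temp storage, then run.
void SumArray(int *d_in, int *d_sum, int num_items)
{
    void  *d_temp_storage = NULL;
    size_t temp_storage_bytes = 0;
    cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_sum, num_items);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_sum, num_items);
    cudaFree(d_temp_storage);
}
```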

-<h3>A Simple Example</h3>
-
-```C++
-#include <cub/cub.cuh>
-
-// Block-sorting CUDA kernel
-__global__ void BlockSortKernel(int *d_in, int *d_out)
-{
-     using namespace cub;
-
-     // Specialize BlockRadixSort, BlockLoad, and BlockStore for 128 threads
-     // owning 16 integer items each
-     typedef BlockRadixSort<int, 128, 16>                     BlockRadixSort;
-     typedef BlockLoad<int, 128, 16, BLOCK_LOAD_TRANSPOSE>    BlockLoad;
-     typedef BlockStore<int, 128, 16, BLOCK_STORE_TRANSPOSE>  BlockStore;
-
-     // Allocate shared memory
-     __shared__ union {
-         typename BlockRadixSort::TempStorage  sort;
-         typename BlockLoad::TempStorage       load;
-         typename BlockStore::TempStorage      store;
-     } temp_storage;
-
-     int block_offset = blockIdx.x * (128 * 16);    // OffsetT for this block's segment
-
-     // Obtain a segment of 2048 consecutive keys that are blocked across threads
-     int thread_keys[16];
-     BlockLoad(temp_storage.load).Load(d_in + block_offset, thread_keys);
-     __syncthreads();
-
-     // Collectively sort the keys
-     BlockRadixSort(temp_storage.sort).Sort(thread_keys);
-     __syncthreads();
-
-     // Store the sorted segment
-     BlockStore(temp_storage.store).Store(d_out + block_offset, thread_keys);
-}
-```
-
-Each thread block uses cub::BlockRadixSort to collectively sort
-its own input segment. The class is specialized by the
-data type being sorted, by the number of threads per block, by the number of
-keys per thread, and implicitly by the targeted compilation architecture.
-
-The cub::BlockLoad and cub::BlockStore classes are similarly specialized.
-Furthermore, to provide coalesced accesses to device memory, these primitives are
-configured to access memory using a striped access pattern (where consecutive threads
-simultaneously access consecutive items) and then transpose the keys into
-a [blocked arrangement](index.html#sec4sec3) of elements across threads.
-
-Once specialized, these classes expose opaque \p TempStorage member types.
-The thread block uses these storage types to statically allocate the union of
-shared memory needed by the thread block. (Alternatively these storage types
-could be aliased to global memory allocations.)
-
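A host-side driver for the kernel above might look like the following sketch; the 64-block grid size, the helper function name, and the trailing synchronization are illustrative assumptions rather than part of the original README:

```C++
#include <cuda_runtime.h>

// Illustrative host-side launch: one 2048-key segment per thread block,
// 128 threads per block to match BlockSortKernel's specialization.
void SortSegments(int *d_in, int *d_out)
{
    const int num_segments = 64;   // assumed number of 2048-key segments
    BlockSortKernel<<<num_segments, 128>>>(d_in, d_out);
    cudaDeviceSynchronize();       // block until the device-side sort completes
}
```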

-<h3>Stable Releases</h3>
- -CUB releases are labeled using version identifiers having three fields: -*epoch.feature.update*. The *epoch* field corresponds to support for -a major change in the CUDA programming model. The *feature* field -corresponds to a stable set of features, functionality, and interface. The -*update* field corresponds to a bug-fix or performance update for that -feature set. At the moment, we do not publicly provide non-stable releases -such as development snapshots, beta releases or rolling releases. (Feel free -to contact us if you would like such things.) See the -[CUB Project Website](http://nvlabs.github.com/cub) for more information. - -

-<h3>Contributors</h3>
- -CUB is developed as an open-source project by [NVIDIA Research](http://research.nvidia.com). The primary contributor is [Duane Merrill](http://github.com/dumerrill). - -

-<h3>Open Source License</h3>
- -CUB is available under the "New BSD" open-source license: - -``` -Copyright (c) 2010-2011, Duane Merrill. All rights reserved. -Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the NVIDIA CORPORATION nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -``` diff --git a/ml-xgboost/cub/common.mk b/ml-xgboost/cub/common.mk deleted file mode 100644 index 8154850..0000000 --- a/ml-xgboost/cub/common.mk +++ /dev/null @@ -1,233 +0,0 @@ -#/****************************************************************************** -# * Copyright (c) 2011, Duane Merrill. All rights reserved. -# * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. -# * -# * Redistribution and use in source and binary forms, with or without -# * modification, are permitted provided that the following conditions are met: -# * * Redistributions of source code must retain the above copyright -# * notice, this list of conditions and the following disclaimer. -# * * Redistributions in binary form must reproduce the above copyright -# * notice, this list of conditions and the following disclaimer in the -# * documentation and/or other materials provided with the distribution. -# * * Neither the name of the NVIDIA CORPORATION nor the -# * names of its contributors may be used to endorse or promote products -# * derived from this software without specific prior written permission. -# * -# * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY -# * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -# * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-# * -#******************************************************************************/ - - -#------------------------------------------------------------------------------- -# Commandline Options -#------------------------------------------------------------------------------- - -# [sm=] Compute-capability to compile for, e.g., "sm=200,300,350" (SM20 by default). - -COMMA = , -ifdef sm - SM_ARCH = $(subst $(COMMA),-,$(sm)) -else - SM_ARCH = 200 -endif - -ifeq (700, $(findstring 700, $(SM_ARCH))) - SM_TARGETS += -gencode=arch=compute_70,code=\"sm_70,compute_70\" - SM_DEF += -DSM700 - TEST_ARCH = 700 -endif -ifeq (620, $(findstring 620, $(SM_ARCH))) - SM_TARGETS += -gencode=arch=compute_62,code=\"sm_62,compute_62\" - SM_DEF += -DSM620 - TEST_ARCH = 620 -endif -ifeq (610, $(findstring 610, $(SM_ARCH))) - SM_TARGETS += -gencode=arch=compute_61,code=\"sm_61,compute_61\" - SM_DEF += -DSM610 - TEST_ARCH = 610 -endif -ifeq (600, $(findstring 600, $(SM_ARCH))) - SM_TARGETS += -gencode=arch=compute_60,code=\"sm_60,compute_60\" - SM_DEF += -DSM600 - TEST_ARCH = 600 -endif -ifeq (520, $(findstring 520, $(SM_ARCH))) - SM_TARGETS += -gencode=arch=compute_52,code=\"sm_52,compute_52\" - SM_DEF += -DSM520 - TEST_ARCH = 520 -endif -ifeq (370, $(findstring 370, $(SM_ARCH))) - SM_TARGETS += -gencode=arch=compute_37,code=\"sm_37,compute_37\" - SM_DEF += -DSM370 - TEST_ARCH = 370 -endif -ifeq (350, $(findstring 350, $(SM_ARCH))) - SM_TARGETS += -gencode=arch=compute_35,code=\"sm_35,compute_35\" - SM_DEF += -DSM350 - TEST_ARCH = 350 -endif -ifeq (300, $(findstring 300, $(SM_ARCH))) - SM_TARGETS += -gencode=arch=compute_30,code=\"sm_30,compute_30\" - SM_DEF += -DSM300 - TEST_ARCH = 300 -endif -ifeq (210, $(findstring 210, $(SM_ARCH))) - SM_TARGETS += -gencode=arch=compute_20,code=\"sm_21,compute_20\" - SM_DEF += -DSM210 - TEST_ARCH = 210 -endif -ifeq (200, $(findstring 200, $(SM_ARCH))) - SM_TARGETS += -gencode=arch=compute_20,code=\"sm_20,compute_20\" - SM_DEF += -DSM200 - TEST_ARCH = 200 -endif -ifeq (130, $(findstring 130, $(SM_ARCH))) - SM_TARGETS += -gencode=arch=compute_13,code=\"sm_13,compute_13\" - SM_DEF += -DSM130 - TEST_ARCH = 130 -endif -ifeq (120, $(findstring 120, $(SM_ARCH))) - SM_TARGETS += -gencode=arch=compute_12,code=\"sm_12,compute_12\" - SM_DEF += -DSM120 - TEST_ARCH = 120 -endif -ifeq (110, $(findstring 110, $(SM_ARCH))) - SM_TARGETS += -gencode=arch=compute_11,code=\"sm_11,compute_11\" - SM_DEF += -DSM110 - TEST_ARCH = 110 -endif -ifeq (100, $(findstring 100, $(SM_ARCH))) - SM_TARGETS += -gencode=arch=compute_10,code=\"sm_10,compute_10\" - SM_DEF += -DSM100 - TEST_ARCH = 100 -endif - - -# [cdp=<0|1>] CDP enable option (default: no) -ifeq ($(cdp), 1) - DEFINES += -DCUB_CDP - CDP_SUFFIX = cdp - NVCCFLAGS += -rdc=true -lcudadevrt -else - CDP_SUFFIX = nocdp -endif - - -# [force32=<0|1>] Device addressing mode option (64-bit device pointers by default) -ifeq ($(force32), 1) - CPU_ARCH = -m32 - CPU_ARCH_SUFFIX = i386 -else - CPU_ARCH = -m64 - CPU_ARCH_SUFFIX = x86_64 - NPPI = -lnppi -endif - - -# [abi=<0|1>] CUDA ABI option (enabled by default) -ifneq ($(abi), 0) - ABI_SUFFIX = abi -else - NVCCFLAGS += -Xptxas -abi=no - ABI_SUFFIX = noabi -endif - - -# [open64=<0|1>] Middle-end compiler option (nvvm by default) -ifeq ($(open64), 1) - NVCCFLAGS += -open64 - PTX_SUFFIX = open64 -else - PTX_SUFFIX = nvvm -endif - - -# [verbose=<0|1>] Verbose toolchain output from nvcc option -ifeq ($(verbose), 1) - NVCCFLAGS += -v -endif - - -# [keep=<0|1>] Keep intermediate compilation artifacts option -ifeq 
($(keep), 1) - NVCCFLAGS += -keep -endif - -# [debug=<0|1>] Generate debug mode code -ifeq ($(debug), 1) - NVCCFLAGS += -G -endif - - -#------------------------------------------------------------------------------- -# Compiler and compilation platform -#------------------------------------------------------------------------------- - -CUB_DIR = $(dir $(lastword $(MAKEFILE_LIST))) - -NVCC = "$(shell which nvcc)" -ifdef nvccver - NVCC_VERSION = $(nvccver) -else - NVCC_VERSION = $(strip $(shell nvcc --version | grep release | sed 's/.*release //' | sed 's/,.*//')) -endif - -# detect OS -OSUPPER = $(shell uname -s 2>/dev/null | tr [:lower:] [:upper:]) - -# Default flags: verbose kernel properties (regs, smem, cmem, etc.); runtimes for compilation phases -NVCCFLAGS += $(SM_DEF) -Xptxas -v -Xcudafe -\# - -ifeq (WIN_NT, $(findstring WIN_NT, $(OSUPPER))) - # For MSVC - # Enable more warnings and treat as errors - NVCCFLAGS += -Xcompiler /W3 -Xcompiler /WX - # Disable excess x86 floating point precision that can lead to results being labeled incorrectly - NVCCFLAGS += -Xcompiler /fp:strict - # Help the compiler/linker work with huge numbers of kernels on Windows - NVCCFLAGS += -Xcompiler /bigobj -Xcompiler /Zm500 - CC = cl - - # Multithreaded runtime - NVCCFLAGS += -Xcompiler /MT - -ifneq ($(force32), 1) - CUDART_CYG = "$(shell dirname $(NVCC))/../lib/Win32/cudart.lib" -else - CUDART_CYG = "$(shell dirname $(NVCC))/../lib/x64/cudart.lib" -endif - CUDART = "$(shell cygpath -w $(CUDART_CYG))" -else - # For g++ - # Disable excess x86 floating point precision that can lead to results being labeled incorrectly - NVCCFLAGS += -Xcompiler -ffloat-store - CC = g++ -ifneq ($(force32), 1) - CUDART = "$(shell dirname $(NVCC))/../lib/libcudart_static.a" -else - CUDART = "$(shell dirname $(NVCC))/../lib64/libcudart_static.a" -endif -endif - -# Suffix to append to each binary -BIN_SUFFIX = sm$(SM_ARCH)_$(PTX_SUFFIX)_$(NVCC_VERSION)_$(ABI_SUFFIX)_$(CDP_SUFFIX)_$(CPU_ARCH_SUFFIX) - - -#------------------------------------------------------------------------------- -# Dependency Lists -#------------------------------------------------------------------------------- - -rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d)) - -CUB_DEPS = $(call rwildcard, $(CUB_DIR),*.cuh) \ - $(CUB_DIR)common.mk - diff --git a/ml-xgboost/cub/cub/agent/agent_histogram.cuh b/ml-xgboost/cub/cub/agent/agent_histogram.cuh deleted file mode 100644 index e42ffe2..0000000 --- a/ml-xgboost/cub/cub/agent/agent_histogram.cuh +++ /dev/null @@ -1,783 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::AgentHistogram implements a stateful abstraction of CUDA thread blocks for participating in device-wide histogram . - */ - -#pragma once - -#include - -#include "../util_type.cuh" -#include "../block/block_load.cuh" -#include "../grid/grid_queue.cuh" -#include "../iterator/cache_modified_input_iterator.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/****************************************************************************** - * Tuning policy - ******************************************************************************/ - -/** - * - */ -enum BlockHistogramMemoryPreference -{ - GMEM, - SMEM, - BLEND -}; - - -/** - * Parameterizable tuning policy type for AgentHistogram - */ -template < - int _BLOCK_THREADS, ///< Threads per thread block - int _PIXELS_PER_THREAD, ///< Pixels per thread (per tile of input) - BlockLoadAlgorithm _LOAD_ALGORITHM, ///< The BlockLoad algorithm to use - CacheLoadModifier _LOAD_MODIFIER, ///< Cache load modifier for reading input elements - bool _RLE_COMPRESS, ///< Whether to perform localized RLE to compress samples before histogramming - BlockHistogramMemoryPreference _MEM_PREFERENCE, ///< Whether to prefer privatized shared-memory bins (versus privatized global-memory bins) - bool _WORK_STEALING> ///< Whether to dequeue tiles from a global work queue -struct AgentHistogramPolicy -{ - enum - { - BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block - PIXELS_PER_THREAD = _PIXELS_PER_THREAD, ///< Pixels per thread (per tile of input) - IS_RLE_COMPRESS = _RLE_COMPRESS, ///< Whether to perform localized RLE to compress samples before histogramming - MEM_PREFERENCE = _MEM_PREFERENCE, ///< Whether to prefer privatized shared-memory bins (versus privatized global-memory bins) - IS_WORK_STEALING = _WORK_STEALING, ///< Whether to dequeue tiles from a global work queue - }; - - static const BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; ///< The BlockLoad algorithm to use - static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading input elements -}; - - -/****************************************************************************** - * Thread block abstractions - ******************************************************************************/ - -/** - * \brief AgentHistogram implements a stateful abstraction of CUDA thread blocks for participating in device-wide histogram . 
- */ -template < - typename AgentHistogramPolicyT, ///< Parameterized AgentHistogramPolicy tuning policy type - int PRIVATIZED_SMEM_BINS, ///< Number of privatized shared-memory histogram bins of any channel. Zero indicates privatized counters to be maintained in device-accessible memory. - int NUM_CHANNELS, ///< Number of channels interleaved in the input data. Supports up to four channels. - int NUM_ACTIVE_CHANNELS, ///< Number of channels actively being histogrammed - typename SampleIteratorT, ///< Random-access input iterator type for reading samples - typename CounterT, ///< Integer type for counting sample occurrences per histogram bin - typename PrivatizedDecodeOpT, ///< The transform operator type for determining privatized counter indices from samples, one for each channel - typename OutputDecodeOpT, ///< The transform operator type for determining output bin-ids from privatized counter indices, one for each channel - typename OffsetT, ///< Signed integer type for global offsets - int PTX_ARCH = CUB_PTX_ARCH> ///< PTX compute capability -struct AgentHistogram -{ - //--------------------------------------------------------------------- - // Types and constants - //--------------------------------------------------------------------- - - /// The sample type of the input iterator - typedef typename std::iterator_traits::value_type SampleT; - - /// The pixel type of SampleT - typedef typename CubVector::Type PixelT; - - /// The quad type of SampleT - typedef typename CubVector::Type QuadT; - - /// Constants - enum - { - BLOCK_THREADS = AgentHistogramPolicyT::BLOCK_THREADS, - - PIXELS_PER_THREAD = AgentHistogramPolicyT::PIXELS_PER_THREAD, - SAMPLES_PER_THREAD = PIXELS_PER_THREAD * NUM_CHANNELS, - QUADS_PER_THREAD = SAMPLES_PER_THREAD / 4, - - TILE_PIXELS = PIXELS_PER_THREAD * BLOCK_THREADS, - TILE_SAMPLES = SAMPLES_PER_THREAD * BLOCK_THREADS, - - IS_RLE_COMPRESS = AgentHistogramPolicyT::IS_RLE_COMPRESS, - - MEM_PREFERENCE = (PRIVATIZED_SMEM_BINS > 0) ? 
- AgentHistogramPolicyT::MEM_PREFERENCE : - GMEM, - - IS_WORK_STEALING = AgentHistogramPolicyT::IS_WORK_STEALING, - }; - - /// Cache load modifier for reading input elements - static const CacheLoadModifier LOAD_MODIFIER = AgentHistogramPolicyT::LOAD_MODIFIER; - - - /// Input iterator wrapper type (for applying cache modifier) - typedef typename If::VALUE, - CacheModifiedInputIterator, // Wrap the native input pointer with CacheModifiedInputIterator - SampleIteratorT>::Type // Directly use the supplied input iterator type - WrappedSampleIteratorT; - - /// Pixel input iterator type (for applying cache modifier) - typedef CacheModifiedInputIterator - WrappedPixelIteratorT; - - /// Qaud input iterator type (for applying cache modifier) - typedef CacheModifiedInputIterator - WrappedQuadIteratorT; - - /// Parameterized BlockLoad type for samples - typedef BlockLoad< - SampleT, - BLOCK_THREADS, - SAMPLES_PER_THREAD, - AgentHistogramPolicyT::LOAD_ALGORITHM> - BlockLoadSampleT; - - /// Parameterized BlockLoad type for pixels - typedef BlockLoad< - PixelT, - BLOCK_THREADS, - PIXELS_PER_THREAD, - AgentHistogramPolicyT::LOAD_ALGORITHM> - BlockLoadPixelT; - - /// Parameterized BlockLoad type for quads - typedef BlockLoad< - QuadT, - BLOCK_THREADS, - QUADS_PER_THREAD, - AgentHistogramPolicyT::LOAD_ALGORITHM> - BlockLoadQuadT; - - /// Shared memory type required by this thread block - struct _TempStorage - { - CounterT histograms[NUM_ACTIVE_CHANNELS][PRIVATIZED_SMEM_BINS + 1]; // Smem needed for block-privatized smem histogram (with 1 word of padding) - - int tile_idx; - - union - { - typename BlockLoadSampleT::TempStorage sample_load; // Smem needed for loading a tile of samples - typename BlockLoadPixelT::TempStorage pixel_load; // Smem needed for loading a tile of pixels - typename BlockLoadQuadT::TempStorage quad_load; // Smem needed for loading a tile of quads - }; - }; - - - /// Temporary storage type (unionable) - struct TempStorage : Uninitialized<_TempStorage> {}; - - - //--------------------------------------------------------------------- - // Per-thread fields - //--------------------------------------------------------------------- - - /// Reference to temp_storage - _TempStorage &temp_storage; - - /// Sample input iterator (with cache modifier applied, if possible) - WrappedSampleIteratorT d_wrapped_samples; - - /// Native pointer for input samples (possibly NULL if unavailable) - SampleT* d_native_samples; - - /// The number of output bins for each channel - int (&num_output_bins)[NUM_ACTIVE_CHANNELS]; - - /// The number of privatized bins for each channel - int (&num_privatized_bins)[NUM_ACTIVE_CHANNELS]; - - /// Reference to gmem privatized histograms for each channel - CounterT* d_privatized_histograms[NUM_ACTIVE_CHANNELS]; - - /// Reference to final output histograms (gmem) - CounterT* (&d_output_histograms)[NUM_ACTIVE_CHANNELS]; - - /// The transform operator for determining output bin-ids from privatized counter indices, one for each channel - OutputDecodeOpT (&output_decode_op)[NUM_ACTIVE_CHANNELS]; - - /// The transform operator for determining privatized counter indices from samples, one for each channel - PrivatizedDecodeOpT (&privatized_decode_op)[NUM_ACTIVE_CHANNELS]; - - /// Whether to prefer privatized smem counters vs privatized global counters - bool prefer_smem; - - - //--------------------------------------------------------------------- - // Initialize privatized bin counters - //--------------------------------------------------------------------- - - // Initialize 
privatized bin counters - __device__ __forceinline__ void InitBinCounters(CounterT* privatized_histograms[NUM_ACTIVE_CHANNELS]) - { - // Initialize histogram bin counts to zeros - #pragma unroll - for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) - { - for (int privatized_bin = threadIdx.x; privatized_bin < num_privatized_bins[CHANNEL]; privatized_bin += BLOCK_THREADS) - { - privatized_histograms[CHANNEL][privatized_bin] = 0; - } - } - - // Barrier to make sure all threads are done updating counters - CTA_SYNC(); - } - - - // Initialize privatized bin counters. Specialized for privatized shared-memory counters - __device__ __forceinline__ void InitSmemBinCounters() - { - CounterT* privatized_histograms[NUM_ACTIVE_CHANNELS]; - - for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) - privatized_histograms[CHANNEL] = temp_storage.histograms[CHANNEL]; - - InitBinCounters(privatized_histograms); - } - - - // Initialize privatized bin counters. Specialized for privatized global-memory counters - __device__ __forceinline__ void InitGmemBinCounters() - { - InitBinCounters(d_privatized_histograms); - } - - - //--------------------------------------------------------------------- - // Update final output histograms - //--------------------------------------------------------------------- - - // Update final output histograms from privatized histograms - __device__ __forceinline__ void StoreOutput(CounterT* privatized_histograms[NUM_ACTIVE_CHANNELS]) - { - // Barrier to make sure all threads are done updating counters - CTA_SYNC(); - - // Apply privatized bin counts to output bin counts - #pragma unroll - for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) - { - int channel_bins = num_privatized_bins[CHANNEL]; - for (int privatized_bin = threadIdx.x; - privatized_bin < channel_bins; - privatized_bin += BLOCK_THREADS) - { - int output_bin = -1; - CounterT count = privatized_histograms[CHANNEL][privatized_bin]; - bool is_valid = count > 0; - - output_decode_op[CHANNEL].BinSelect((SampleT) privatized_bin, output_bin, is_valid); - - if (output_bin >= 0) - { - atomicAdd(&d_output_histograms[CHANNEL][output_bin], count); - } - - } - } - } - - - // Update final output histograms from privatized histograms. Specialized for privatized shared-memory counters - __device__ __forceinline__ void StoreSmemOutput() - { - CounterT* privatized_histograms[NUM_ACTIVE_CHANNELS]; - for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) - privatized_histograms[CHANNEL] = temp_storage.histograms[CHANNEL]; - - StoreOutput(privatized_histograms); - } - - - // Update final output histograms from privatized histograms. Specialized for privatized global-memory counters - __device__ __forceinline__ void StoreGmemOutput() - { - StoreOutput(d_privatized_histograms); - } - - - //--------------------------------------------------------------------- - // Tile accumulation - //--------------------------------------------------------------------- - - // Accumulate pixels. Specialized for RLE compression. 
- __device__ __forceinline__ void AccumulatePixels( - SampleT samples[PIXELS_PER_THREAD][NUM_CHANNELS], - bool is_valid[PIXELS_PER_THREAD], - CounterT* privatized_histograms[NUM_ACTIVE_CHANNELS], - Int2Type is_rle_compress) - { - - #pragma unroll - for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) - { - // Bin pixels - int bins[PIXELS_PER_THREAD]; - - #pragma unroll - for (int PIXEL = 0; PIXEL < PIXELS_PER_THREAD; ++PIXEL) - { - bins[PIXEL] = -1; - privatized_decode_op[CHANNEL].BinSelect(samples[PIXEL][CHANNEL], bins[PIXEL], is_valid[PIXEL]); - } - - CounterT accumulator = 1; - - #pragma unroll - for (int PIXEL = 0; PIXEL < PIXELS_PER_THREAD - 1; ++PIXEL) - { - if (bins[PIXEL] == bins[PIXEL + 1]) - { - accumulator++; - } - else - { - if (bins[PIXEL] >= 0) - atomicAdd(privatized_histograms[CHANNEL] + bins[PIXEL], accumulator); - - accumulator = 1; - } - } - // Last pixel - if (bins[PIXELS_PER_THREAD - 1] >= 0) - atomicAdd(privatized_histograms[CHANNEL] + bins[PIXELS_PER_THREAD - 1], accumulator); - } - } - - - // Accumulate pixels. Specialized for individual accumulation of each pixel. - __device__ __forceinline__ void AccumulatePixels( - SampleT samples[PIXELS_PER_THREAD][NUM_CHANNELS], - bool is_valid[PIXELS_PER_THREAD], - CounterT* privatized_histograms[NUM_ACTIVE_CHANNELS], - Int2Type is_rle_compress) - { - #pragma unroll - for (int PIXEL = 0; PIXEL < PIXELS_PER_THREAD; ++PIXEL) - { - #pragma unroll - for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) - { - int bin = -1; - privatized_decode_op[CHANNEL].BinSelect(samples[PIXEL][CHANNEL], bin, is_valid[PIXEL]); - if (bin >= 0) - atomicAdd(privatized_histograms[CHANNEL] + bin, 1); - } - } - } - - - /** - * Accumulate pixel, specialized for smem privatized histogram - */ - __device__ __forceinline__ void AccumulateSmemPixels( - SampleT samples[PIXELS_PER_THREAD][NUM_CHANNELS], - bool is_valid[PIXELS_PER_THREAD]) - { - CounterT* privatized_histograms[NUM_ACTIVE_CHANNELS]; - - for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) - privatized_histograms[CHANNEL] = temp_storage.histograms[CHANNEL]; - - AccumulatePixels(samples, is_valid, privatized_histograms, Int2Type()); - } - - - /** - * Accumulate pixel, specialized for gmem privatized histogram - */ - __device__ __forceinline__ void AccumulateGmemPixels( - SampleT samples[PIXELS_PER_THREAD][NUM_CHANNELS], - bool is_valid[PIXELS_PER_THREAD]) - { - AccumulatePixels(samples, is_valid, d_privatized_histograms, Int2Type()); - } - - - - //--------------------------------------------------------------------- - // Tile loading - //--------------------------------------------------------------------- - - // Load full, aligned tile using pixel iterator (multi-channel) - template - __device__ __forceinline__ void LoadFullAlignedTile( - OffsetT block_offset, - int valid_samples, - SampleT (&samples)[PIXELS_PER_THREAD][NUM_CHANNELS], - Int2Type<_NUM_ACTIVE_CHANNELS> num_active_channels) - { - typedef PixelT AliasedPixels[PIXELS_PER_THREAD]; - - WrappedPixelIteratorT d_wrapped_pixels((PixelT*) (d_native_samples + block_offset)); - - // Load using a wrapped pixel iterator - BlockLoadPixelT(temp_storage.pixel_load).Load( - d_wrapped_pixels, - reinterpret_cast(samples)); - } - - // Load full, aligned tile using quad iterator (single-channel) - __device__ __forceinline__ void LoadFullAlignedTile( - OffsetT block_offset, - int valid_samples, - SampleT (&samples)[PIXELS_PER_THREAD][NUM_CHANNELS], - Int2Type<1> num_active_channels) - { - typedef QuadT 
AliasedQuads[QUADS_PER_THREAD]; - - WrappedQuadIteratorT d_wrapped_quads((QuadT*) (d_native_samples + block_offset)); - - // Load using a wrapped quad iterator - BlockLoadQuadT(temp_storage.quad_load).Load( - d_wrapped_quads, - reinterpret_cast(samples)); - } - - // Load full, aligned tile - __device__ __forceinline__ void LoadTile( - OffsetT block_offset, - int valid_samples, - SampleT (&samples)[PIXELS_PER_THREAD][NUM_CHANNELS], - Int2Type is_full_tile, - Int2Type is_aligned) - { - LoadFullAlignedTile(block_offset, valid_samples, samples, Int2Type()); - } - - // Load full, mis-aligned tile using sample iterator - __device__ __forceinline__ void LoadTile( - OffsetT block_offset, - int valid_samples, - SampleT (&samples)[PIXELS_PER_THREAD][NUM_CHANNELS], - Int2Type is_full_tile, - Int2Type is_aligned) - { - typedef SampleT AliasedSamples[SAMPLES_PER_THREAD]; - - // Load using sample iterator - BlockLoadSampleT(temp_storage.sample_load).Load( - d_wrapped_samples + block_offset, - reinterpret_cast(samples)); - } - - // Load partially-full, aligned tile using the pixel iterator - __device__ __forceinline__ void LoadTile( - OffsetT block_offset, - int valid_samples, - SampleT (&samples)[PIXELS_PER_THREAD][NUM_CHANNELS], - Int2Type is_full_tile, - Int2Type is_aligned) - { - typedef PixelT AliasedPixels[PIXELS_PER_THREAD]; - - WrappedPixelIteratorT d_wrapped_pixels((PixelT*) (d_native_samples + block_offset)); - - int valid_pixels = valid_samples / NUM_CHANNELS; - - // Load using a wrapped pixel iterator - BlockLoadPixelT(temp_storage.pixel_load).Load( - d_wrapped_pixels, - reinterpret_cast(samples), - valid_pixels); - } - - // Load partially-full, mis-aligned tile using sample iterator - __device__ __forceinline__ void LoadTile( - OffsetT block_offset, - int valid_samples, - SampleT (&samples)[PIXELS_PER_THREAD][NUM_CHANNELS], - Int2Type is_full_tile, - Int2Type is_aligned) - { - typedef SampleT AliasedSamples[SAMPLES_PER_THREAD]; - - BlockLoadSampleT(temp_storage.sample_load).Load( - d_wrapped_samples + block_offset, - reinterpret_cast(samples), - valid_samples); - } - - - //--------------------------------------------------------------------- - // Tile processing - //--------------------------------------------------------------------- - - // Consume a tile of data samples - template < - bool IS_ALIGNED, // Whether the tile offset is aligned (quad-aligned for single-channel, pixel-aligned for multi-channel) - bool IS_FULL_TILE> // Whether the tile is full - __device__ __forceinline__ void ConsumeTile(OffsetT block_offset, int valid_samples) - { - SampleT samples[PIXELS_PER_THREAD][NUM_CHANNELS]; - bool is_valid[PIXELS_PER_THREAD]; - - // Load tile - LoadTile( - block_offset, - valid_samples, - samples, - Int2Type(), - Int2Type()); - - // Set valid flags - #pragma unroll - for (int PIXEL = 0; PIXEL < PIXELS_PER_THREAD; ++PIXEL) - is_valid[PIXEL] = IS_FULL_TILE || (((threadIdx.x * PIXELS_PER_THREAD + PIXEL) * NUM_CHANNELS) < valid_samples); - - // Accumulate samples -#if CUB_PTX_ARCH >= 120 - if (prefer_smem) - AccumulateSmemPixels(samples, is_valid); - else - AccumulateGmemPixels(samples, is_valid); -#else - AccumulateGmemPixels(samples, is_valid); -#endif - - } - - - // Consume row tiles. 
Specialized for work-stealing from queue - template - __device__ __forceinline__ void ConsumeTiles( - OffsetT num_row_pixels, ///< The number of multi-channel pixels per row in the region of interest - OffsetT num_rows, ///< The number of rows in the region of interest - OffsetT row_stride_samples, ///< The number of samples between starts of consecutive rows in the region of interest - int tiles_per_row, ///< Number of image tiles per row - GridQueue tile_queue, - Int2Type is_work_stealing) - { - - int num_tiles = num_rows * tiles_per_row; - int tile_idx = (blockIdx.y * gridDim.x) + blockIdx.x; - OffsetT num_even_share_tiles = gridDim.x * gridDim.y; - - while (tile_idx < num_tiles) - { - int row = tile_idx / tiles_per_row; - int col = tile_idx - (row * tiles_per_row); - OffsetT row_offset = row * row_stride_samples; - OffsetT col_offset = (col * TILE_SAMPLES); - OffsetT tile_offset = row_offset + col_offset; - - if (col == tiles_per_row - 1) - { - // Consume a partially-full tile at the end of the row - OffsetT num_remaining = (num_row_pixels * NUM_CHANNELS) - col_offset; - ConsumeTile(tile_offset, num_remaining); - } - else - { - // Consume full tile - ConsumeTile(tile_offset, TILE_SAMPLES); - } - - CTA_SYNC(); - - // Get next tile - if (threadIdx.x == 0) - temp_storage.tile_idx = tile_queue.Drain(1) + num_even_share_tiles; - - CTA_SYNC(); - - tile_idx = temp_storage.tile_idx; - } - } - - - // Consume row tiles. Specialized for even-share (striped across thread blocks) - template - __device__ __forceinline__ void ConsumeTiles( - OffsetT num_row_pixels, ///< The number of multi-channel pixels per row in the region of interest - OffsetT num_rows, ///< The number of rows in the region of interest - OffsetT row_stride_samples, ///< The number of samples between starts of consecutive rows in the region of interest - int tiles_per_row, ///< Number of image tiles per row - GridQueue tile_queue, - Int2Type is_work_stealing) - { - for (int row = blockIdx.y; row < num_rows; row += gridDim.y) - { - OffsetT row_begin = row * row_stride_samples; - OffsetT row_end = row_begin + (num_row_pixels * NUM_CHANNELS); - OffsetT tile_offset = row_begin + (blockIdx.x * TILE_SAMPLES); - - while (tile_offset < row_end) - { - OffsetT num_remaining = row_end - tile_offset; - - if (num_remaining < TILE_SAMPLES) - { - // Consume partial tile - ConsumeTile(tile_offset, num_remaining); - break; - } - - // Consume full tile - ConsumeTile(tile_offset, TILE_SAMPLES); - tile_offset += gridDim.x * TILE_SAMPLES; - } - } - } - - - //--------------------------------------------------------------------- - // Parameter extraction - //--------------------------------------------------------------------- - - // Return a native pixel pointer (specialized for CacheModifiedInputIterator types) - template < - CacheLoadModifier _MODIFIER, - typename _ValueT, - typename _OffsetT> - __device__ __forceinline__ SampleT* NativePointer(CacheModifiedInputIterator<_MODIFIER, _ValueT, _OffsetT> itr) - { - return itr.ptr; - } - - // Return a native pixel pointer (specialized for other types) - template - __device__ __forceinline__ SampleT* NativePointer(IteratorT itr) - { - return NULL; - } - - - - //--------------------------------------------------------------------- - // Interface - //--------------------------------------------------------------------- - - - /** - * Constructor - */ - __device__ __forceinline__ AgentHistogram( - TempStorage &temp_storage, ///< Reference to temp_storage - SampleIteratorT d_samples, ///< Input data to reduce - 
int (&num_output_bins)[NUM_ACTIVE_CHANNELS], ///< The number of bins per final output histogram - int (&num_privatized_bins)[NUM_ACTIVE_CHANNELS], ///< The number of bins per privatized histogram - CounterT* (&d_output_histograms)[NUM_ACTIVE_CHANNELS], ///< Reference to final output histograms - CounterT* (&d_privatized_histograms)[NUM_ACTIVE_CHANNELS], ///< Reference to privatized histograms - OutputDecodeOpT (&output_decode_op)[NUM_ACTIVE_CHANNELS], ///< The transform operator for determining output bin-ids from privatized counter indices, one for each channel - PrivatizedDecodeOpT (&privatized_decode_op)[NUM_ACTIVE_CHANNELS]) ///< The transform operator for determining privatized counter indices from samples, one for each channel - : - temp_storage(temp_storage.Alias()), - d_wrapped_samples(d_samples), - num_output_bins(num_output_bins), - num_privatized_bins(num_privatized_bins), - d_output_histograms(d_output_histograms), - privatized_decode_op(privatized_decode_op), - output_decode_op(output_decode_op), - d_native_samples(NativePointer(d_wrapped_samples)), - prefer_smem((MEM_PREFERENCE == SMEM) ? - true : // prefer smem privatized histograms - (MEM_PREFERENCE == GMEM) ? - false : // prefer gmem privatized histograms - blockIdx.x & 1) // prefer blended privatized histograms - { - int blockId = (blockIdx.y * gridDim.x) + blockIdx.x; - - // Initialize the locations of this block's privatized histograms - for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) - this->d_privatized_histograms[CHANNEL] = d_privatized_histograms[CHANNEL] + (blockId * num_privatized_bins[CHANNEL]); - } - - - /** - * Consume image - */ - __device__ __forceinline__ void ConsumeTiles( - OffsetT num_row_pixels, ///< The number of multi-channel pixels per row in the region of interest - OffsetT num_rows, ///< The number of rows in the region of interest - OffsetT row_stride_samples, ///< The number of samples between starts of consecutive rows in the region of interest - int tiles_per_row, ///< Number of image tiles per row - GridQueue tile_queue) ///< Queue descriptor for assigning tiles of work to thread blocks - { - // Check whether all row starting offsets are quad-aligned (in single-channel) or pixel-aligned (in multi-channel) - size_t row_bytes = sizeof(SampleT) * row_stride_samples; - size_t offset_mask = size_t(d_native_samples) | row_bytes; - int quad_mask = sizeof(SampleT) * 4 - 1; - int pixel_mask = AlignBytes::ALIGN_BYTES - 1; - bool quad_aligned_rows = (NUM_CHANNELS == 1) && ((offset_mask & quad_mask) == 0); - bool pixel_aligned_rows = (NUM_CHANNELS > 1) && ((offset_mask & pixel_mask) == 0); - - // Whether rows are aligned and can be vectorized - if (quad_aligned_rows || pixel_aligned_rows) - ConsumeTiles(num_row_pixels, num_rows, row_stride_samples, tiles_per_row, tile_queue, Int2Type()); - else - ConsumeTiles(num_row_pixels, num_rows, row_stride_samples, tiles_per_row, tile_queue, Int2Type()); - } - - - /** - * Initialize privatized bin counters. Specialized for privatized shared-memory counters - */ - __device__ __forceinline__ void InitBinCounters() - { - if (prefer_smem) - InitSmemBinCounters(); - else - InitGmemBinCounters(); - } - - - /** - * Store privatized histogram to device-accessible memory.
Specialized for privatized shared-memory counters - */ - __device__ __forceinline__ void StoreOutput() - { - if (prefer_smem) - StoreSmemOutput(); - else - StoreGmemOutput(); - } - - -}; - - - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/agent/agent_radix_sort_downsweep.cuh b/ml-xgboost/cub/cub/agent/agent_radix_sort_downsweep.cuh deleted file mode 100644 index 9b9931a..0000000 --- a/ml-xgboost/cub/cub/agent/agent_radix_sort_downsweep.cuh +++ /dev/null @@ -1,753 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * AgentRadixSortDownsweep implements a stateful abstraction of CUDA thread blocks for participating in device-wide radix sort downsweep . 
- */ - - -#pragma once - -#include "../thread/thread_load.cuh" -#include "../block/block_load.cuh" -#include "../block/block_store.cuh" -#include "../block/block_radix_rank.cuh" -#include "../block/block_exchange.cuh" -#include "../util_type.cuh" -#include "../iterator/cache_modified_input_iterator.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/****************************************************************************** - * Tuning policy types - ******************************************************************************/ - -/** - * Types of scattering strategies - */ -enum RadixSortScatterAlgorithm -{ - RADIX_SORT_SCATTER_DIRECT, ///< Scatter directly from registers to global bins - RADIX_SORT_SCATTER_TWO_PHASE, ///< First scatter from registers into shared memory bins, then into global bins -}; - - -/** - * Parameterizable tuning policy type for AgentRadixSortDownsweep - */ -template < - int _BLOCK_THREADS, ///< Threads per thread block - int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) - BlockLoadAlgorithm _LOAD_ALGORITHM, ///< The BlockLoad algorithm to use - CacheLoadModifier _LOAD_MODIFIER, ///< Cache load modifier for reading keys (and values) - bool _MEMOIZE_OUTER_SCAN, ///< Whether or not to buffer outer raking scan partials to incur fewer shared memory reads at the expense of higher register pressure. See BlockScanAlgorithm::BLOCK_SCAN_RAKING_MEMOIZE for more details. - BlockScanAlgorithm _INNER_SCAN_ALGORITHM, ///< The BlockScan algorithm to use - RadixSortScatterAlgorithm _SCATTER_ALGORITHM, ///< The scattering strategy to use - int _RADIX_BITS> ///< The number of radix bits, i.e., log2(bins) -struct AgentRadixSortDownsweepPolicy -{ - enum - { - BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block - ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) - RADIX_BITS = _RADIX_BITS, ///< The number of radix bits, i.e., log2(bins) - MEMOIZE_OUTER_SCAN = _MEMOIZE_OUTER_SCAN, ///< Whether or not to buffer outer raking scan partials to incur fewer shared memory reads at the expense of higher register pressure. See BlockScanAlgorithm::BLOCK_SCAN_RAKING_MEMOIZE for more details. - }; - - static const BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; ///< The BlockLoad algorithm to use - static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading keys (and values) - static const BlockScanAlgorithm INNER_SCAN_ALGORITHM = _INNER_SCAN_ALGORITHM; ///< The BlockScan algorithm to use - static const RadixSortScatterAlgorithm SCATTER_ALGORITHM = _SCATTER_ALGORITHM; ///< The scattering strategy to use -}; - - -/****************************************************************************** - * Thread block abstractions - ******************************************************************************/ - -/** - * \brief AgentRadixSortDownsweep implements a stateful abstraction of CUDA thread blocks for participating in device-wide radix sort downsweep.
- */ -template < - typename AgentRadixSortDownsweepPolicy, ///< Parameterized AgentRadixSortDownsweepPolicy tuning policy type - bool IS_DESCENDING, ///< Whether or not the sorted-order is high-to-low - typename KeyT, ///< KeyT type - typename ValueT, ///< ValueT type - typename OffsetT> ///< Signed integer type for global offsets -struct AgentRadixSortDownsweep -{ - //--------------------------------------------------------------------- - // Type definitions and constants - //--------------------------------------------------------------------- - - // Appropriate unsigned-bits representation of KeyT - typedef typename Traits::UnsignedBits UnsignedBits; - - static const UnsignedBits LOWEST_KEY = Traits::LOWEST_KEY; - static const UnsignedBits MAX_KEY = Traits::MAX_KEY; - - static const BlockLoadAlgorithm LOAD_ALGORITHM = AgentRadixSortDownsweepPolicy::LOAD_ALGORITHM; - static const CacheLoadModifier LOAD_MODIFIER = AgentRadixSortDownsweepPolicy::LOAD_MODIFIER; - static const BlockScanAlgorithm INNER_SCAN_ALGORITHM = AgentRadixSortDownsweepPolicy::INNER_SCAN_ALGORITHM; - static const RadixSortScatterAlgorithm SCATTER_ALGORITHM = AgentRadixSortDownsweepPolicy::SCATTER_ALGORITHM; - - enum - { - BLOCK_THREADS = AgentRadixSortDownsweepPolicy::BLOCK_THREADS, - ITEMS_PER_THREAD = AgentRadixSortDownsweepPolicy::ITEMS_PER_THREAD, - RADIX_BITS = AgentRadixSortDownsweepPolicy::RADIX_BITS, - MEMOIZE_OUTER_SCAN = AgentRadixSortDownsweepPolicy::MEMOIZE_OUTER_SCAN, - TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, - - RADIX_DIGITS = 1 << RADIX_BITS, - KEYS_ONLY = Equals::VALUE, - - WARP_THREADS = CUB_PTX_LOG_WARP_THREADS, - WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS, - - BYTES_PER_SIZET = sizeof(OffsetT), - LOG_BYTES_PER_SIZET = Log2::VALUE, - - LOG_SMEM_BANKS = CUB_PTX_LOG_SMEM_BANKS, - SMEM_BANKS = 1 << LOG_SMEM_BANKS, - - DIGITS_PER_SCATTER_PASS = BLOCK_THREADS / SMEM_BANKS, - SCATTER_PASSES = RADIX_DIGITS / DIGITS_PER_SCATTER_PASS, - - LOG_STORE_TXN_THREADS = LOG_SMEM_BANKS, - STORE_TXN_THREADS = 1 << LOG_STORE_TXN_THREADS, - }; - - // Input iterator wrapper type (for applying cache modifier)s - typedef CacheModifiedInputIterator KeysItr; - typedef CacheModifiedInputIterator ValuesItr; - - // BlockRadixRank type - typedef BlockRadixRank< - BLOCK_THREADS, - RADIX_BITS, - IS_DESCENDING, - MEMOIZE_OUTER_SCAN, - INNER_SCAN_ALGORITHM> BlockRadixRank; - - // BlockLoad type (keys) - typedef BlockLoad< - UnsignedBits, - BLOCK_THREADS, - ITEMS_PER_THREAD, - LOAD_ALGORITHM> BlockLoadKeys; - - // BlockLoad type (values) - typedef BlockLoad< - ValueT, - BLOCK_THREADS, - ITEMS_PER_THREAD, - LOAD_ALGORITHM> BlockLoadValues; - - // BlockExchange type (keys) - typedef BlockExchange< - UnsignedBits, - BLOCK_THREADS, - ITEMS_PER_THREAD> BlockExchangeKeys; - - // BlockExchange type (values) - typedef BlockExchange< - ValueT, - BLOCK_THREADS, - ITEMS_PER_THREAD> BlockExchangeValues; - - - /** - * Shared memory storage layout - */ - union __align__(16) _TempStorage - { - typename BlockLoadKeys::TempStorage load_keys; - typename BlockRadixRank::TempStorage ranking; - typename BlockLoadValues::TempStorage load_values; - typename BlockExchangeValues::TempStorage exchange_values; - - OffsetT exclusive_digit_prefix[RADIX_DIGITS]; - - struct - { - typename BlockExchangeKeys::TempStorage exchange_keys; - OffsetT relative_bin_offsets[RADIX_DIGITS + 1]; - }; - - }; - - - /// Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - - 
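// ---------------------------------------------------------------------
// [Editor's illustration -- not part of the deleted CUB sources.] A minimal,
// single-threaded C++ sketch of what one radix-sorting pass computes:
// extract each key's digit at current_bit, build per-digit base offsets
// (the role of the upsweep histogram and the d_spine scan), then scatter
// each key to its digit's base offset plus its rank among equal digits.
// Helper names (ExtractDigit, RadixSortPass) are hypothetical and exist
// only for this sketch.
#include <cstddef>
#include <cstdint>
#include <vector>

static std::uint32_t ExtractDigit(std::uint32_t key, int current_bit, int num_bits)
{
    return (key >> current_bit) & ((1u << num_bits) - 1u);  // same role as BFE()
}

// One stable counting-sort pass over num_bits of the key.
static void RadixSortPass(const std::vector<std::uint32_t>& keys_in,
                          std::vector<std::uint32_t>& keys_out,
                          int current_bit, int num_bits)
{
    const int radix_digits = 1 << num_bits;                 // RADIX_DIGITS

    // Upsweep equivalent: histogram of digit occurrences.
    std::vector<std::size_t> bin_offset(radix_digits + 1, 0);
    for (std::uint32_t key : keys_in)
        ++bin_offset[ExtractDigit(key, current_bit, num_bits) + 1];

    // Spine-scan equivalent: exclusive prefix sum yields each digit's
    // global base offset (the bin_offset a downsweep block starts from).
    for (int d = 0; d < radix_digits; ++d)
        bin_offset[d + 1] += bin_offset[d];

    // Downsweep equivalent: scatter each key to base offset + local rank.
    keys_out.resize(keys_in.size());
    for (std::uint32_t key : keys_in)
        keys_out[bin_offset[ExtractDigit(key, current_bit, num_bits)]++] = key;
}
// AgentRadixSortDownsweep distributes exactly this scatter across thread
// blocks, ranking keys cooperatively in shared memory (BlockRadixRank) and
// bit-twiddling keys first (TwiddleIn/TwiddleOut) so signed and
// floating-point orderings reduce to the unsigned comparison above.
// ---------------------------------------------------------------------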
//--------------------------------------------------------------------- - // Thread fields - //--------------------------------------------------------------------- - - // Shared storage for this CTA - _TempStorage &temp_storage; - - // Input and output device pointers - KeysItr d_keys_in; - ValuesItr d_values_in; - UnsignedBits *d_keys_out; - ValueT *d_values_out; - - // The global scatter base offset for each digit (valid in the first RADIX_DIGITS threads) - OffsetT bin_offset; - - // The least-significant bit position of the current digit to extract - int current_bit; - - // Number of bits in current digit - int num_bits; - - // Whether to short-circuit - int short_circuit; - - //--------------------------------------------------------------------- - // Utility methods - //--------------------------------------------------------------------- - - /** - * Scatter ranked keys directly to device-accessible memory - */ - template - __device__ __forceinline__ void ScatterKeys( - UnsignedBits (&twiddled_keys)[ITEMS_PER_THREAD], - OffsetT (&relative_bin_offsets)[ITEMS_PER_THREAD], - int (&ranks)[ITEMS_PER_THREAD], - OffsetT valid_items, - Int2Type /*scatter_algorithm*/) - { - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - UnsignedBits digit = BFE(twiddled_keys[ITEM], current_bit, num_bits); - relative_bin_offsets[ITEM] = temp_storage.relative_bin_offsets[digit]; - - // Un-twiddle - UnsignedBits key = Traits::TwiddleOut(twiddled_keys[ITEM]); - - if (FULL_TILE || (ranks[ITEM] < valid_items)) - { - d_keys_out[relative_bin_offsets[ITEM] + ranks[ITEM]] = key; - } - } - } - - - /** - * Scatter ranked keys through shared memory, then to device-accessible memory - */ - template - __device__ __forceinline__ void ScatterKeys( - UnsignedBits (&twiddled_keys)[ITEMS_PER_THREAD], - OffsetT (&relative_bin_offsets)[ITEMS_PER_THREAD], - int (&ranks)[ITEMS_PER_THREAD], - OffsetT valid_items, - Int2Type /*scatter_algorithm*/) - { - UnsignedBits *smem = reinterpret_cast(&temp_storage.exchange_keys); - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - smem[ranks[ITEM]] = twiddled_keys[ITEM]; - } - - CTA_SYNC(); - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - UnsignedBits key = smem[threadIdx.x + (ITEM * BLOCK_THREADS)]; - - UnsignedBits digit = BFE(key, current_bit, num_bits); - - relative_bin_offsets[ITEM] = temp_storage.relative_bin_offsets[digit]; - - // Un-twiddle - key = Traits::TwiddleOut(key); - - if (FULL_TILE || - (static_cast(threadIdx.x + (ITEM * BLOCK_THREADS)) < valid_items)) - { - d_keys_out[relative_bin_offsets[ITEM] + threadIdx.x + (ITEM * BLOCK_THREADS)] = key; - } - } - } - - - - /** - * Scatter ranked values directly to device-accessible memory - */ - template - __device__ __forceinline__ void ScatterValues( - ValueT (&values)[ITEMS_PER_THREAD], - OffsetT (&relative_bin_offsets)[ITEMS_PER_THREAD], - int (&ranks)[ITEMS_PER_THREAD], - OffsetT valid_items, - Int2Type /*scatter_algorithm*/) - { - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - if (FULL_TILE || (ranks[ITEM] < valid_items)) - { - d_values_out[relative_bin_offsets[ITEM] + ranks[ITEM]] = values[ITEM]; - } - } - } - - - /** - * Scatter ranked values through shared memory, then to device-accessible memory - */ - template - __device__ __forceinline__ void ScatterValues( - ValueT (&values)[ITEMS_PER_THREAD], - OffsetT (&relative_bin_offsets)[ITEMS_PER_THREAD], - int (&ranks)[ITEMS_PER_THREAD], - OffsetT valid_items, - Int2Type
/*scatter_algorithm*/) - { - CTA_SYNC(); - - ValueT *smem = reinterpret_cast(&temp_storage.exchange_values); - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - smem[ranks[ITEM]] = values[ITEM]; - } - - CTA_SYNC(); - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - ValueT value = smem[threadIdx.x + (ITEM * BLOCK_THREADS)]; - - if (FULL_TILE || - (static_cast(threadIdx.x + (ITEM * BLOCK_THREADS)) < valid_items)) - { - d_values_out[relative_bin_offsets[ITEM] + threadIdx.x + (ITEM * BLOCK_THREADS)] = value; - } - } - } - - - /** - * Load a tile of items (specialized for full tile) - */ - template - __device__ __forceinline__ void LoadItems( - BlockLoadT &block_loader, - T (&items)[ITEMS_PER_THREAD], - InputIteratorT d_in, - OffsetT /*valid_items*/, - Int2Type /*is_full_tile*/) - { - block_loader.Load(d_in, items); - } - - - /** - * Load a tile of items (specialized for full tile) - */ - template - __device__ __forceinline__ void LoadItems( - BlockLoadT &block_loader, - T (&items)[ITEMS_PER_THREAD], - InputIteratorT d_in, - OffsetT /*valid_items*/, - T /*oob_item*/, - Int2Type /*is_full_tile*/) - { - block_loader.Load(d_in, items); - } - - - /** - * Load a tile of items (specialized for partial tile) - */ - template - __device__ __forceinline__ void LoadItems( - BlockLoadT &block_loader, - T (&items)[ITEMS_PER_THREAD], - InputIteratorT d_in, - OffsetT valid_items, - Int2Type /*is_full_tile*/) - { - block_loader.Load(d_in, items, valid_items); - } - - /** - * Load a tile of items (specialized for partial tile) - */ - template - __device__ __forceinline__ void LoadItems( - BlockLoadT &block_loader, - T (&items)[ITEMS_PER_THREAD], - InputIteratorT d_in, - OffsetT valid_items, - T oob_item, - Int2Type /*is_full_tile*/) - { - block_loader.Load(d_in, items, valid_items, oob_item); - } - - - /** - * Truck along associated values - */ - template - __device__ __forceinline__ void GatherScatterValues( - OffsetT (&relative_bin_offsets)[ITEMS_PER_THREAD], - int (&ranks)[ITEMS_PER_THREAD], - OffsetT block_offset, - OffsetT valid_items, - Int2Type /*is_keys_only*/) - { - CTA_SYNC(); - - ValueT values[ITEMS_PER_THREAD]; - - BlockLoadValues loader(temp_storage.load_values); - LoadItems( - loader, - values, - d_values_in + block_offset, - valid_items, - Int2Type()); - - ScatterValues( - values, - relative_bin_offsets, - ranks, - valid_items, - Int2Type()); - } - - - /** - * Truck along associated values (specialized for key-only sorting) - */ - template - __device__ __forceinline__ void GatherScatterValues( - OffsetT (&/*relative_bin_offsets*/)[ITEMS_PER_THREAD], - int (&/*ranks*/)[ITEMS_PER_THREAD], - OffsetT /*block_offset*/, - OffsetT /*valid_items*/, - Int2Type /*is_keys_only*/) - {} - - - /** - * Process tile - */ - template - __device__ __forceinline__ void ProcessTile( - OffsetT block_offset, - const OffsetT &valid_items = TILE_ITEMS) - { - // Per-thread tile data - UnsignedBits keys[ITEMS_PER_THREAD]; // Keys - UnsignedBits twiddled_keys[ITEMS_PER_THREAD]; // Twiddled keys - int ranks[ITEMS_PER_THREAD]; // For each key, the local rank within the CTA - OffsetT relative_bin_offsets[ITEMS_PER_THREAD]; // For each key, the global scatter base offset of the corresponding digit - - // Assign default (min/max) value to all keys - UnsignedBits default_key = (IS_DESCENDING) ? 
LOWEST_KEY : MAX_KEY; - - // Load tile of keys - BlockLoadKeys loader(temp_storage.load_keys); - LoadItems( - loader, - keys, - d_keys_in + block_offset, - valid_items, - default_key, - Int2Type()); - - CTA_SYNC(); - - // Twiddle key bits if necessary - #pragma unroll - for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++) - { - twiddled_keys[KEY] = Traits::TwiddleIn(keys[KEY]); - } - - // Rank the twiddled keys - int exclusive_digit_prefix; - BlockRadixRank(temp_storage.ranking).RankKeys( - twiddled_keys, - ranks, - current_bit, - num_bits, - exclusive_digit_prefix); - - CTA_SYNC(); - - // Share exclusive digit prefix - if (threadIdx.x < RADIX_DIGITS) - { - // Store exclusive prefix - temp_storage.exclusive_digit_prefix[threadIdx.x] = exclusive_digit_prefix; - } - - CTA_SYNC(); - - // Get inclusive digit prefix - int inclusive_digit_prefix; - if (threadIdx.x < RADIX_DIGITS) - { - if (IS_DESCENDING) - { - // Get inclusive digit prefix from exclusive prefix (higher bins come first) - inclusive_digit_prefix = (threadIdx.x == 0) ? - (BLOCK_THREADS * ITEMS_PER_THREAD) : - temp_storage.exclusive_digit_prefix[threadIdx.x - 1]; - } - else - { - // Get inclusive digit prefix from exclusive prefix (lower bins come first) - inclusive_digit_prefix = (threadIdx.x == RADIX_DIGITS - 1) ? - (BLOCK_THREADS * ITEMS_PER_THREAD) : - temp_storage.exclusive_digit_prefix[threadIdx.x + 1]; - } - } - - CTA_SYNC(); - - // Update global scatter base offsets for each digit - if (threadIdx.x < RADIX_DIGITS) - { - - - bin_offset -= exclusive_digit_prefix; - temp_storage.relative_bin_offsets[threadIdx.x] = bin_offset; - bin_offset += inclusive_digit_prefix; - } - - CTA_SYNC(); - - // Scatter keys - ScatterKeys(twiddled_keys, relative_bin_offsets, ranks, valid_items, Int2Type()); - - // Gather/scatter values - GatherScatterValues(relative_bin_offsets , ranks, block_offset, valid_items, Int2Type()); - } - - //--------------------------------------------------------------------- - // Copy shortcut - //--------------------------------------------------------------------- - - /** - * Copy tiles within the range of input - */ - template < - typename InputIteratorT, - typename T> - __device__ __forceinline__ void Copy( - InputIteratorT d_in, - T *d_out, - OffsetT block_offset, - OffsetT block_end) - { - // Simply copy the input - while (block_offset + TILE_ITEMS <= block_end) - { - T items[ITEMS_PER_THREAD]; - - LoadDirectStriped(threadIdx.x, d_in + block_offset, items); - CTA_SYNC(); - StoreDirectStriped(threadIdx.x, d_out + block_offset, items); - - block_offset += TILE_ITEMS; - } - - // Clean up last partial tile with guarded-I/O - if (block_offset < block_end) - { - OffsetT valid_items = block_end - block_offset; - - T items[ITEMS_PER_THREAD]; - - LoadDirectStriped(threadIdx.x, d_in + block_offset, items, valid_items); - CTA_SYNC(); - StoreDirectStriped(threadIdx.x, d_out + block_offset, items, valid_items); - } - } - - - /** - * Copy tiles within the range of input (specialized for NullType) - */ - template - __device__ __forceinline__ void Copy( - InputIteratorT /*d_in*/, - NullType * /*d_out*/, - OffsetT /*block_offset*/, - OffsetT /*block_end*/) - {} - - - //--------------------------------------------------------------------- - // Interface - //--------------------------------------------------------------------- - - /** - * Constructor - */ - __device__ __forceinline__ AgentRadixSortDownsweep( - TempStorage &temp_storage, - OffsetT num_items, - OffsetT bin_offset, - const KeyT *d_keys_in, - KeyT *d_keys_out, - const 
ValueT *d_values_in, - ValueT *d_values_out, - int current_bit, - int num_bits) - : - temp_storage(temp_storage.Alias()), - bin_offset(bin_offset), - d_keys_in(reinterpret_cast<const UnsignedBits*>(d_keys_in)), - d_values_in(d_values_in), - d_keys_out(reinterpret_cast<UnsignedBits*>(d_keys_out)), - d_values_out(d_values_out), - current_bit(current_bit), - num_bits(num_bits), - short_circuit(1) - { - if (threadIdx.x < RADIX_DIGITS) - { - // Short circuit if the histogram has bin counts of only zeros or problem-size - short_circuit = ((bin_offset == 0) || (bin_offset == num_items)); - } - - short_circuit = CTA_SYNC_AND(short_circuit); - } - - - /** - * Constructor - */ - __device__ __forceinline__ AgentRadixSortDownsweep( - TempStorage &temp_storage, - OffsetT num_items, - OffsetT *d_spine, - const KeyT *d_keys_in, - KeyT *d_keys_out, - const ValueT *d_values_in, - ValueT *d_values_out, - int current_bit, - int num_bits) - : - temp_storage(temp_storage.Alias()), - d_keys_in(reinterpret_cast<const UnsignedBits*>(d_keys_in)), - d_values_in(d_values_in), - d_keys_out(reinterpret_cast<UnsignedBits*>(d_keys_out)), - d_values_out(d_values_out), - current_bit(current_bit), - num_bits(num_bits), - short_circuit(1) - { - // Load digit bin offsets (each of the first RADIX_DIGITS threads will load an offset for that digit) - if (threadIdx.x < RADIX_DIGITS) - { - int bin_idx = (IS_DESCENDING) ? - RADIX_DIGITS - threadIdx.x - 1 : - threadIdx.x; - - // Short circuit if the first block's histogram has bin counts of only zeros or problem-size - OffsetT first_block_bin_offset = d_spine[gridDim.x * bin_idx]; - short_circuit = ((first_block_bin_offset == 0) || (first_block_bin_offset == num_items)); - - // Load my block's bin offset for my bin - bin_offset = d_spine[(gridDim.x * bin_idx) + blockIdx.x]; - } - - short_circuit = CTA_SYNC_AND(short_circuit); - } - - - /** - * Distribute keys from a segment of input tiles. - */ - __device__ __forceinline__ void ProcessRegion( - OffsetT block_offset, - OffsetT block_end) - { - if (short_circuit) - { - // Copy keys - Copy(d_keys_in, d_keys_out, block_offset, block_end); - - // Copy values - Copy(d_values_in, d_values_out, block_offset, block_end); - } - else - { - // Process full tiles of tile_items - while (block_offset + TILE_ITEMS <= block_end) - { - ProcessTile(block_offset); - block_offset += TILE_ITEMS; - - CTA_SYNC(); - } - - // Clean up last partial tile with guarded-I/O - if (block_offset < block_end) - { - ProcessTile(block_offset, block_end - block_offset); - } - } - } - -}; - - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/agent/agent_radix_sort_upsweep.cuh b/ml-xgboost/cub/cub/agent/agent_radix_sort_upsweep.cuh deleted file mode 100644 index 88e27d3..0000000 --- a/ml-xgboost/cub/cub/agent/agent_radix_sort_upsweep.cuh +++ /dev/null @@ -1,449 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution.
- * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * AgentRadixSortUpsweep implements a stateful abstraction of CUDA thread blocks for participating in device-wide radix sort upsweep . - */ - -#pragma once - -#include "../thread/thread_reduce.cuh" -#include "../thread/thread_load.cuh" -#include "../block/block_load.cuh" -#include "../util_type.cuh" -#include "../iterator/cache_modified_input_iterator.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/****************************************************************************** - * Tuning policy types - ******************************************************************************/ - -/** - * Parameterizable tuning policy type for AgentRadixSortUpsweep - */ -template < - int _BLOCK_THREADS, ///< Threads per thread block - int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) - CacheLoadModifier _LOAD_MODIFIER, ///< Cache load modifier for reading keys - int _RADIX_BITS> ///< The number of radix bits, i.e., log2(bins) -struct AgentRadixSortUpsweepPolicy -{ - enum - { - BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block - ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) - RADIX_BITS = _RADIX_BITS, ///< The number of radix bits, i.e., log2(bins) - }; - - static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading keys -}; - - -/****************************************************************************** - * Thread block abstractions - ******************************************************************************/ - -/** - * \brief AgentRadixSortUpsweep implements a stateful abstraction of CUDA thread blocks for participating in device-wide radix sort upsweep . 
- */ -template < - typename AgentRadixSortUpsweepPolicy, ///< Parameterized AgentRadixSortUpsweepPolicy tuning policy type - typename KeyT, ///< KeyT type - typename OffsetT> ///< Signed integer type for global offsets -struct AgentRadixSortUpsweep -{ - - //--------------------------------------------------------------------- - // Type definitions and constants - //--------------------------------------------------------------------- - - typedef typename Traits::UnsignedBits UnsignedBits; - - // Integer type for digit counters (to be packed into words of PackedCounters) - typedef unsigned char DigitCounter; - - // Integer type for packing DigitCounters into columns of shared memory banks - typedef unsigned int PackedCounter; - - static const CacheLoadModifier LOAD_MODIFIER = AgentRadixSortUpsweepPolicy::LOAD_MODIFIER; - - enum - { - RADIX_BITS = AgentRadixSortUpsweepPolicy::RADIX_BITS, - BLOCK_THREADS = AgentRadixSortUpsweepPolicy::BLOCK_THREADS, - KEYS_PER_THREAD = AgentRadixSortUpsweepPolicy::ITEMS_PER_THREAD, - - RADIX_DIGITS = 1 << RADIX_BITS, - - LOG_WARP_THREADS = CUB_PTX_LOG_WARP_THREADS, - WARP_THREADS = 1 << LOG_WARP_THREADS, - WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS, - - TILE_ITEMS = BLOCK_THREADS * KEYS_PER_THREAD, - - BYTES_PER_COUNTER = sizeof(DigitCounter), - LOG_BYTES_PER_COUNTER = Log2::VALUE, - - PACKING_RATIO = sizeof(PackedCounter) / sizeof(DigitCounter), - LOG_PACKING_RATIO = Log2::VALUE, - - LOG_COUNTER_LANES = CUB_MAX(0, RADIX_BITS - LOG_PACKING_RATIO), - COUNTER_LANES = 1 << LOG_COUNTER_LANES, - - // To prevent counter overflow, we must periodically unpack and aggregate the - // digit counters back into registers. Each counter lane is assigned to a - // warp for aggregation. - - LANES_PER_WARP = CUB_MAX(1, (COUNTER_LANES + WARPS - 1) / WARPS), - - // Unroll tiles in batches without risk of counter overflow - UNROLL_COUNT = CUB_MIN(64, 255 / KEYS_PER_THREAD), - UNROLLED_ELEMENTS = UNROLL_COUNT * TILE_ITEMS, - }; - - - // Input iterator wrapper type (for applying cache modifier)s - typedef CacheModifiedInputIterator KeysItr; - - /** - * Shared memory storage layout - */ - struct _TempStorage - { - union - { - DigitCounter digit_counters[COUNTER_LANES][BLOCK_THREADS][PACKING_RATIO]; - PackedCounter packed_counters[COUNTER_LANES][BLOCK_THREADS]; - OffsetT digit_partials[RADIX_DIGITS][WARP_THREADS + 1]; - }; - }; - - - /// Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - - //--------------------------------------------------------------------- - // Thread fields (aggregate state bundle) - //--------------------------------------------------------------------- - - // Shared storage for this CTA - _TempStorage &temp_storage; - - // Thread-local counters for periodically aggregating composite-counter lanes - OffsetT local_counts[LANES_PER_WARP][PACKING_RATIO]; - - // Input and output device pointers - KeysItr d_keys_in; - - // The least-significant bit position of the current digit to extract - int current_bit; - - // Number of bits in current digit - int num_bits; - - - - //--------------------------------------------------------------------- - // Helper structure for templated iteration - //--------------------------------------------------------------------- - - // Iterate - template - struct Iterate - { - // BucketKeys - static __device__ __forceinline__ void BucketKeys( - AgentRadixSortUpsweep &cta, - UnsignedBits keys[KEYS_PER_THREAD]) - { - cta.Bucket(keys[COUNT]); - - // Next - 
Iterate::BucketKeys(cta, keys); - } - }; - - // Terminate - template - struct Iterate - { - // BucketKeys - static __device__ __forceinline__ void BucketKeys(AgentRadixSortUpsweep &/*cta*/, UnsignedBits /*keys*/[KEYS_PER_THREAD]) {} - }; - - - //--------------------------------------------------------------------- - // Utility methods - //--------------------------------------------------------------------- - - /** - * Decode a key and increment corresponding smem digit counter - */ - __device__ __forceinline__ void Bucket(UnsignedBits key) - { - // Perform transform op - UnsignedBits converted_key = Traits::TwiddleIn(key); - - // Extract current digit bits - UnsignedBits digit = BFE(converted_key, current_bit, num_bits); - - // Get sub-counter offset - UnsignedBits sub_counter = digit & (PACKING_RATIO - 1); - - // Get row offset - UnsignedBits row_offset = digit >> LOG_PACKING_RATIO; - - // Increment counter - temp_storage.digit_counters[row_offset][threadIdx.x][sub_counter]++; - } - - - /** - * Reset composite counters - */ - __device__ __forceinline__ void ResetDigitCounters() - { - #pragma unroll - for (int LANE = 0; LANE < COUNTER_LANES; LANE++) - { - temp_storage.packed_counters[LANE][threadIdx.x] = 0; - } - } - - - /** - * Reset the unpacked counters in each thread - */ - __device__ __forceinline__ void ResetUnpackedCounters() - { - #pragma unroll - for (int LANE = 0; LANE < LANES_PER_WARP; LANE++) - { - #pragma unroll - for (int UNPACKED_COUNTER = 0; UNPACKED_COUNTER < PACKING_RATIO; UNPACKED_COUNTER++) - { - local_counts[LANE][UNPACKED_COUNTER] = 0; - } - } - } - - - /** - * Extracts and aggregates the digit counters for each counter lane - * owned by this warp - */ - __device__ __forceinline__ void UnpackDigitCounts() - { - unsigned int warp_id = threadIdx.x >> LOG_WARP_THREADS; - unsigned int warp_tid = threadIdx.x & (WARP_THREADS - 1); - - #pragma unroll - for (int LANE = 0; LANE < LANES_PER_WARP; LANE++) - { - const int counter_lane = (LANE * WARPS) + warp_id; - if (counter_lane < COUNTER_LANES) - { - #pragma unroll - for (int PACKED_COUNTER = 0; PACKED_COUNTER < BLOCK_THREADS; PACKED_COUNTER += WARP_THREADS) - { - #pragma unroll - for (int UNPACKED_COUNTER = 0; UNPACKED_COUNTER < PACKING_RATIO; UNPACKED_COUNTER++) - { - OffsetT counter = temp_storage.digit_counters[counter_lane][warp_tid + PACKED_COUNTER][UNPACKED_COUNTER]; - local_counts[LANE][UNPACKED_COUNTER] += counter; - } - } - } - } - } - - - /** - * Places unpacked counters into smem for final digit reduction - */ - __device__ __forceinline__ void ReduceUnpackedCounts(OffsetT &bin_count) - { - unsigned int warp_id = threadIdx.x >> LOG_WARP_THREADS; - unsigned int warp_tid = threadIdx.x & (WARP_THREADS - 1); - - // Place unpacked digit counters in shared memory - #pragma unroll - for (int LANE = 0; LANE < LANES_PER_WARP; LANE++) - { - int counter_lane = (LANE * WARPS) + warp_id; - if (counter_lane < COUNTER_LANES) - { - int digit_row = counter_lane << LOG_PACKING_RATIO; - - #pragma unroll - for (int UNPACKED_COUNTER = 0; UNPACKED_COUNTER < PACKING_RATIO; UNPACKED_COUNTER++) - { - temp_storage.digit_partials[digit_row + UNPACKED_COUNTER][warp_tid] = - local_counts[LANE][UNPACKED_COUNTER]; - } - } - } - - CTA_SYNC(); - - // Rake-reduce bin_count reductions - if (threadIdx.x < RADIX_DIGITS) - { - bin_count = ThreadReduce( - temp_storage.digit_partials[threadIdx.x], - Sum()); - } - } - - - /** - * Processes a single, full tile - */ - __device__ __forceinline__ void ProcessFullTile(OffsetT block_offset) - { - // Tile of 
keys - UnsignedBits keys[KEYS_PER_THREAD]; - - LoadDirectStriped(threadIdx.x, d_keys_in + block_offset, keys); - - // Prevent hoisting - CTA_SYNC(); - - // Bucket tile of keys - Iterate<0, KEYS_PER_THREAD>::BucketKeys(*this, keys); - } - - - /** - * Processes a single load (may have some threads masked off) - */ - __device__ __forceinline__ void ProcessPartialTile( - OffsetT block_offset, - const OffsetT &block_end) - { - // Process partial tile if necessary using single loads - block_offset += threadIdx.x; - while (block_offset < block_end) - { - // Load and bucket key - UnsignedBits key = d_keys_in[block_offset]; - Bucket(key); - block_offset += BLOCK_THREADS; - } - } - - - //--------------------------------------------------------------------- - // Interface - //--------------------------------------------------------------------- - - /** - * Constructor - */ - __device__ __forceinline__ AgentRadixSortUpsweep( - TempStorage &temp_storage, - const KeyT *d_keys_in, - int current_bit, - int num_bits) - : - temp_storage(temp_storage.Alias()), - d_keys_in(reinterpret_cast(d_keys_in)), - current_bit(current_bit), - num_bits(num_bits) - {} - - - /** - * Compute radix digit histograms from a segment of input tiles. - */ - __device__ __forceinline__ void ProcessRegion( - OffsetT block_offset, - const OffsetT &block_end, - OffsetT &bin_count) ///< [out] The digit count for tid'th bin (output param, valid in the first RADIX_DIGITS threads) - { - // Reset digit counters in smem and unpacked counters in registers - ResetDigitCounters(); - ResetUnpackedCounters(); - - // Unroll batches of full tiles - while (block_offset + UNROLLED_ELEMENTS <= block_end) - { - for (int i = 0; i < UNROLL_COUNT; ++i) - { - ProcessFullTile(block_offset); - block_offset += TILE_ITEMS; - } - - CTA_SYNC(); - - // Aggregate back into local_count registers to prevent overflow - UnpackDigitCounts(); - - CTA_SYNC(); - - // Reset composite counters in lanes - ResetDigitCounters(); - } - - // Unroll single full tiles - while (block_offset + TILE_ITEMS <= block_end) - { - ProcessFullTile(block_offset); - block_offset += TILE_ITEMS; - } - - // Process partial tile if necessary - ProcessPartialTile( - block_offset, - block_end); - - CTA_SYNC(); - - // Aggregate back into local_count registers - UnpackDigitCounts(); - - CTA_SYNC(); - - // Final raking reduction of counts by bin - ReduceUnpackedCounts(bin_count); - } - -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/agent/agent_reduce.cuh b/ml-xgboost/cub/cub/agent/agent_reduce.cuh deleted file mode 100644 index ad8fbcf..0000000 --- a/ml-xgboost/cub/cub/agent/agent_reduce.cuh +++ /dev/null @@ -1,475 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::AgentReduce implements a stateful abstraction of CUDA thread blocks for participating in device-wide reduction . - */ - -#pragma once - -#include - -#include "../block/block_load.cuh" -#include "../block/block_reduce.cuh" -#include "../grid/grid_mapping.cuh" -#include "../grid/grid_queue.cuh" -#include "../grid/grid_even_share.cuh" -#include "../util_type.cuh" -#include "../iterator/cache_modified_input_iterator.cuh" -#include "../util_namespace.cuh" - - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/****************************************************************************** - * Tuning policy types - ******************************************************************************/ - -/** - * Parameterizable tuning policy type for AgentReduce - */ -template < - int _BLOCK_THREADS, ///< Threads per thread block - int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) - int _VECTOR_LOAD_LENGTH, ///< Number of items per vectorized load - BlockReduceAlgorithm _BLOCK_ALGORITHM, ///< Cooperative block-wide reduction algorithm to use - CacheLoadModifier _LOAD_MODIFIER, ///< Cache load modifier for reading input elements - GridMappingStrategy _GRID_MAPPING> ///< How to map tiles of input onto thread blocks -struct AgentReducePolicy -{ - enum - { - BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block - ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) - VECTOR_LOAD_LENGTH = _VECTOR_LOAD_LENGTH, ///< Number of items per vectorized load - }; - - static const BlockReduceAlgorithm BLOCK_ALGORITHM = _BLOCK_ALGORITHM; ///< Cooperative block-wide reduction algorithm to use - static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading input elements - static const GridMappingStrategy GRID_MAPPING = _GRID_MAPPING; ///< How to map tiles of input onto thread blocks -}; - - - -/****************************************************************************** - * Thread block abstractions - ******************************************************************************/ - -/** - * \brief AgentReduce implements a stateful abstraction of CUDA thread blocks for participating in device-wide reduction . - * - * Each thread reduces only the values it loads. If \p FIRST_TILE, this - * partial reduction is stored into \p thread_aggregate. Otherwise it is - * accumulated into \p thread_aggregate. 
- */ -template < - typename AgentReducePolicy, ///< Parameterized AgentReducePolicy tuning policy type - typename InputIteratorT, ///< Random-access iterator type for input - typename OutputIteratorT, ///< Random-access iterator type for output - typename OffsetT, ///< Signed integer type for global offsets - typename ReductionOp> ///< Binary reduction operator type having member T operator()(const T &a, const T &b) -struct AgentReduce -{ - - //--------------------------------------------------------------------- - // Types and constants - //--------------------------------------------------------------------- - - /// The input value type - typedef typename std::iterator_traits::value_type InputT; - - /// The output value type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type OutputT; // ... else the output iterator's value type - - /// Vector type of InputT for data movement - typedef typename CubVector::Type VectorT; - - /// Input iterator wrapper type (for applying cache modifier) - typedef typename If::VALUE, - CacheModifiedInputIterator, // Wrap the native input pointer with CacheModifiedInputIterator - InputIteratorT>::Type // Directly use the supplied input iterator type - WrappedInputIteratorT; - - /// Constants - enum - { - BLOCK_THREADS = AgentReducePolicy::BLOCK_THREADS, - ITEMS_PER_THREAD = AgentReducePolicy::ITEMS_PER_THREAD, - VECTOR_LOAD_LENGTH = CUB_MIN(ITEMS_PER_THREAD, AgentReducePolicy::VECTOR_LOAD_LENGTH), - TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, - - // Can vectorize according to the policy if the input iterator is a native pointer to a primitive type - ATTEMPT_VECTORIZATION = (VECTOR_LOAD_LENGTH > 1) && - (ITEMS_PER_THREAD % VECTOR_LOAD_LENGTH == 0) && - (IsPointer::VALUE) && Traits::PRIMITIVE, - - }; - - static const CacheLoadModifier LOAD_MODIFIER = AgentReducePolicy::LOAD_MODIFIER; - static const BlockReduceAlgorithm BLOCK_ALGORITHM = AgentReducePolicy::BLOCK_ALGORITHM; - - /// Parameterized BlockReduce primitive - typedef BlockReduce BlockReduceT; - - /// Shared memory type required by this thread block - struct _TempStorage - { - typename BlockReduceT::TempStorage reduce; - OffsetT dequeue_offset; - }; - - /// Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - - //--------------------------------------------------------------------- - // Per-thread fields - //--------------------------------------------------------------------- - - _TempStorage& temp_storage; ///< Reference to temp_storage - InputIteratorT d_in; ///< Input data to reduce - WrappedInputIteratorT d_wrapped_in; ///< Wrapped input data to reduce - ReductionOp reduction_op; ///< Binary reduction operator - - - //--------------------------------------------------------------------- - // Utility - //--------------------------------------------------------------------- - - - // Whether or not the input is aligned with the vector type (specialized for types we can vectorize) - template - static __device__ __forceinline__ bool IsAligned( - Iterator d_in, - Int2Type /*can_vectorize*/) - { - return (size_t(d_in) & (sizeof(VectorT) - 1)) == 0; - } - - // Whether or not the input is aligned with the vector type (specialized for types we cannot vectorize) - template - static __device__ __forceinline__ bool IsAligned( - Iterator /*d_in*/, - Int2Type 
/*can_vectorize*/) - { - return false; - } - - - //--------------------------------------------------------------------- - // Constructor - //--------------------------------------------------------------------- - - /** - * Constructor - */ - __device__ __forceinline__ AgentReduce( - TempStorage& temp_storage, ///< Reference to temp_storage - InputIteratorT d_in, ///< Input data to reduce - ReductionOp reduction_op) ///< Binary reduction operator - : - temp_storage(temp_storage.Alias()), - d_in(d_in), - d_wrapped_in(d_in), - reduction_op(reduction_op) - {} - - - //--------------------------------------------------------------------- - // Tile consumption - //--------------------------------------------------------------------- - - /** - * Consume a full tile of input (non-vectorized) - */ - template - __device__ __forceinline__ void ConsumeTile( - OutputT &thread_aggregate, - OffsetT block_offset, ///< The offset of the tile to consume - int /*valid_items*/, ///< The number of valid items in the tile - Int2Type /*is_full_tile*/, ///< Whether or not this is a full tile - Int2Type /*can_vectorize*/) ///< Whether or not we can vectorize loads - { - OutputT items[ITEMS_PER_THREAD]; - - // Load items in striped fashion - LoadDirectStriped(threadIdx.x, d_wrapped_in + block_offset, items); - - // Reduce items within each thread stripe - thread_aggregate = (IS_FIRST_TILE) ? - ThreadReduce(items, reduction_op) : - ThreadReduce(items, reduction_op, thread_aggregate); - } - - - /** - * Consume a full tile of input (vectorized) - */ - template - __device__ __forceinline__ void ConsumeTile( - OutputT &thread_aggregate, - OffsetT block_offset, ///< The offset of the tile to consume - int /*valid_items*/, ///< The number of valid items in the tile - Int2Type /*is_full_tile*/, ///< Whether or not this is a full tile - Int2Type /*can_vectorize*/) ///< Whether or not we can vectorize loads - { - // Alias items as an array of VectorT and load it in striped fashion - enum { WORDS = ITEMS_PER_THREAD / VECTOR_LOAD_LENGTH }; - - // Fabricate a vectorized input iterator - InputT *d_in_unqualified = const_cast(d_in) + block_offset + (threadIdx.x * VECTOR_LOAD_LENGTH); - CacheModifiedInputIterator d_vec_in( - reinterpret_cast(d_in_unqualified)); - - // Load items as vector items - InputT input_items[ITEMS_PER_THREAD]; - VectorT *vec_items = reinterpret_cast(input_items); - #pragma unroll - for (int i = 0; i < WORDS; ++i) - vec_items[i] = d_vec_in[BLOCK_THREADS * i]; - - // Convert from input type to output type - OutputT items[ITEMS_PER_THREAD]; - #pragma unroll - for (int i = 0; i < ITEMS_PER_THREAD; ++i) - items[i] = input_items[i]; - - // Reduce items within each thread stripe - thread_aggregate = (IS_FIRST_TILE) ?
- ThreadReduce(items, reduction_op) : - ThreadReduce(items, reduction_op, thread_aggregate); - } - - - /** - * Consume a partial tile of input - */ - template - __device__ __forceinline__ void ConsumeTile( - OutputT &thread_aggregate, - OffsetT block_offset, ///< The offset of the tile to consume - int valid_items, ///< The number of valid items in the tile - Int2Type /*is_full_tile*/, ///< Whether or not this is a full tile - Int2Type /*can_vectorize*/) ///< Whether or not we can vectorize loads - { - // Partial tile - int thread_offset = threadIdx.x; - - // Read first item - if ((IS_FIRST_TILE) && (thread_offset < valid_items)) - { - thread_aggregate = d_wrapped_in[block_offset + thread_offset]; - thread_offset += BLOCK_THREADS; - } - - // Continue reading items (block-striped) - while (thread_offset < valid_items) - { - OutputT item = d_wrapped_in[block_offset + thread_offset]; - thread_aggregate = reduction_op(thread_aggregate, item); - thread_offset += BLOCK_THREADS; - } - } - - - //--------------------------------------------------------------- - // Consume a contiguous segment of tiles - //--------------------------------------------------------------------- - - /** - * \brief Reduce a contiguous segment of input tiles - */ - template - __device__ __forceinline__ OutputT ConsumeRange( - OffsetT block_offset, ///< [in] Threadblock begin offset (inclusive) - OffsetT block_end, ///< [in] Threadblock end offset (exclusive) - Int2Type can_vectorize) ///< Whether or not we can vectorize loads - { - OutputT thread_aggregate; - - if (block_offset + TILE_ITEMS > block_end) - { - // First tile isn't full (not all threads have valid items) - int valid_items = block_end - block_offset; - ConsumeTile(thread_aggregate, block_offset, valid_items, Int2Type(), can_vectorize); - return BlockReduceT(temp_storage.reduce).Reduce(thread_aggregate, reduction_op, valid_items); - } - - // At least one full block - ConsumeTile(thread_aggregate, block_offset, TILE_ITEMS, Int2Type(), can_vectorize); - block_offset += TILE_ITEMS; - - // Consume subsequent full tiles of input - while (block_offset + TILE_ITEMS <= block_end) - { - ConsumeTile(thread_aggregate, block_offset, TILE_ITEMS, Int2Type(), can_vectorize); - block_offset += TILE_ITEMS; - } - - // Consume a partially-full tile - if (block_offset < block_end) - { - int valid_items = block_end - block_offset; - ConsumeTile(thread_aggregate, block_offset, valid_items, Int2Type(), can_vectorize); - } - - // Compute block-wide reduction (all threads have valid items) - return BlockReduceT(temp_storage.reduce).Reduce(thread_aggregate, reduction_op); - } - - - /** - * \brief Reduce a contiguous segment of input tiles - */ - __device__ __forceinline__ OutputT ConsumeRange( - OffsetT block_offset, ///< [in] Threadblock begin offset (inclusive) - OffsetT block_end) ///< [in] Threadblock end offset (exclusive) - { - return (IsAligned(d_in + block_offset, Int2Type())) ?
- ConsumeRange(block_offset, block_end, Int2Type()) : - ConsumeRange(block_offset, block_end, Int2Type()); - } - - - /** - * Reduce a contiguous segment of input tiles - */ - __device__ __forceinline__ OutputT ConsumeTiles( - OffsetT /*num_items*/, ///< [in] Total number of global input items - GridEvenShare &even_share, ///< [in] GridEvenShare descriptor - GridQueue &/*queue*/, ///< [in,out] GridQueue descriptor - Int2Type /*is_even_share*/) ///< [in] Marker type indicating this is an even-share mapping - { - // Initialize even-share descriptor for this thread block - even_share.BlockInit(); - - return (IsAligned(d_in, Int2Type())) ? - ConsumeRange(even_share.block_offset, even_share.block_end, Int2Type()) : - ConsumeRange(even_share.block_offset, even_share.block_end, Int2Type()); - - } - - - //--------------------------------------------------------------------- - // Dynamically consume tiles - //--------------------------------------------------------------------- - - /** - * Dequeue and reduce tiles of items as part of an inter-block reduction - */ - template - __device__ __forceinline__ OutputT ConsumeTiles( - int num_items, ///< Total number of input items - GridQueue queue, ///< Queue descriptor for assigning tiles of work to thread blocks - Int2Type can_vectorize) ///< Whether or not we can vectorize loads - { - // We give each thread block at least one tile of input. - OutputT thread_aggregate; - OffsetT block_offset = blockIdx.x * TILE_ITEMS; - OffsetT even_share_base = gridDim.x * TILE_ITEMS; - - if (block_offset + TILE_ITEMS > num_items) - { - // First tile isn't full (not all threads have valid items) - int valid_items = num_items - block_offset; - ConsumeTile(thread_aggregate, block_offset, valid_items, Int2Type(), can_vectorize); - return BlockReduceT(temp_storage.reduce).Reduce(thread_aggregate, reduction_op, valid_items); - } - - // Consume first full tile of input - ConsumeTile(thread_aggregate, block_offset, TILE_ITEMS, Int2Type(), can_vectorize); - - if (num_items > even_share_base) - { - // Dequeue a tile of items - if (threadIdx.x == 0) - temp_storage.dequeue_offset = queue.Drain(TILE_ITEMS) + even_share_base; - - CTA_SYNC(); - - // Grab tile offset and check if we're done with full tiles - block_offset = temp_storage.dequeue_offset; - - // Consume more full tiles - while (block_offset + TILE_ITEMS <= num_items) - { - ConsumeTile(thread_aggregate, block_offset, TILE_ITEMS, Int2Type(), can_vectorize); - - CTA_SYNC(); - - // Dequeue a tile of items - if (threadIdx.x == 0) - temp_storage.dequeue_offset = queue.Drain(TILE_ITEMS) + even_share_base; - - CTA_SYNC(); - - // Grab tile offset and check if we're done with full tiles - block_offset = temp_storage.dequeue_offset; - } - - // Consume partial tile - if (block_offset < num_items) - { - int valid_items = num_items - block_offset; - ConsumeTile(thread_aggregate, block_offset, valid_items, Int2Type(), can_vectorize); - } - } - - // Compute block-wide reduction (all threads have valid items) - return BlockReduceT(temp_storage.reduce).Reduce(thread_aggregate, reduction_op); - - } - - /** - * Dequeue and reduce tiles of items as part of an inter-block reduction - */ - __device__ __forceinline__ OutputT ConsumeTiles( - OffsetT num_items, ///< [in] Total number of global input items - GridEvenShare &/*even_share*/, ///< [in] GridEvenShare descriptor - GridQueue &queue, ///< [in,out] GridQueue descriptor - Int2Type /*is_dynamic*/) ///< [in] Marker type indicating this is a dynamic mapping - { - return (IsAligned(d_in,
Int2Type())) ? - ConsumeTiles(num_items, queue, Int2Type()) : - ConsumeTiles(num_items, queue, Int2Type()); - } - -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/agent/agent_reduce_by_key.cuh b/ml-xgboost/cub/cub/agent/agent_reduce_by_key.cuh deleted file mode 100644 index 28d5f49..0000000 --- a/ml-xgboost/cub/cub/agent/agent_reduce_by_key.cuh +++ /dev/null @@ -1,549 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::AgentReduceByKey implements a stateful abstraction of CUDA thread blocks for participating in device-wide reduce-value-by-key. 
- */ - -#pragma once - -#include - -#include "single_pass_scan_operators.cuh" -#include "../block/block_load.cuh" -#include "../block/block_store.cuh" -#include "../block/block_scan.cuh" -#include "../block/block_discontinuity.cuh" -#include "../iterator/cache_modified_input_iterator.cuh" -#include "../iterator/constant_input_iterator.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/****************************************************************************** - * Tuning policy types - ******************************************************************************/ - -/** - * Parameterizable tuning policy type for AgentReduceByKey - */ -template < - int _BLOCK_THREADS, ///< Threads per thread block - int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) - BlockLoadAlgorithm _LOAD_ALGORITHM, ///< The BlockLoad algorithm to use - CacheLoadModifier _LOAD_MODIFIER, ///< Cache load modifier for reading input elements - BlockScanAlgorithm _SCAN_ALGORITHM> ///< The BlockScan algorithm to use -struct AgentReduceByKeyPolicy -{ - enum - { - BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block - ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) - }; - - static const BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; ///< The BlockLoad algorithm to use - static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading input elements - static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; ///< The BlockScan algorithm to use -}; - - -/****************************************************************************** - * Thread block abstractions - ******************************************************************************/ - -/** - * \brief AgentReduceByKey implements a stateful abstraction of CUDA thread blocks for participating in device-wide reduce-value-by-key - */ -template < - typename AgentReduceByKeyPolicyT, ///< Parameterized AgentReduceByKeyPolicy tuning policy type - typename KeysInputIteratorT, ///< Random-access input iterator type for keys - typename UniqueOutputIteratorT, ///< Random-access output iterator type for keys - typename ValuesInputIteratorT, ///< Random-access input iterator type for values - typename AggregatesOutputIteratorT, ///< Random-access output iterator type for values - typename NumRunsOutputIteratorT, ///< Output iterator type for recording number of items selected - typename EqualityOpT, ///< KeyT equality operator type - typename ReductionOpT, ///< ValueT reduction operator type - typename OffsetT> ///< Signed integer type for global offsets -struct AgentReduceByKey -{ - //--------------------------------------------------------------------- - // Types and constants - //--------------------------------------------------------------------- - - // The input keys type - typedef typename std::iterator_traits::value_type KeyInputT; - - // The output keys type - typedef typename If<(Equals::value_type, void>::VALUE), // KeyOutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type KeyOutputT; // ... 
else the output iterator's value type - - // The input values type - typedef typename std::iterator_traits::value_type ValueInputT; - - // The output values type - typedef typename If<(Equals::value_type, void>::VALUE), // ValueOutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type ValueOutputT; // ... else the output iterator's value type - - // Tuple type for scanning (pairs accumulated segment-value with segment-index) - typedef KeyValuePair OffsetValuePairT; - - // Tuple type for pairing keys and values - typedef KeyValuePair KeyValuePairT; - - // Tile status descriptor interface type - typedef ReduceByKeyScanTileState ScanTileStateT; - - // Guarded inequality functor - template - struct GuardedInequalityWrapper - { - _EqualityOpT op; ///< Wrapped equality operator - int num_remaining; ///< Items remaining - - /// Constructor - __host__ __device__ __forceinline__ - GuardedInequalityWrapper(_EqualityOpT op, int num_remaining) : op(op), num_remaining(num_remaining) {} - - /// Boolean inequality operator, returns (a != b) - template - __host__ __device__ __forceinline__ bool operator()(const T &a, const T &b, int idx) const - { - if (idx < num_remaining) - return !op(a, b); // In bounds - - // Return true if first out-of-bounds item, false otherwise - return (idx == num_remaining); - } - }; - - - // Constants - enum - { - BLOCK_THREADS = AgentReduceByKeyPolicyT::BLOCK_THREADS, - ITEMS_PER_THREAD = AgentReduceByKeyPolicyT::ITEMS_PER_THREAD, - TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, - TWO_PHASE_SCATTER = (ITEMS_PER_THREAD > 1), - - // Whether or not the scan operation has a zero-valued identity value (true if we're performing addition on a primitive type) - HAS_IDENTITY_ZERO = (Equals::VALUE) && (Traits::PRIMITIVE), - }; - - // Cache-modified Input iterator wrapper type (for applying cache modifier) for keys - typedef typename If::VALUE, - CacheModifiedInputIterator, // Wrap the native input pointer with CacheModifiedValuesInputIterator - KeysInputIteratorT>::Type // Directly use the supplied input iterator type - WrappedKeysInputIteratorT; - - // Cache-modified Input iterator wrapper type (for applying cache modifier) for values - typedef typename If::VALUE, - CacheModifiedInputIterator, // Wrap the native input pointer with CacheModifiedValuesInputIterator - ValuesInputIteratorT>::Type // Directly use the supplied input iterator type - WrappedValuesInputIteratorT; - - // Cache-modified Input iterator wrapper type (for applying cache modifier) for fixup values - typedef typename If::VALUE, - CacheModifiedInputIterator, // Wrap the native input pointer with CacheModifiedValuesInputIterator - AggregatesOutputIteratorT>::Type // Directly use the supplied input iterator type - WrappedFixupInputIteratorT; - - // Reduce-value-by-segment scan operator - typedef ReduceBySegmentOp ReduceBySegmentOpT; - - // Parameterized BlockLoad type for keys - typedef BlockLoad< - KeyOutputT, - BLOCK_THREADS, - ITEMS_PER_THREAD, - AgentReduceByKeyPolicyT::LOAD_ALGORITHM> - BlockLoadKeysT; - - // Parameterized BlockLoad type for values - typedef BlockLoad< - ValueOutputT, - BLOCK_THREADS, - ITEMS_PER_THREAD, - AgentReduceByKeyPolicyT::LOAD_ALGORITHM> - BlockLoadValuesT; - - // Parameterized BlockDiscontinuity type for keys - typedef BlockDiscontinuity< - KeyOutputT, - BLOCK_THREADS> - BlockDiscontinuityKeys; - - // Parameterized BlockScan type - typedef BlockScan< - 
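    /*
     * Regarding the ReduceBySegmentOpT typedef above: it is the heart of the
     * tile scan. It combines (num-segments, value) pairs so that the key
     * component counts segment heads while the value component restarts
     * accumulation at every head. A rough sketch of its behavior, assuming
     * the wrapped reduction operator is `op` (paraphrased, not the verbatim
     * CUB definition):
     *
     *     OffsetValuePairT combine(OffsetValuePairT a, OffsetValuePairT b)
     *     {
     *         OffsetValuePairT retval;
     *         retval.key   = a.key + b.key;                  // running head count
     *         retval.value = (b.key) ? b.value               // b starts a new segment
     *                                : op(a.value, b.value); // else keep accumulating
     *         return retval;
     *     }
     */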
OffsetValuePairT, - BLOCK_THREADS, - AgentReduceByKeyPolicyT::SCAN_ALGORITHM> - BlockScanT; - - // Callback type for obtaining tile prefix during block scan - typedef TilePrefixCallbackOp< - OffsetValuePairT, - ReduceBySegmentOpT, - ScanTileStateT> - TilePrefixCallbackOpT; - - // Key and value exchange types - typedef KeyOutputT KeyExchangeT[TILE_ITEMS + 1]; - typedef ValueOutputT ValueExchangeT[TILE_ITEMS + 1]; - - // Shared memory type for this threadblock - union _TempStorage - { - struct - { - typename BlockScanT::TempStorage scan; // Smem needed for tile scanning - typename TilePrefixCallbackOpT::TempStorage prefix; // Smem needed for cooperative prefix callback - typename BlockDiscontinuityKeys::TempStorage discontinuity; // Smem needed for discontinuity detection - }; - - // Smem needed for loading keys - typename BlockLoadKeysT::TempStorage load_keys; - - // Smem needed for loading values - typename BlockLoadValuesT::TempStorage load_values; - - // Smem needed for compacting key value pairs(allows non POD items in this union) - Uninitialized raw_exchange; - }; - - // Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - - //--------------------------------------------------------------------- - // Per-thread fields - //--------------------------------------------------------------------- - - _TempStorage& temp_storage; ///< Reference to temp_storage - WrappedKeysInputIteratorT d_keys_in; ///< Input keys - UniqueOutputIteratorT d_unique_out; ///< Unique output keys - WrappedValuesInputIteratorT d_values_in; ///< Input values - AggregatesOutputIteratorT d_aggregates_out; ///< Output value aggregates - NumRunsOutputIteratorT d_num_runs_out; ///< Output pointer for total number of segments identified - EqualityOpT equality_op; ///< KeyT equality operator - ReductionOpT reduction_op; ///< Reduction operator - ReduceBySegmentOpT scan_op; ///< Reduce-by-segment scan operator - - - //--------------------------------------------------------------------- - // Constructor - //--------------------------------------------------------------------- - - // Constructor - __device__ __forceinline__ - AgentReduceByKey( - TempStorage& temp_storage, ///< Reference to temp_storage - KeysInputIteratorT d_keys_in, ///< Input keys - UniqueOutputIteratorT d_unique_out, ///< Unique output keys - ValuesInputIteratorT d_values_in, ///< Input values - AggregatesOutputIteratorT d_aggregates_out, ///< Output value aggregates - NumRunsOutputIteratorT d_num_runs_out, ///< Output pointer for total number of segments identified - EqualityOpT equality_op, ///< KeyT equality operator - ReductionOpT reduction_op) ///< ValueT reduction operator - : - temp_storage(temp_storage.Alias()), - d_keys_in(d_keys_in), - d_unique_out(d_unique_out), - d_values_in(d_values_in), - d_aggregates_out(d_aggregates_out), - d_num_runs_out(d_num_runs_out), - equality_op(equality_op), - reduction_op(reduction_op), - scan_op(reduction_op) - {} - - - //--------------------------------------------------------------------- - // Scatter utility methods - //--------------------------------------------------------------------- - - /** - * Directly scatter flagged items to output offsets - */ - __device__ __forceinline__ void ScatterDirect( - KeyValuePairT (&scatter_items)[ITEMS_PER_THREAD], - OffsetT (&segment_flags)[ITEMS_PER_THREAD], - OffsetT (&segment_indices)[ITEMS_PER_THREAD]) - { - // Scatter flagged keys and values - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - 
if (segment_flags[ITEM]) - { - d_unique_out[segment_indices[ITEM]] = scatter_items[ITEM].key; - d_aggregates_out[segment_indices[ITEM]] = scatter_items[ITEM].value; - } - } - } - - - /** - * 2-phase scatter flagged items to output offsets - * - * The exclusive scan causes each head flag to be paired with the previous - * value aggregate: the scatter offsets must be decremented for value aggregates - */ - __device__ __forceinline__ void ScatterTwoPhase( - KeyValuePairT (&scatter_items)[ITEMS_PER_THREAD], - OffsetT (&segment_flags)[ITEMS_PER_THREAD], - OffsetT (&segment_indices)[ITEMS_PER_THREAD], - OffsetT num_tile_segments, - OffsetT num_tile_segments_prefix) - { - CTA_SYNC(); - - // Compact and scatter pairs - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - if (segment_flags[ITEM]) - { - temp_storage.raw_exchange.Alias()[segment_indices[ITEM] - num_tile_segments_prefix] = scatter_items[ITEM]; - } - } - - CTA_SYNC(); - - for (int item = threadIdx.x; item < num_tile_segments; item += BLOCK_THREADS) - { - KeyValuePairT pair = temp_storage.raw_exchange.Alias()[item]; - d_unique_out[num_tile_segments_prefix + item] = pair.key; - d_aggregates_out[num_tile_segments_prefix + item] = pair.value; - } - } - - - /** - * Scatter flagged items - */ - __device__ __forceinline__ void Scatter( - KeyValuePairT (&scatter_items)[ITEMS_PER_THREAD], - OffsetT (&segment_flags)[ITEMS_PER_THREAD], - OffsetT (&segment_indices)[ITEMS_PER_THREAD], - OffsetT num_tile_segments, - OffsetT num_tile_segments_prefix) - { - // Do a one-phase scatter if (a) two-phase is disabled or (b) the average number of selected items per thread is less than one - if (TWO_PHASE_SCATTER && (num_tile_segments > BLOCK_THREADS)) - { - ScatterTwoPhase( - scatter_items, - segment_flags, - segment_indices, - num_tile_segments, - num_tile_segments_prefix); - } - else - { - ScatterDirect( - scatter_items, - segment_flags, - segment_indices); - } - } - - - //--------------------------------------------------------------------- - // Cooperatively scan a device-wide sequence of tiles with other CTAs - //--------------------------------------------------------------------- - - /** - * Process a tile of input (dynamic chained scan) - */ - template ///< Whether the current tile is the last tile - __device__ __forceinline__ void ConsumeTile( - OffsetT num_remaining, ///< Number of global input items remaining (including this tile) - int tile_idx, ///< Tile index - OffsetT tile_offset, ///< Tile offset - ScanTileStateT& tile_state) ///< Global tile state descriptor - { - KeyOutputT keys[ITEMS_PER_THREAD]; // Tile keys - KeyOutputT prev_keys[ITEMS_PER_THREAD]; // Tile keys shuffled up - ValueOutputT values[ITEMS_PER_THREAD]; // Tile values - OffsetT head_flags[ITEMS_PER_THREAD]; // Segment head flags - OffsetT segment_indices[ITEMS_PER_THREAD]; // Segment indices - OffsetValuePairT scan_items[ITEMS_PER_THREAD]; // Zipped values and segment flags|indices - KeyValuePairT scatter_items[ITEMS_PER_THREAD]; // Zipped key value pairs for scattering - - // Load keys - if (IS_LAST_TILE) - BlockLoadKeysT(temp_storage.load_keys).Load(d_keys_in + tile_offset, keys, num_remaining); - else - BlockLoadKeysT(temp_storage.load_keys).Load(d_keys_in + tile_offset, keys); - - // Load tile predecessor key in first thread - KeyOutputT tile_predecessor; - if (threadIdx.x == 0) - { - tile_predecessor = (tile_idx == 0) ? 
- keys[0] : // First tile gets repeat of first item (thus first item will not be flagged as a head) - d_keys_in[tile_offset - 1]; // Subsequent tiles get last key from previous tile - } - - CTA_SYNC(); - - // Load values - if (IS_LAST_TILE) - BlockLoadValuesT(temp_storage.load_values).Load(d_values_in + tile_offset, values, num_remaining); - else - BlockLoadValuesT(temp_storage.load_values).Load(d_values_in + tile_offset, values); - - CTA_SYNC(); - - // Initialize head-flags and shuffle up the previous keys - if (IS_LAST_TILE) - { - // Use custom flag operator to additionally flag the first out-of-bounds item - GuardedInequalityWrapper flag_op(equality_op, num_remaining); - BlockDiscontinuityKeys(temp_storage.discontinuity).FlagHeads( - head_flags, keys, prev_keys, flag_op, tile_predecessor); - } - else - { - InequalityWrapper flag_op(equality_op); - BlockDiscontinuityKeys(temp_storage.discontinuity).FlagHeads( - head_flags, keys, prev_keys, flag_op, tile_predecessor); - } - - // Zip values and head flags - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - scan_items[ITEM].value = values[ITEM]; - scan_items[ITEM].key = head_flags[ITEM]; - } - - // Perform exclusive tile scan - OffsetValuePairT block_aggregate; // Inclusive block-wide scan aggregate - OffsetT num_segments_prefix; // Number of segments prior to this tile - ValueOutputT total_aggregate; // The tile prefix folded with block_aggregate - if (tile_idx == 0) - { - // Scan first tile - BlockScanT(temp_storage.scan).ExclusiveScan(scan_items, scan_items, scan_op, block_aggregate); - num_segments_prefix = 0; - total_aggregate = block_aggregate.value; - - // Update tile status if there are successor tiles - if ((!IS_LAST_TILE) && (threadIdx.x == 0)) - tile_state.SetInclusive(0, block_aggregate); - } - else - { - // Scan non-first tile - TilePrefixCallbackOpT prefix_op(tile_state, temp_storage.prefix, scan_op, tile_idx); - BlockScanT(temp_storage.scan).ExclusiveScan(scan_items, scan_items, scan_op, prefix_op); - - block_aggregate = prefix_op.GetBlockAggregate(); - num_segments_prefix = prefix_op.GetExclusivePrefix().key; - total_aggregate = reduction_op( - prefix_op.GetExclusivePrefix().value, - block_aggregate.value); - } - - // Rezip scatter items and segment indices - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - scatter_items[ITEM].key = prev_keys[ITEM]; - scatter_items[ITEM].value = scan_items[ITEM].value; - segment_indices[ITEM] = scan_items[ITEM].key; - } - - // At this point, each flagged segment head has: - // - The key for the previous segment - // - The reduced value from the previous segment - // - The segment index for the reduced value - - // Scatter flagged keys and values - OffsetT num_tile_segments = block_aggregate.key; - Scatter(scatter_items, head_flags, segment_indices, num_tile_segments, num_segments_prefix); - - // Last thread in last tile will output final count (and last pair, if necessary) - if ((IS_LAST_TILE) && (threadIdx.x == BLOCK_THREADS - 1)) - { - OffsetT num_segments = num_segments_prefix + num_tile_segments; - - // If the last tile is a whole tile, output the final_value - if (num_remaining == TILE_ITEMS) - { - d_unique_out[num_segments] = keys[ITEMS_PER_THREAD - 1]; - d_aggregates_out[num_segments] = total_aggregate; - num_segments++; - } - - // Output the total number of items selected - *d_num_runs_out = num_segments; - } - } - - - /** - * Scan tiles of items as part of a dynamic chained scan - */ - __device__ __forceinline__ void ConsumeRange( - 
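    /*
     * A worked example of ConsumeTile's pipeline above, for a first tile
     * holding
     *
     *     keys:   [a, a, b, b, b, c]     values: [1, 1, 1, 1, 1, 1]
     *
     * The tile predecessor is seeded with keys[0], so head_flags come out as
     * [0, 0, 1, 0, 0, 1]: only positions where the key changes are flagged.
     * After the exclusive ReduceBySegmentOp scan, each flagged position
     * carries the previous segment's completed aggregate and its segment
     * index, so the in-tile scatter emits
     *
     *     d_unique_out:     [a, b]      (prev_keys at the flagged items)
     *     d_aggregates_out: [2, 3]
     *
     * The trailing run of c's is finished either by the next tile's first
     * head flag or, on the last tile, by the closing logic that also writes
     * *d_num_runs_out.
     */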
int num_items, ///< Total number of input items - ScanTileStateT& tile_state, ///< Global tile state descriptor - int start_tile) ///< The starting tile for the current grid - { - // Blocks are launched in increasing order, so just assign one tile per block - int tile_idx = start_tile + blockIdx.x; // Current tile index - OffsetT tile_offset = OffsetT(TILE_ITEMS) * tile_idx; // Global offset for the current tile - OffsetT num_remaining = num_items - tile_offset; // Remaining items (including this tile) - - if (num_remaining > TILE_ITEMS) - { - // Not last tile - ConsumeTile(num_remaining, tile_idx, tile_offset, tile_state); - } - else if (num_remaining > 0) - { - // Last tile - ConsumeTile(num_remaining, tile_idx, tile_offset, tile_state); - } - } - -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/agent/agent_rle.cuh b/ml-xgboost/cub/cub/agent/agent_rle.cuh deleted file mode 100644 index e899f0b..0000000 --- a/ml-xgboost/cub/cub/agent/agent_rle.cuh +++ /dev/null @@ -1,830 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::AgentRle implements a stateful abstraction of CUDA thread blocks for participating in device-wide run-length-encode. 
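 *
 * For orientation: this agent backs CUB's public
 * cub::DeviceRunLengthEncode::NonTrivialRuns entry point, which reports the
 * offset and length of every run longer than one item. A minimal host-side
 * sketch (the d_* buffer names are illustrative placeholders):
 *
 *     // in:      [1, 1, 2, 3, 3, 3]
 *     // yields   offsets_out: [0, 3]   (the singleton 2 is a trivial run)
 *     //          lengths_out: [2, 3]
 *     //          *num_runs_out: 2
 *     void   *d_temp_storage = NULL;
 *     size_t  temp_storage_bytes = 0;
 *     cub::DeviceRunLengthEncode::NonTrivialRuns(d_temp_storage,
 *         temp_storage_bytes, d_in, d_offsets_out, d_lengths_out,
 *         d_num_runs_out, num_items);
 *     cudaMalloc(&d_temp_storage, temp_storage_bytes);
 *     cub::DeviceRunLengthEncode::NonTrivialRuns(d_temp_storage,
 *         temp_storage_bytes, d_in, d_offsets_out, d_lengths_out,
 *         d_num_runs_out, num_items);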
- */ - -#pragma once - -#include - -#include "single_pass_scan_operators.cuh" -#include "../block/block_load.cuh" -#include "../block/block_store.cuh" -#include "../block/block_scan.cuh" -#include "../block/block_exchange.cuh" -#include "../block/block_discontinuity.cuh" -#include "../grid/grid_queue.cuh" -#include "../iterator/cache_modified_input_iterator.cuh" -#include "../iterator/constant_input_iterator.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/****************************************************************************** - * Tuning policy types - ******************************************************************************/ - -/** - * Parameterizable tuning policy type for AgentRle - */ -template < - int _BLOCK_THREADS, ///< Threads per thread block - int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) - BlockLoadAlgorithm _LOAD_ALGORITHM, ///< The BlockLoad algorithm to use - CacheLoadModifier _LOAD_MODIFIER, ///< Cache load modifier for reading input elements - bool _STORE_WARP_TIME_SLICING, ///< Whether or not only one warp's worth of shared memory should be allocated and time-sliced among block-warps during any store-related data transpositions (versus each warp having its own storage) - BlockScanAlgorithm _SCAN_ALGORITHM> ///< The BlockScan algorithm to use -struct AgentRlePolicy -{ - enum - { - BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block - ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) - STORE_WARP_TIME_SLICING = _STORE_WARP_TIME_SLICING, ///< Whether or not only one warp's worth of shared memory should be allocated and time-sliced among block-warps during any store-related data transpositions (versus each warp having its own storage) - }; - - static const BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; ///< The BlockLoad algorithm to use - static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading input elements - static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; ///< The BlockScan algorithm to use -}; - - - - - -/****************************************************************************** - * Thread block abstractions - ******************************************************************************/ - -/** - * \brief AgentRle implements a stateful abstraction of CUDA thread blocks for participating in device-wide run-length-encode - */ -template < - typename AgentRlePolicyT, ///< Parameterized AgentRlePolicyT tuning policy type - typename InputIteratorT, ///< Random-access input iterator type for data - typename OffsetsOutputIteratorT, ///< Random-access output iterator type for offset values - typename LengthsOutputIteratorT, ///< Random-access output iterator type for length values - typename EqualityOpT, ///< T equality operator type - typename OffsetT> ///< Signed integer type for global offsets -struct AgentRle -{ - //--------------------------------------------------------------------- - // Types and constants - //--------------------------------------------------------------------- - - /// The input value type - typedef typename std::iterator_traits::value_type T; - - /// The lengths output value type - typedef typename If<(Equals::value_type, void>::VALUE), // LengthT = (if output iterator's value type is void) ? - OffsetT, // ... then the OffsetT type, - typename std::iterator_traits::value_type>::Type LengthT; // ... 
else the output iterator's value type - - /// Tuple type for scanning (pairs run-length and run-index) - typedef KeyValuePair LengthOffsetPair; - - /// Tile status descriptor interface type - typedef ReduceByKeyScanTileState ScanTileStateT; - - // Constants - enum - { - WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH), - BLOCK_THREADS = AgentRlePolicyT::BLOCK_THREADS, - ITEMS_PER_THREAD = AgentRlePolicyT::ITEMS_PER_THREAD, - WARP_ITEMS = WARP_THREADS * ITEMS_PER_THREAD, - TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, - WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS, - - /// Whether or not to sync after loading data - SYNC_AFTER_LOAD = (AgentRlePolicyT::LOAD_ALGORITHM != BLOCK_LOAD_DIRECT), - - /// Whether or not only one warp's worth of shared memory should be allocated and time-sliced among block-warps during any store-related data transpositions (versus each warp having its own storage) - STORE_WARP_TIME_SLICING = AgentRlePolicyT::STORE_WARP_TIME_SLICING, - ACTIVE_EXCHANGE_WARPS = (STORE_WARP_TIME_SLICING) ? 1 : WARPS, - }; - - - /** - * Special operator that signals all out-of-bounds items are not equal to everything else, - * forcing both (1) the last item to be tail-flagged and (2) all oob items to be marked - * trivial. - */ - template - struct OobInequalityOp - { - OffsetT num_remaining; - EqualityOpT equality_op; - - __device__ __forceinline__ OobInequalityOp( - OffsetT num_remaining, - EqualityOpT equality_op) - : - num_remaining(num_remaining), - equality_op(equality_op) - {} - - template - __device__ __forceinline__ bool operator()(T first, T second, Index idx) - { - if (!LAST_TILE || (idx < num_remaining)) - return !equality_op(first, second); - else - return true; - } - }; - - - // Cache-modified Input iterator wrapper type (for applying cache modifier) for data - typedef typename If::VALUE, - CacheModifiedInputIterator, // Wrap the native input pointer with CacheModifiedVLengthnputIterator - InputIteratorT>::Type // Directly use the supplied input iterator type - WrappedInputIteratorT; - - // Parameterized BlockLoad type for data - typedef BlockLoad< - T, - AgentRlePolicyT::BLOCK_THREADS, - AgentRlePolicyT::ITEMS_PER_THREAD, - AgentRlePolicyT::LOAD_ALGORITHM> - BlockLoadT; - - // Parameterized BlockDiscontinuity type for data - typedef BlockDiscontinuity BlockDiscontinuityT; - - // Parameterized WarpScan type - typedef WarpScan WarpScanPairs; - - // Reduce-length-by-run scan operator - typedef ReduceBySegmentOp ReduceBySegmentOpT; - - // Callback type for obtaining tile prefix during block scan - typedef TilePrefixCallbackOp< - LengthOffsetPair, - ReduceBySegmentOpT, - ScanTileStateT> - TilePrefixCallbackOpT; - - // Warp exchange types - typedef WarpExchange WarpExchangePairs; - - typedef typename If::Type WarpExchangePairsStorage; - - typedef WarpExchange WarpExchangeOffsets; - typedef WarpExchange WarpExchangeLengths; - - typedef LengthOffsetPair WarpAggregates[WARPS]; - - // Shared memory type for this threadblock - struct _TempStorage - { - union - { - struct - { - typename BlockDiscontinuityT::TempStorage discontinuity; // Smem needed for discontinuity detection - typename WarpScanPairs::TempStorage warp_scan[WARPS]; // Smem needed for warp-synchronous scans - Uninitialized warp_aggregates; // Smem needed for sharing warp-wide aggregates - typename TilePrefixCallbackOpT::TempStorage prefix; // Smem needed for cooperative prefix callback - }; - - // Smem needed for input loading - typename BlockLoadT::TempStorage load; - - // Smem needed for two-phase scatter - union 
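    /*
     * Note the aliasing pattern used throughout these agents: the phases of
     * a tile pass (loading, discontinuity flagging, warp scanning, two-phase
     * exchange) never run concurrently, so their shared-memory needs are
     * union'd into a single allocation and separated by CTA_SYNC() barriers
     * rather than each phase receiving private storage. The union opened
     * below applies the same idea to the two-phase scatter buffers.
     */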
- { - unsigned long long align; - WarpExchangePairsStorage exchange_pairs[ACTIVE_EXCHANGE_WARPS]; - typename WarpExchangeOffsets::TempStorage exchange_offsets[ACTIVE_EXCHANGE_WARPS]; - typename WarpExchangeLengths::TempStorage exchange_lengths[ACTIVE_EXCHANGE_WARPS]; - }; - }; - - OffsetT tile_idx; // Shared tile index - LengthOffsetPair tile_inclusive; // Inclusive tile prefix - LengthOffsetPair tile_exclusive; // Exclusive tile prefix - }; - - // Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - - //--------------------------------------------------------------------- - // Per-thread fields - //--------------------------------------------------------------------- - - _TempStorage& temp_storage; ///< Reference to temp_storage - - WrappedInputIteratorT d_in; ///< Pointer to input sequence of data items - OffsetsOutputIteratorT d_offsets_out; ///< Input run offsets - LengthsOutputIteratorT d_lengths_out; ///< Output run lengths - - EqualityOpT equality_op; ///< T equality operator - ReduceBySegmentOpT scan_op; ///< Reduce-length-by-flag scan operator - OffsetT num_items; ///< Total number of input items - - - //--------------------------------------------------------------------- - // Constructor - //--------------------------------------------------------------------- - - // Constructor - __device__ __forceinline__ - AgentRle( - TempStorage &temp_storage, ///< [in] Reference to temp_storage - InputIteratorT d_in, ///< [in] Pointer to input sequence of data items - OffsetsOutputIteratorT d_offsets_out, ///< [out] Pointer to output sequence of run offsets - LengthsOutputIteratorT d_lengths_out, ///< [out] Pointer to output sequence of run lengths - EqualityOpT equality_op, ///< [in] T equality operator - OffsetT num_items) ///< [in] Total number of input items - : - temp_storage(temp_storage.Alias()), - d_in(d_in), - d_offsets_out(d_offsets_out), - d_lengths_out(d_lengths_out), - equality_op(equality_op), - scan_op(cub::Sum()), - num_items(num_items) - {} - - - //--------------------------------------------------------------------- - // Utility methods for initializing the selections - //--------------------------------------------------------------------- - - template - __device__ __forceinline__ void InitializeSelections( - OffsetT tile_offset, - OffsetT num_remaining, - T (&items)[ITEMS_PER_THREAD], - LengthOffsetPair (&lengths_and_num_runs)[ITEMS_PER_THREAD]) - { - bool head_flags[ITEMS_PER_THREAD]; - bool tail_flags[ITEMS_PER_THREAD]; - - OobInequalityOp inequality_op(num_remaining, equality_op); - - if (FIRST_TILE && LAST_TILE) - { - // First-and-last-tile always head-flags the first item and tail-flags the last item - - BlockDiscontinuityT(temp_storage.discontinuity).FlagHeadsAndTails( - head_flags, tail_flags, items, inequality_op); - } - else if (FIRST_TILE) - { - // First-tile always head-flags the first item - - // Get the first item from the next tile - T tile_successor_item; - if (threadIdx.x == BLOCK_THREADS - 1) - tile_successor_item = d_in[tile_offset + TILE_ITEMS]; - - BlockDiscontinuityT(temp_storage.discontinuity).FlagHeadsAndTails( - head_flags, tail_flags, tile_successor_item, items, inequality_op); - } - else if (LAST_TILE) - { - // Last-tile always flags the last item - - // Get the last item from the previous tile - T tile_predecessor_item; - if (threadIdx.x == 0) - tile_predecessor_item = d_in[tile_offset - 1]; - - BlockDiscontinuityT(temp_storage.discontinuity).FlagHeadsAndTails( - head_flags, 
tile_predecessor_item, tail_flags, items, inequality_op); - } - else - { - // Get the first item from the next tile - T tile_successor_item; - if (threadIdx.x == BLOCK_THREADS - 1) - tile_successor_item = d_in[tile_offset + TILE_ITEMS]; - - // Get the last item from the previous tile - T tile_predecessor_item; - if (threadIdx.x == 0) - tile_predecessor_item = d_in[tile_offset - 1]; - - BlockDiscontinuityT(temp_storage.discontinuity).FlagHeadsAndTails( - head_flags, tile_predecessor_item, tail_flags, tile_successor_item, items, inequality_op); - } - - // Zip counts and runs - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - lengths_and_num_runs[ITEM].key = head_flags[ITEM] && (!tail_flags[ITEM]); - lengths_and_num_runs[ITEM].value = ((!head_flags[ITEM]) || (!tail_flags[ITEM])); - } - } - - //--------------------------------------------------------------------- - // Scan utility methods - //--------------------------------------------------------------------- - - /** - * Scan of allocations - */ - __device__ __forceinline__ void WarpScanAllocations( - LengthOffsetPair &tile_aggregate, - LengthOffsetPair &warp_aggregate, - LengthOffsetPair &warp_exclusive_in_tile, - LengthOffsetPair &thread_exclusive_in_warp, - LengthOffsetPair (&lengths_and_num_runs)[ITEMS_PER_THREAD]) - { - // Perform warpscans - unsigned int warp_id = ((WARPS == 1) ? 0 : threadIdx.x / WARP_THREADS); - int lane_id = LaneId(); - - LengthOffsetPair identity; - identity.key = 0; - identity.value = 0; - - LengthOffsetPair thread_inclusive; - LengthOffsetPair thread_aggregate = ThreadReduce(lengths_and_num_runs, scan_op); - WarpScanPairs(temp_storage.warp_scan[warp_id]).Scan( - thread_aggregate, - thread_inclusive, - thread_exclusive_in_warp, - identity, - scan_op); - - // Last lane in each warp shares its warp-aggregate - if (lane_id == WARP_THREADS - 1) - temp_storage.warp_aggregates.Alias()[warp_id] = thread_inclusive; - - CTA_SYNC(); - - // Accumulate total selected and the warp-wide prefix - warp_exclusive_in_tile = identity; - warp_aggregate = temp_storage.warp_aggregates.Alias()[warp_id]; - tile_aggregate = temp_storage.warp_aggregates.Alias()[0]; - - #pragma unroll - for (int WARP = 1; WARP < WARPS; ++WARP) - { - if (warp_id == WARP) - warp_exclusive_in_tile = tile_aggregate; - - tile_aggregate = scan_op(tile_aggregate, temp_storage.warp_aggregates.Alias()[WARP]); - } - } - - - //--------------------------------------------------------------------- - // Utility methods for scattering selections - //--------------------------------------------------------------------- - - /** - * Two-phase scatter, specialized for warp time-slicing - */ - template - __device__ __forceinline__ void ScatterTwoPhase( - OffsetT tile_num_runs_exclusive_in_global, - OffsetT warp_num_runs_aggregate, - OffsetT warp_num_runs_exclusive_in_tile, - OffsetT (&thread_num_runs_exclusive_in_warp)[ITEMS_PER_THREAD], - LengthOffsetPair (&lengths_and_offsets)[ITEMS_PER_THREAD], - Int2Type is_warp_time_slice) - { - unsigned int warp_id = ((WARPS == 1) ? 
0 : threadIdx.x / WARP_THREADS); - int lane_id = LaneId(); - - // Locally compact items within the warp (first warp) - if (warp_id == 0) - { - WarpExchangePairs(temp_storage.exchange_pairs[0]).ScatterToStriped(lengths_and_offsets, thread_num_runs_exclusive_in_warp); - } - - // Locally compact items within the warp (remaining warps) - #pragma unroll - for (int SLICE = 1; SLICE < WARPS; ++SLICE) - { - CTA_SYNC(); - - if (warp_id == SLICE) - { - WarpExchangePairs(temp_storage.exchange_pairs[0]).ScatterToStriped(lengths_and_offsets, thread_num_runs_exclusive_in_warp); - } - } - - // Global scatter - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - if ((ITEM * WARP_THREADS) < warp_num_runs_aggregate - lane_id) - { - OffsetT item_offset = - tile_num_runs_exclusive_in_global + - warp_num_runs_exclusive_in_tile + - (ITEM * WARP_THREADS) + lane_id; - - // Scatter offset - d_offsets_out[item_offset] = lengths_and_offsets[ITEM].key; - - // Scatter length if not the first (global) length - if ((!FIRST_TILE) || (ITEM != 0) || (threadIdx.x > 0)) - { - d_lengths_out[item_offset - 1] = lengths_and_offsets[ITEM].value; - } - } - } - } - - - /** - * Two-phase scatter - */ - template - __device__ __forceinline__ void ScatterTwoPhase( - OffsetT tile_num_runs_exclusive_in_global, - OffsetT warp_num_runs_aggregate, - OffsetT warp_num_runs_exclusive_in_tile, - OffsetT (&thread_num_runs_exclusive_in_warp)[ITEMS_PER_THREAD], - LengthOffsetPair (&lengths_and_offsets)[ITEMS_PER_THREAD], - Int2Type is_warp_time_slice) - { - unsigned int warp_id = ((WARPS == 1) ? 0 : threadIdx.x / WARP_THREADS); - int lane_id = LaneId(); - - // Unzip - OffsetT run_offsets[ITEMS_PER_THREAD]; - LengthT run_lengths[ITEMS_PER_THREAD]; - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - run_offsets[ITEM] = lengths_and_offsets[ITEM].key; - run_lengths[ITEM] = lengths_and_offsets[ITEM].value; - } - - WarpExchangeOffsets(temp_storage.exchange_offsets[warp_id]).ScatterToStriped(run_offsets, thread_num_runs_exclusive_in_warp); - - WARP_SYNC(0xffffffff); - - WarpExchangeLengths(temp_storage.exchange_lengths[warp_id]).ScatterToStriped(run_lengths, thread_num_runs_exclusive_in_warp); - - // Global scatter - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - if ((ITEM * WARP_THREADS) + lane_id < warp_num_runs_aggregate) - { - OffsetT item_offset = - tile_num_runs_exclusive_in_global + - warp_num_runs_exclusive_in_tile + - (ITEM * WARP_THREADS) + lane_id; - - // Scatter offset - d_offsets_out[item_offset] = run_offsets[ITEM]; - - // Scatter length if not the first (global) length - if ((!FIRST_TILE) || (ITEM != 0) || (threadIdx.x > 0)) - { - d_lengths_out[item_offset - 1] = run_lengths[ITEM]; - } - } - } - } - - - /** - * Direct scatter - */ - template - __device__ __forceinline__ void ScatterDirect( - OffsetT tile_num_runs_exclusive_in_global, - OffsetT warp_num_runs_aggregate, - OffsetT warp_num_runs_exclusive_in_tile, - OffsetT (&thread_num_runs_exclusive_in_warp)[ITEMS_PER_THREAD], - LengthOffsetPair (&lengths_and_offsets)[ITEMS_PER_THREAD]) - { - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - if (thread_num_runs_exclusive_in_warp[ITEM] < warp_num_runs_aggregate) - { - OffsetT item_offset = - tile_num_runs_exclusive_in_global + - warp_num_runs_exclusive_in_tile + - thread_num_runs_exclusive_in_warp[ITEM]; - - // Scatter offset - d_offsets_out[item_offset] = lengths_and_offsets[ITEM].key; - - // Scatter length if not the first (global) length 
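            /*
             * The "- 1" staggering below is deliberate: a run's length is
             * only known once the next run's head is encountered, so the
             * head scattered to item_offset finalizes the length of the run
             * that precedes it. Example: for runs at offsets [0, 3], the
             * head scattered to item_offset 1 writes its predecessor's
             * length into d_lengths_out[0]; the final run's length is
             * back-filled from the running total in ConsumeRange's
             * last-tile epilogue.
             */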
- if (item_offset >= 1) - { - d_lengths_out[item_offset - 1] = lengths_and_offsets[ITEM].value; - } - } - } - } - - - /** - * Scatter - */ - template - __device__ __forceinline__ void Scatter( - OffsetT tile_num_runs_aggregate, - OffsetT tile_num_runs_exclusive_in_global, - OffsetT warp_num_runs_aggregate, - OffsetT warp_num_runs_exclusive_in_tile, - OffsetT (&thread_num_runs_exclusive_in_warp)[ITEMS_PER_THREAD], - LengthOffsetPair (&lengths_and_offsets)[ITEMS_PER_THREAD]) - { - if ((ITEMS_PER_THREAD == 1) || (tile_num_runs_aggregate < BLOCK_THREADS)) - { - // Direct scatter if the warp has any items - if (warp_num_runs_aggregate) - { - ScatterDirect( - tile_num_runs_exclusive_in_global, - warp_num_runs_aggregate, - warp_num_runs_exclusive_in_tile, - thread_num_runs_exclusive_in_warp, - lengths_and_offsets); - } - } - else - { - // Scatter two phase - ScatterTwoPhase( - tile_num_runs_exclusive_in_global, - warp_num_runs_aggregate, - warp_num_runs_exclusive_in_tile, - thread_num_runs_exclusive_in_warp, - lengths_and_offsets, - Int2Type()); - } - } - - - - //--------------------------------------------------------------------- - // Cooperatively scan a device-wide sequence of tiles with other CTAs - //--------------------------------------------------------------------- - - /** - * Process a tile of input (dynamic chained scan) - */ - template < - bool LAST_TILE> - __device__ __forceinline__ LengthOffsetPair ConsumeTile( - OffsetT num_items, ///< Total number of global input items - OffsetT num_remaining, ///< Number of global input items remaining (including this tile) - int tile_idx, ///< Tile index - OffsetT tile_offset, ///< Tile offset - ScanTileStateT &tile_status) ///< Global list of tile status - { - if (tile_idx == 0) - { - // First tile - - // Load items - T items[ITEMS_PER_THREAD]; - if (LAST_TILE) - BlockLoadT(temp_storage.load).Load(d_in + tile_offset, items, num_remaining, T()); - else - BlockLoadT(temp_storage.load).Load(d_in + tile_offset, items); - - if (SYNC_AFTER_LOAD) - CTA_SYNC(); - - // Set flags - LengthOffsetPair lengths_and_num_runs[ITEMS_PER_THREAD]; - - InitializeSelections( - tile_offset, - num_remaining, - items, - lengths_and_num_runs); - - // Exclusive scan of lengths and runs - LengthOffsetPair tile_aggregate; - LengthOffsetPair warp_aggregate; - LengthOffsetPair warp_exclusive_in_tile; - LengthOffsetPair thread_exclusive_in_warp; - - WarpScanAllocations( - tile_aggregate, - warp_aggregate, - warp_exclusive_in_tile, - thread_exclusive_in_warp, - lengths_and_num_runs); - - // Update tile status if this is not the last tile - if (!LAST_TILE && (threadIdx.x == 0)) - tile_status.SetInclusive(0, tile_aggregate); - - // Update thread_exclusive_in_warp to fold in warp run-length - if (thread_exclusive_in_warp.key == 0) - thread_exclusive_in_warp.value += warp_exclusive_in_tile.value; - - LengthOffsetPair lengths_and_offsets[ITEMS_PER_THREAD]; - OffsetT thread_num_runs_exclusive_in_warp[ITEMS_PER_THREAD]; - LengthOffsetPair lengths_and_num_runs2[ITEMS_PER_THREAD]; - - // Downsweep scan through lengths_and_num_runs - ThreadScanExclusive(lengths_and_num_runs, lengths_and_num_runs2, scan_op, thread_exclusive_in_warp); - - // Zip - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - lengths_and_offsets[ITEM].value = lengths_and_num_runs2[ITEM].value; - lengths_and_offsets[ITEM].key = tile_offset + (threadIdx.x * ITEMS_PER_THREAD) + ITEM; - thread_num_runs_exclusive_in_warp[ITEM] = (lengths_and_num_runs[ITEM].key) ? 
- lengths_and_num_runs2[ITEM].key : // keep - WARP_THREADS * ITEMS_PER_THREAD; // discard - } - - OffsetT tile_num_runs_aggregate = tile_aggregate.key; - OffsetT tile_num_runs_exclusive_in_global = 0; - OffsetT warp_num_runs_aggregate = warp_aggregate.key; - OffsetT warp_num_runs_exclusive_in_tile = warp_exclusive_in_tile.key; - - // Scatter - Scatter( - tile_num_runs_aggregate, - tile_num_runs_exclusive_in_global, - warp_num_runs_aggregate, - warp_num_runs_exclusive_in_tile, - thread_num_runs_exclusive_in_warp, - lengths_and_offsets); - - // Return running total (inclusive of this tile) - return tile_aggregate; - } - else - { - // Not first tile - - // Load items - T items[ITEMS_PER_THREAD]; - if (LAST_TILE) - BlockLoadT(temp_storage.load).Load(d_in + tile_offset, items, num_remaining, T()); - else - BlockLoadT(temp_storage.load).Load(d_in + tile_offset, items); - - if (SYNC_AFTER_LOAD) - CTA_SYNC(); - - // Set flags - LengthOffsetPair lengths_and_num_runs[ITEMS_PER_THREAD]; - - InitializeSelections( - tile_offset, - num_remaining, - items, - lengths_and_num_runs); - - // Exclusive scan of lengths and runs - LengthOffsetPair tile_aggregate; - LengthOffsetPair warp_aggregate; - LengthOffsetPair warp_exclusive_in_tile; - LengthOffsetPair thread_exclusive_in_warp; - - WarpScanAllocations( - tile_aggregate, - warp_aggregate, - warp_exclusive_in_tile, - thread_exclusive_in_warp, - lengths_and_num_runs); - - // First warp computes tile prefix in lane 0 - TilePrefixCallbackOpT prefix_op(tile_status, temp_storage.prefix, Sum(), tile_idx); - unsigned int warp_id = ((WARPS == 1) ? 0 : threadIdx.x / WARP_THREADS); - if (warp_id == 0) - { - prefix_op(tile_aggregate); - if (threadIdx.x == 0) - temp_storage.tile_exclusive = prefix_op.exclusive_prefix; - } - - CTA_SYNC(); - - LengthOffsetPair tile_exclusive_in_global = temp_storage.tile_exclusive; - - // Update thread_exclusive_in_warp to fold in warp and tile run-lengths - LengthOffsetPair thread_exclusive = scan_op(tile_exclusive_in_global, warp_exclusive_in_tile); - if (thread_exclusive_in_warp.key == 0) - thread_exclusive_in_warp.value += thread_exclusive.value; - - // Downsweep scan through lengths_and_num_runs - LengthOffsetPair lengths_and_num_runs2[ITEMS_PER_THREAD]; - LengthOffsetPair lengths_and_offsets[ITEMS_PER_THREAD]; - OffsetT thread_num_runs_exclusive_in_warp[ITEMS_PER_THREAD]; - - ThreadScanExclusive(lengths_and_num_runs, lengths_and_num_runs2, scan_op, thread_exclusive_in_warp); - - // Zip - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - lengths_and_offsets[ITEM].value = lengths_and_num_runs2[ITEM].value; - lengths_and_offsets[ITEM].key = tile_offset + (threadIdx.x * ITEMS_PER_THREAD) + ITEM; - thread_num_runs_exclusive_in_warp[ITEM] = (lengths_and_num_runs[ITEM].key) ? 
- lengths_and_num_runs2[ITEM].key : // keep - WARP_THREADS * ITEMS_PER_THREAD; // discard - } - - OffsetT tile_num_runs_aggregate = tile_aggregate.key; - OffsetT tile_num_runs_exclusive_in_global = tile_exclusive_in_global.key; - OffsetT warp_num_runs_aggregate = warp_aggregate.key; - OffsetT warp_num_runs_exclusive_in_tile = warp_exclusive_in_tile.key; - - // Scatter - Scatter( - tile_num_runs_aggregate, - tile_num_runs_exclusive_in_global, - warp_num_runs_aggregate, - warp_num_runs_exclusive_in_tile, - thread_num_runs_exclusive_in_warp, - lengths_and_offsets); - - // Return running total (inclusive of this tile) - return prefix_op.inclusive_prefix; - } - } - - - /** - * Scan tiles of items as part of a dynamic chained scan - */ - template ///< Output iterator type for recording number of items selected - __device__ __forceinline__ void ConsumeRange( - int num_tiles, ///< Total number of input tiles - ScanTileStateT& tile_status, ///< Global list of tile status - NumRunsIteratorT d_num_runs_out) ///< Output pointer for total number of runs identified - { - // Blocks are launched in increasing order, so just assign one tile per block - int tile_idx = (blockIdx.x * gridDim.y) + blockIdx.y; // Current tile index - OffsetT tile_offset = tile_idx * TILE_ITEMS; // Global offset for the current tile - OffsetT num_remaining = num_items - tile_offset; // Remaining items (including this tile) - - if (tile_idx < num_tiles - 1) - { - // Not the last tile (full) - ConsumeTile(num_items, num_remaining, tile_idx, tile_offset, tile_status); - } - else if (num_remaining > 0) - { - // The last tile (possibly partially-full) - LengthOffsetPair running_total = ConsumeTile(num_items, num_remaining, tile_idx, tile_offset, tile_status); - - if (threadIdx.x == 0) - { - // Output the total number of items selected - *d_num_runs_out = running_total.key; - - // The inclusive prefix contains accumulated length reduction for the last run - if (running_total.key > 0) - d_lengths_out[running_total.key - 1] = running_total.value; - } - } - } -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/agent/agent_scan.cuh b/ml-xgboost/cub/cub/agent/agent_scan.cuh deleted file mode 100644 index ef80ccb..0000000 --- a/ml-xgboost/cub/cub/agent/agent_scan.cuh +++ /dev/null @@ -1,471 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::AgentScan implements a stateful abstraction of CUDA thread blocks for participating in device-wide prefix scan . - */ - -#pragma once - -#include - -#include "single_pass_scan_operators.cuh" -#include "../block/block_load.cuh" -#include "../block/block_store.cuh" -#include "../block/block_scan.cuh" -#include "../grid/grid_queue.cuh" -#include "../iterator/cache_modified_input_iterator.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/****************************************************************************** - * Tuning policy types - ******************************************************************************/ - -/** - * Parameterizable tuning policy type for AgentScan - */ -template < - int _BLOCK_THREADS, ///< Threads per thread block - int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) - BlockLoadAlgorithm _LOAD_ALGORITHM, ///< The BlockLoad algorithm to use - CacheLoadModifier _LOAD_MODIFIER, ///< Cache load modifier for reading input elements - BlockStoreAlgorithm _STORE_ALGORITHM, ///< The BlockStore algorithm to use - BlockScanAlgorithm _SCAN_ALGORITHM> ///< The BlockScan algorithm to use -struct AgentScanPolicy -{ - enum - { - BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block - ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) - }; - - static const BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; ///< The BlockLoad algorithm to use - static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading input elements - static const BlockStoreAlgorithm STORE_ALGORITHM = _STORE_ALGORITHM; ///< The BlockStore algorithm to use - static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; ///< The BlockScan algorithm to use -}; - - - - -/****************************************************************************** - * Thread block abstractions - ******************************************************************************/ - -/** - * \brief AgentScan implements a stateful abstraction of CUDA thread blocks for participating in device-wide prefix scan . 
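 *
 * For orientation: this agent backs the cub::DeviceScan entry points
 * (InclusiveScan/ExclusiveScan and their Sum shorthands). The chained-scan
 * path coordinates thread blocks through a ScanTileState descriptor: each
 * block publishes first its local aggregate and then its inclusive prefix,
 * while successor blocks poll their predecessors' status to assemble an
 * exclusive prefix without a separate global pass. A minimal host-side
 * sketch (d_* buffer names are illustrative placeholders):
 *
 *     // in:      [1, 2, 3, 4]
 *     // yields   out: [0, 1, 3, 6]
 *     void   *d_temp_storage = NULL;
 *     size_t  temp_storage_bytes = 0;
 *     cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
 *         d_in, d_out, num_items);
 *     cudaMalloc(&d_temp_storage, temp_storage_bytes);
 *     cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
 *         d_in, d_out, num_items);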
- */ -template < - typename AgentScanPolicyT, ///< Parameterized AgentScanPolicyT tuning policy type - typename InputIteratorT, ///< Random-access input iterator type - typename OutputIteratorT, ///< Random-access output iterator type - typename ScanOpT, ///< Scan functor type - typename InitValueT, ///< The init_value element for ScanOpT type (cub::NullType for inclusive scan) - typename OffsetT> ///< Signed integer type for global offsets -struct AgentScan -{ - //--------------------------------------------------------------------- - // Types and constants - //--------------------------------------------------------------------- - - // The input value type - typedef typename std::iterator_traits::value_type InputT; - - // The output value type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type OutputT; // ... else the output iterator's value type - - // Tile status descriptor interface type - typedef ScanTileState ScanTileStateT; - - // Input iterator wrapper type (for applying cache modifier) - typedef typename If::VALUE, - CacheModifiedInputIterator, // Wrap the native input pointer with CacheModifiedInputIterator - InputIteratorT>::Type // Directly use the supplied input iterator type - WrappedInputIteratorT; - - // Constants - enum - { - IS_INCLUSIVE = Equals::VALUE, // Inclusive scan if no init_value type is provided - BLOCK_THREADS = AgentScanPolicyT::BLOCK_THREADS, - ITEMS_PER_THREAD = AgentScanPolicyT::ITEMS_PER_THREAD, - TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, - }; - - // Parameterized BlockLoad type - typedef BlockLoad< - OutputT, - AgentScanPolicyT::BLOCK_THREADS, - AgentScanPolicyT::ITEMS_PER_THREAD, - AgentScanPolicyT::LOAD_ALGORITHM> - BlockLoadT; - - // Parameterized BlockStore type - typedef BlockStore< - OutputT, - AgentScanPolicyT::BLOCK_THREADS, - AgentScanPolicyT::ITEMS_PER_THREAD, - AgentScanPolicyT::STORE_ALGORITHM> - BlockStoreT; - - // Parameterized BlockScan type - typedef BlockScan< - OutputT, - AgentScanPolicyT::BLOCK_THREADS, - AgentScanPolicyT::SCAN_ALGORITHM> - BlockScanT; - - // Callback type for obtaining tile prefix during block scan - typedef TilePrefixCallbackOp< - OutputT, - ScanOpT, - ScanTileStateT> - TilePrefixCallbackOpT; - - // Stateful BlockScan prefix callback type for managing a running total while scanning consecutive tiles - typedef BlockScanRunningPrefixOp< - OutputT, - ScanOpT> - RunningPrefixCallbackOp; - - // Shared memory type for this threadblock - union _TempStorage - { - typename BlockLoadT::TempStorage load; // Smem needed for tile loading - typename BlockStoreT::TempStorage store; // Smem needed for tile storing - - struct - { - typename TilePrefixCallbackOpT::TempStorage prefix; // Smem needed for cooperative prefix callback - typename BlockScanT::TempStorage scan; // Smem needed for tile scanning - }; - }; - - // Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - - //--------------------------------------------------------------------- - // Per-thread fields - //--------------------------------------------------------------------- - - _TempStorage& temp_storage; ///< Reference to temp_storage - WrappedInputIteratorT d_in; ///< Input data - OutputIteratorT d_out; ///< Output data - ScanOpT scan_op; ///< Binary scan operator - InitValueT init_value; ///< The init_value 
element for ScanOpT - - - //--------------------------------------------------------------------- - // Block scan utility methods - //--------------------------------------------------------------------- - - /** - * Exclusive scan specialization (first tile) - */ - __device__ __forceinline__ - void ScanTile( - OutputT (&items)[ITEMS_PER_THREAD], - OutputT init_value, - ScanOpT scan_op, - OutputT &block_aggregate, - Int2Type /*is_inclusive*/) - { - BlockScanT(temp_storage.scan).ExclusiveScan(items, items, init_value, scan_op, block_aggregate); - block_aggregate = scan_op(init_value, block_aggregate); - } - - - /** - * Inclusive scan specialization (first tile) - */ - __device__ __forceinline__ - void ScanTile( - OutputT (&items)[ITEMS_PER_THREAD], - InitValueT /*init_value*/, - ScanOpT scan_op, - OutputT &block_aggregate, - Int2Type /*is_inclusive*/) - { - BlockScanT(temp_storage.scan).InclusiveScan(items, items, scan_op, block_aggregate); - } - - - /** - * Exclusive scan specialization (subsequent tiles) - */ - template - __device__ __forceinline__ - void ScanTile( - OutputT (&items)[ITEMS_PER_THREAD], - ScanOpT scan_op, - PrefixCallback &prefix_op, - Int2Type /*is_inclusive*/) - { - BlockScanT(temp_storage.scan).ExclusiveScan(items, items, scan_op, prefix_op); - } - - - /** - * Inclusive scan specialization (subsequent tiles) - */ - template - __device__ __forceinline__ - void ScanTile( - OutputT (&items)[ITEMS_PER_THREAD], - ScanOpT scan_op, - PrefixCallback &prefix_op, - Int2Type /*is_inclusive*/) - { - BlockScanT(temp_storage.scan).InclusiveScan(items, items, scan_op, prefix_op); - } - - - //--------------------------------------------------------------------- - // Constructor - //--------------------------------------------------------------------- - - // Constructor - __device__ __forceinline__ - AgentScan( - TempStorage& temp_storage, ///< Reference to temp_storage - InputIteratorT d_in, ///< Input data - OutputIteratorT d_out, ///< Output data - ScanOpT scan_op, ///< Binary scan operator - InitValueT init_value) ///< Initial value to seed the exclusive scan - : - temp_storage(temp_storage.Alias()), - d_in(d_in), - d_out(d_out), - scan_op(scan_op), - init_value(init_value) - {} - - - //--------------------------------------------------------------------- - // Cooperatively scan a device-wide sequence of tiles with other CTAs - //--------------------------------------------------------------------- - - /** - * Process a tile of input (dynamic chained scan) - */ - template ///< Whether the current tile is the last tile - __device__ __forceinline__ void ConsumeTile( - OffsetT num_remaining, ///< Number of global input items remaining (including this tile) - int tile_idx, ///< Tile index - OffsetT tile_offset, ///< Tile offset - ScanTileStateT& tile_state) ///< Global tile state descriptor - { - // Load items - OutputT items[ITEMS_PER_THREAD]; - - if (IS_LAST_TILE) - BlockLoadT(temp_storage.load).Load(d_in + tile_offset, items, num_remaining); - else - BlockLoadT(temp_storage.load).Load(d_in + tile_offset, items); - - CTA_SYNC(); - - // Perform tile scan - if (tile_idx == 0) - { - // Scan first tile - OutputT block_aggregate; - ScanTile(items, init_value, scan_op, block_aggregate, Int2Type()); - if ((!IS_LAST_TILE) && (threadIdx.x == 0)) - tile_state.SetInclusive(0, block_aggregate); - } - else - { - // Scan non-first tile - TilePrefixCallbackOpT prefix_op(tile_state, temp_storage.prefix, scan_op, tile_idx); - ScanTile(items, scan_op, prefix_op, Int2Type()); - } - - CTA_SYNC(); - - // 
Store items - if (IS_LAST_TILE) - BlockStoreT(temp_storage.store).Store(d_out + tile_offset, items, num_remaining); - else - BlockStoreT(temp_storage.store).Store(d_out + tile_offset, items); - } - - - /** - * Scan tiles of items as part of a dynamic chained scan - */ - __device__ __forceinline__ void ConsumeRange( - int num_items, ///< Total number of input items - ScanTileStateT& tile_state, ///< Global tile state descriptor - int start_tile) ///< The starting tile for the current grid - { - // Blocks are launched in increasing order, so just assign one tile per block - int tile_idx = start_tile + blockIdx.x; // Current tile index - OffsetT tile_offset = OffsetT(TILE_ITEMS) * tile_idx; // Global offset for the current tile - OffsetT num_remaining = num_items - tile_offset; // Remaining items (including this tile) - - if (num_remaining > TILE_ITEMS) - { - // Not last tile - ConsumeTile(num_remaining, tile_idx, tile_offset, tile_state); - } - else if (num_remaining > 0) - { - // Last tile - ConsumeTile(num_remaining, tile_idx, tile_offset, tile_state); - } - } - - - //--------------------------------------------------------------------- - // Scan an sequence of consecutive tiles (independent of other thread blocks) - //--------------------------------------------------------------------- - - /** - * Process a tile of input - */ - template < - bool IS_FIRST_TILE, - bool IS_LAST_TILE> - __device__ __forceinline__ void ConsumeTile( - OffsetT tile_offset, ///< Tile offset - RunningPrefixCallbackOp& prefix_op, ///< Running prefix operator - int valid_items = TILE_ITEMS) ///< Number of valid items in the tile - { - // Load items - OutputT items[ITEMS_PER_THREAD]; - - if (IS_LAST_TILE) - BlockLoadT(temp_storage.load).Load(d_in + tile_offset, items, valid_items); - else - BlockLoadT(temp_storage.load).Load(d_in + tile_offset, items); - - CTA_SYNC(); - - // Block scan - if (IS_FIRST_TILE) - { - OutputT block_aggregate; - ScanTile(items, init_value, scan_op, block_aggregate, Int2Type()); - prefix_op.running_total = block_aggregate; - } - else - { - ScanTile(items, scan_op, prefix_op, Int2Type()); - } - - CTA_SYNC(); - - // Store items - if (IS_LAST_TILE) - BlockStoreT(temp_storage.store).Store(d_out + tile_offset, items, valid_items); - else - BlockStoreT(temp_storage.store).Store(d_out + tile_offset, items); - } - - - /** - * Scan a consecutive share of input tiles - */ - __device__ __forceinline__ void ConsumeRange( - OffsetT range_offset, ///< [in] Threadblock begin offset (inclusive) - OffsetT range_end) ///< [in] Threadblock end offset (exclusive) - { - BlockScanRunningPrefixOp prefix_op(scan_op); - - if (range_offset + TILE_ITEMS <= range_end) - { - // Consume first tile of input (full) - ConsumeTile(range_offset, prefix_op); - range_offset += TILE_ITEMS; - - // Consume subsequent full tiles of input - while (range_offset + TILE_ITEMS <= range_end) - { - ConsumeTile(range_offset, prefix_op); - range_offset += TILE_ITEMS; - } - - // Consume a partially-full tile - if (range_offset < range_end) - { - int valid_items = range_end - range_offset; - ConsumeTile(range_offset, prefix_op, valid_items); - } - } - else - { - // Consume the first tile of input (partially-full) - int valid_items = range_end - range_offset; - ConsumeTile(range_offset, prefix_op, valid_items); - } - } - - - /** - * Scan a consecutive share of input tiles, seeded with the specified prefix value - */ - __device__ __forceinline__ void ConsumeRange( - OffsetT range_offset, ///< [in] Threadblock begin offset (inclusive) - OffsetT 
range_end, ///< [in] Threadblock end offset (exclusive) - OutputT prefix) ///< [in] The prefix to apply to the scan segment - { - BlockScanRunningPrefixOp prefix_op(prefix, scan_op); - - // Consume full tiles of input - while (range_offset + TILE_ITEMS <= range_end) - { - ConsumeTile(range_offset, prefix_op); - range_offset += TILE_ITEMS; - } - - // Consume a partially-full tile - if (range_offset < range_end) - { - int valid_items = range_end - range_offset; - ConsumeTile(range_offset, prefix_op, valid_items); - } - } - -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/agent/agent_segment_fixup.cuh b/ml-xgboost/cub/cub/agent/agent_segment_fixup.cuh deleted file mode 100644 index b221cad..0000000 --- a/ml-xgboost/cub/cub/agent/agent_segment_fixup.cuh +++ /dev/null @@ -1,375 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::AgentSegmentFixup implements a stateful abstraction of CUDA thread blocks for participating in device-wide reduce-value-by-key. 
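The AgentScan deleted above picks inclusive vs. exclusive tile scans at compile time by overloading ScanTile on Int2Type<IS_INCLUSIVE>, and chains tiles by feeding each tile's aggregate into the next as a running prefix. A minimal host-side sketch of that dispatch-and-carry pattern, assuming plain int addition and an illustrative tile size of 4 (standalone C++, not code from this patch):

#include <cstdio>

// Compile-time tag, mirroring cub::Int2Type.
template <int VALUE> struct Int2Type { enum { V = VALUE }; };

const int TILE_ITEMS = 4;

// Exclusive specialization: seed with init, return the tile aggregate.
int ScanTile(int (&items)[TILE_ITEMS], int init, Int2Type<false>)
{
    int aggregate = init;
    for (int i = 0; i < TILE_ITEMS; ++i) { int v = items[i]; items[i] = aggregate; aggregate += v; }
    return aggregate;
}

// Inclusive specialization: the init parameter is unused (cub passes NullType).
int ScanTile(int (&items)[TILE_ITEMS], int /*init*/, Int2Type<true>)
{
    int aggregate = 0;
    for (int i = 0; i < TILE_ITEMS; ++i) { aggregate += items[i]; items[i] = aggregate; }
    return aggregate;
}

int main()
{
    int data[2][TILE_ITEMS] = {{1, 2, 3, 4}, {5, 6, 7, 8}};
    int running_total = 0;  // plays the role of BlockScanRunningPrefixOp::running_total
    for (int tile = 0; tile < 2; ++tile)
        running_total = ScanTile(data[tile], running_total, Int2Type<false>());
    printf("exclusive scan: %d %d %d %d %d %d %d %d (total %d)\n",
           data[0][0], data[0][1], data[0][2], data[0][3],
           data[1][0], data[1][1], data[1][2], data[1][3], running_total);
    return 0;
}

This prints 0 1 3 6 10 15 21 28 (total 36); in the real agent the per-tile loop body is the cooperative BlockScanT call, and the carry is either a running register (ConsumeRange over a private range) or the decoupled look-back through ScanTileStateT (chained scan across blocks).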
- */ - -#pragma once - -#include - -#include "single_pass_scan_operators.cuh" -#include "../block/block_load.cuh" -#include "../block/block_store.cuh" -#include "../block/block_scan.cuh" -#include "../block/block_discontinuity.cuh" -#include "../iterator/cache_modified_input_iterator.cuh" -#include "../iterator/constant_input_iterator.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/****************************************************************************** - * Tuning policy types - ******************************************************************************/ - -/** - * Parameterizable tuning policy type for AgentSegmentFixup - */ -template < - int _BLOCK_THREADS, ///< Threads per thread block - int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) - BlockLoadAlgorithm _LOAD_ALGORITHM, ///< The BlockLoad algorithm to use - CacheLoadModifier _LOAD_MODIFIER, ///< Cache load modifier for reading input elements - BlockScanAlgorithm _SCAN_ALGORITHM> ///< The BlockScan algorithm to use -struct AgentSegmentFixupPolicy -{ - enum - { - BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block - ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) - }; - - static const BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; ///< The BlockLoad algorithm to use - static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading input elements - static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; ///< The BlockScan algorithm to use -}; - - -/****************************************************************************** - * Thread block abstractions - ******************************************************************************/ - -/** - * \brief AgentSegmentFixup implements a stateful abstraction of CUDA thread blocks for participating in device-wide reduce-value-by-key - */ -template < - typename AgentSegmentFixupPolicyT, ///< Parameterized AgentSegmentFixupPolicy tuning policy type - typename PairsInputIteratorT, ///< Random-access input iterator type for keys - typename AggregatesOutputIteratorT, ///< Random-access output iterator type for values - typename EqualityOpT, ///< KeyT equality operator type - typename ReductionOpT, ///< ValueT reduction operator type - typename OffsetT> ///< Signed integer type for global offsets -struct AgentSegmentFixup -{ - //--------------------------------------------------------------------- - // Types and constants - //--------------------------------------------------------------------- - - // Data type of key-value input iterator - typedef typename std::iterator_traits::value_type KeyValuePairT; - - // Value type - typedef typename KeyValuePairT::Value ValueT; - - // Tile status descriptor interface type - typedef ReduceByKeyScanTileState ScanTileStateT; - - // Constants - enum - { - BLOCK_THREADS = AgentSegmentFixupPolicyT::BLOCK_THREADS, - ITEMS_PER_THREAD = AgentSegmentFixupPolicyT::ITEMS_PER_THREAD, - TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, - - // Whether or not do fixup using RLE + global atomics - USE_ATOMIC_FIXUP = (CUB_PTX_ARCH >= 350) && - (Equals::VALUE || - Equals::VALUE || - Equals::VALUE || - Equals::VALUE), - - // Whether or not the scan operation has a zero-valued identity value (true if we're performing addition on a primitive type) - HAS_IDENTITY_ZERO = (Equals::VALUE) && (Traits::PRIMITIVE), - }; - - // Cache-modified Input iterator wrapper type (for applying 
cache modifier) for keys - typedef typename If::VALUE, - CacheModifiedInputIterator, // Wrap the native input pointer with CacheModifiedValuesInputIterator - PairsInputIteratorT>::Type // Directly use the supplied input iterator type - WrappedPairsInputIteratorT; - - // Cache-modified Input iterator wrapper type (for applying cache modifier) for fixup values - typedef typename If::VALUE, - CacheModifiedInputIterator, // Wrap the native input pointer with CacheModifiedValuesInputIterator - AggregatesOutputIteratorT>::Type // Directly use the supplied input iterator type - WrappedFixupInputIteratorT; - - // Reduce-value-by-segment scan operator - typedef ReduceByKeyOp ReduceBySegmentOpT; - - // Parameterized BlockLoad type for pairs - typedef BlockLoad< - KeyValuePairT, - BLOCK_THREADS, - ITEMS_PER_THREAD, - AgentSegmentFixupPolicyT::LOAD_ALGORITHM> - BlockLoadPairs; - - // Parameterized BlockScan type - typedef BlockScan< - KeyValuePairT, - BLOCK_THREADS, - AgentSegmentFixupPolicyT::SCAN_ALGORITHM> - BlockScanT; - - // Callback type for obtaining tile prefix during block scan - typedef TilePrefixCallbackOp< - KeyValuePairT, - ReduceBySegmentOpT, - ScanTileStateT> - TilePrefixCallbackOpT; - - // Shared memory type for this threadblock - union _TempStorage - { - struct - { - typename BlockScanT::TempStorage scan; // Smem needed for tile scanning - typename TilePrefixCallbackOpT::TempStorage prefix; // Smem needed for cooperative prefix callback - }; - - // Smem needed for loading keys - typename BlockLoadPairs::TempStorage load_pairs; - }; - - // Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - - //--------------------------------------------------------------------- - // Per-thread fields - //--------------------------------------------------------------------- - - _TempStorage& temp_storage; ///< Reference to temp_storage - WrappedPairsInputIteratorT d_pairs_in; ///< Input keys - AggregatesOutputIteratorT d_aggregates_out; ///< Output value aggregates - WrappedFixupInputIteratorT d_fixup_in; ///< Fixup input values - InequalityWrapper inequality_op; ///< KeyT inequality operator - ReductionOpT reduction_op; ///< Reduction operator - ReduceBySegmentOpT scan_op; ///< Reduce-by-segment scan operator - - - //--------------------------------------------------------------------- - // Constructor - //--------------------------------------------------------------------- - - // Constructor - __device__ __forceinline__ - AgentSegmentFixup( - TempStorage& temp_storage, ///< Reference to temp_storage - PairsInputIteratorT d_pairs_in, ///< Input keys - AggregatesOutputIteratorT d_aggregates_out, ///< Output value aggregates - EqualityOpT equality_op, ///< KeyT equality operator - ReductionOpT reduction_op) ///< ValueT reduction operator - : - temp_storage(temp_storage.Alias()), - d_pairs_in(d_pairs_in), - d_aggregates_out(d_aggregates_out), - d_fixup_in(d_aggregates_out), - inequality_op(equality_op), - reduction_op(reduction_op), - scan_op(reduction_op) - {} - - - //--------------------------------------------------------------------- - // Cooperatively scan a device-wide sequence of tiles with other CTAs - //--------------------------------------------------------------------- - - - /** - * Process input tile. 
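The first ConsumeTile specialization, continuing below, folds runs of equal keys in registers and issues one atomicAdd per run boundary. A standalone CUDA sketch of that run-length-fold idea, with illustrative names and layout (pairs sorted by key, keys indexing directly into d_out); runs that cross thread boundaries simply cost one extra atomicAdd each, which stays correct because the updates are atomic:

// Each thread owns ITEMS consecutive key/value pairs; equal-key runs are
// folded locally so only one atomicAdd per run boundary reaches global memory.
__global__ void RleAtomicFixup(const int* keys, const float* vals, int n, float* d_out)
{
    const int ITEMS = 4;
    int base = (blockIdx.x * blockDim.x + threadIdx.x) * ITEMS;
    if (base >= n) return;

    int   run_key = keys[base];
    float run_sum = vals[base];
    for (int i = 1; i < ITEMS && base + i < n; ++i)
    {
        if (keys[base + i] == run_key)
        {
            run_sum += vals[base + i];          // extend the current run
        }
        else
        {
            atomicAdd(d_out + run_key, run_sum); // flush completed run
            run_key = keys[base + i];
            run_sum = vals[base + i];
        }
    }
    atomicAdd(d_out + run_key, run_sum);         // flush the final run
}

Launch with enough threads that each covers ITEMS consecutive pairs, e.g. RleAtomicFixup<<<grid, block>>>(d_keys, d_vals, n, d_out). The USE_ATOMIC_FIXUP constant above gates this path to sm_35+ and to value types with hardware atomicAdd support.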
Specialized for atomic-fixup - */ - template - __device__ __forceinline__ void ConsumeTile( - OffsetT num_remaining, ///< Number of global input items remaining (including this tile) - int tile_idx, ///< Tile index - OffsetT tile_offset, ///< Tile offset - ScanTileStateT& tile_state, ///< Global tile state descriptor - Int2Type use_atomic_fixup) ///< Marker whether to use atomicAdd (instead of reduce-by-key) - { - KeyValuePairT pairs[ITEMS_PER_THREAD]; - - // Load pairs - KeyValuePairT oob_pair; - oob_pair.key = -1; - - if (IS_LAST_TILE) - BlockLoadPairs(temp_storage.load_pairs).Load(d_pairs_in + tile_offset, pairs, num_remaining, oob_pair); - else - BlockLoadPairs(temp_storage.load_pairs).Load(d_pairs_in + tile_offset, pairs); - - // RLE - #pragma unroll - for (int ITEM = 1; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - ValueT* d_scatter = d_aggregates_out + pairs[ITEM - 1].key; - if (pairs[ITEM].key != pairs[ITEM - 1].key) - atomicAdd(d_scatter, pairs[ITEM - 1].value); - else - pairs[ITEM].value = reduction_op(pairs[ITEM - 1].value, pairs[ITEM].value); - } - - // Flush last item if valid - ValueT* d_scatter = d_aggregates_out + pairs[ITEMS_PER_THREAD - 1].key; - if ((!IS_LAST_TILE) || (pairs[ITEMS_PER_THREAD - 1].key >= 0)) - atomicAdd(d_scatter, pairs[ITEMS_PER_THREAD - 1].value); - } - - - /** - * Process input tile. Specialized for reduce-by-key fixup - */ - template - __device__ __forceinline__ void ConsumeTile( - OffsetT num_remaining, ///< Number of global input items remaining (including this tile) - int tile_idx, ///< Tile index - OffsetT tile_offset, ///< Tile offset - ScanTileStateT& tile_state, ///< Global tile state descriptor - Int2Type use_atomic_fixup) ///< Marker whether to use atomicAdd (instead of reduce-by-key) - { - KeyValuePairT pairs[ITEMS_PER_THREAD]; - KeyValuePairT scatter_pairs[ITEMS_PER_THREAD]; - - // Load pairs - KeyValuePairT oob_pair; - oob_pair.key = -1; - - if (IS_LAST_TILE) - BlockLoadPairs(temp_storage.load_pairs).Load(d_pairs_in + tile_offset, pairs, num_remaining, oob_pair); - else - BlockLoadPairs(temp_storage.load_pairs).Load(d_pairs_in + tile_offset, pairs); - - CTA_SYNC(); - - KeyValuePairT tile_aggregate; - if (tile_idx == 0) - { - // Exclusive scan of values and segment_flags - BlockScanT(temp_storage.scan).ExclusiveScan(pairs, scatter_pairs, scan_op, tile_aggregate); - - // Update tile status if this is not the last tile - if (threadIdx.x == 0) - { - // Set first segment id to not trigger a flush (invalid from exclusive scan) - scatter_pairs[0].key = pairs[0].key; - - if (!IS_LAST_TILE) - tile_state.SetInclusive(0, tile_aggregate); - - } - } - else - { - // Exclusive scan of values and segment_flags - TilePrefixCallbackOpT prefix_op(tile_state, temp_storage.prefix, scan_op, tile_idx); - BlockScanT(temp_storage.scan).ExclusiveScan(pairs, scatter_pairs, scan_op, prefix_op); - tile_aggregate = prefix_op.GetBlockAggregate(); - } - - // Scatter updated values - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - if (scatter_pairs[ITEM].key != pairs[ITEM].key) - { - // Update the value at the key location - ValueT value = d_fixup_in[scatter_pairs[ITEM].key]; - value = reduction_op(value, scatter_pairs[ITEM].value); - - d_aggregates_out[scatter_pairs[ITEM].key] = value; - } - } - - // Finalize the last item - if (IS_LAST_TILE) - { - // Last thread will output final count and last item, if necessary - if (threadIdx.x == BLOCK_THREADS - 1) - { - // If the last tile is a whole tile, the inclusive prefix contains accumulated value 
reduction for the last segment - if (num_remaining == TILE_ITEMS) - { - // Update the value at the key location - OffsetT last_key = pairs[ITEMS_PER_THREAD - 1].key; - d_aggregates_out[last_key] = reduction_op(tile_aggregate.value, d_fixup_in[last_key]); - } - } - } - } - - - /** - * Scan tiles of items as part of a dynamic chained scan - */ - __device__ __forceinline__ void ConsumeRange( - int num_items, ///< Total number of input items - int num_tiles, ///< Total number of input tiles - ScanTileStateT& tile_state) ///< Global tile state descriptor - { - // Blocks are launched in increasing order, so just assign one tile per block - int tile_idx = (blockIdx.x * gridDim.y) + blockIdx.y; // Current tile index - OffsetT tile_offset = tile_idx * TILE_ITEMS; // Global offset for the current tile - OffsetT num_remaining = num_items - tile_offset; // Remaining items (including this tile) - - if (num_remaining > TILE_ITEMS) - { - // Not the last tile (full) - ConsumeTile(num_remaining, tile_idx, tile_offset, tile_state, Int2Type()); - } - else if (num_remaining > 0) - { - // The last tile (possibly partially-full) - ConsumeTile(num_remaining, tile_idx, tile_offset, tile_state, Int2Type()); - } - } - -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/agent/agent_select_if.cuh b/ml-xgboost/cub/cub/agent/agent_select_if.cuh deleted file mode 100644 index a4e37c3..0000000 --- a/ml-xgboost/cub/cub/agent/agent_select_if.cuh +++ /dev/null @@ -1,703 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::AgentSelectIf implements a stateful abstraction of CUDA thread blocks for participating in device-wide select. 
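The net effect of the AgentSegmentFixup logic above: for each run of equal keys among the (key, value) pairs, fold the run's total into the aggregate already stored at that key with the reduction operator. A host reference of just that semantics (illustrative names, + standing in for the reduction):

#include <cstdio>

// Given (key, value) pairs sorted by key, fold each segment's total
// into out[key], i.e. out[key] = reduction_op(out[key], segment total).
void SegmentFixupReference(const int* keys, const float* vals, int n, float* out)
{
    int i = 0;
    while (i < n)
    {
        int   key = keys[i];
        float sum = 0.0f;
        for (; i < n && keys[i] == key; ++i) sum += vals[i];
        out[key] += sum;
    }
}

int main()
{
    int   keys[] = {0, 0, 2, 2, 2, 5};
    float vals[] = {1, 2, 3, 4, 5, 6};
    float out[6] = {0};
    SegmentFixupReference(keys, vals, 6, out);
    printf("out[0]=%g out[2]=%g out[5]=%g\n", out[0], out[2], out[5]);  // 3 12 6
    return 0;
}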
- */ - -#pragma once - -#include - -#include "single_pass_scan_operators.cuh" -#include "../block/block_load.cuh" -#include "../block/block_store.cuh" -#include "../block/block_scan.cuh" -#include "../block/block_exchange.cuh" -#include "../block/block_discontinuity.cuh" -#include "../grid/grid_queue.cuh" -#include "../iterator/cache_modified_input_iterator.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/****************************************************************************** - * Tuning policy types - ******************************************************************************/ - -/** - * Parameterizable tuning policy type for AgentSelectIf - */ -template < - int _BLOCK_THREADS, ///< Threads per thread block - int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) - BlockLoadAlgorithm _LOAD_ALGORITHM, ///< The BlockLoad algorithm to use - CacheLoadModifier _LOAD_MODIFIER, ///< Cache load modifier for reading input elements - BlockScanAlgorithm _SCAN_ALGORITHM> ///< The BlockScan algorithm to use -struct AgentSelectIfPolicy -{ - enum - { - BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block - ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) - }; - - static const BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; ///< The BlockLoad algorithm to use - static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading input elements - static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; ///< The BlockScan algorithm to use -}; - - - - -/****************************************************************************** - * Thread block abstractions - ******************************************************************************/ - - -/** - * \brief AgentSelectIf implements a stateful abstraction of CUDA thread blocks for participating in device-wide selection - * - * Performs functor-based selection if SelectOpT functor type != NullType - * Otherwise performs flag-based selection if FlagsInputIterator's value type != NullType - * Otherwise performs discontinuity selection (keep unique) - */ -template < - typename AgentSelectIfPolicyT, ///< Parameterized AgentSelectIfPolicy tuning policy type - typename InputIteratorT, ///< Random-access input iterator type for selection items - typename FlagsInputIteratorT, ///< Random-access input iterator type for selections (NullType* if a selection functor or discontinuity flagging is to be used for selection) - typename SelectedOutputIteratorT, ///< Random-access input iterator type for selection_flags items - typename SelectOpT, ///< Selection operator type (NullType if selections or discontinuity flagging is to be used for selection) - typename EqualityOpT, ///< Equality operator type (NullType if selection functor or selections is to be used for selection) - typename OffsetT, ///< Signed integer type for global offsets - bool KEEP_REJECTS> ///< Whether or not we push rejected items to the back of the output -struct AgentSelectIf -{ - //--------------------------------------------------------------------- - // Types and constants - //--------------------------------------------------------------------- - - // The input value type - typedef typename std::iterator_traits::value_type InputT; - - // The output value type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? 
- typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type OutputT; // ... else the output iterator's value type - - // The flag value type - typedef typename std::iterator_traits::value_type FlagT; - - // Tile status descriptor interface type - typedef ScanTileState ScanTileStateT; - - // Constants - enum - { - USE_SELECT_OP, - USE_SELECT_FLAGS, - USE_DISCONTINUITY, - - BLOCK_THREADS = AgentSelectIfPolicyT::BLOCK_THREADS, - ITEMS_PER_THREAD = AgentSelectIfPolicyT::ITEMS_PER_THREAD, - TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, - TWO_PHASE_SCATTER = (ITEMS_PER_THREAD > 1), - - SELECT_METHOD = (!Equals::VALUE) ? - USE_SELECT_OP : - (!Equals::VALUE) ? - USE_SELECT_FLAGS : - USE_DISCONTINUITY - }; - - // Cache-modified Input iterator wrapper type (for applying cache modifier) for items - typedef typename If::VALUE, - CacheModifiedInputIterator, // Wrap the native input pointer with CacheModifiedValuesInputIterator - InputIteratorT>::Type // Directly use the supplied input iterator type - WrappedInputIteratorT; - - // Cache-modified Input iterator wrapper type (for applying cache modifier) for values - typedef typename If::VALUE, - CacheModifiedInputIterator, // Wrap the native input pointer with CacheModifiedValuesInputIterator - FlagsInputIteratorT>::Type // Directly use the supplied input iterator type - WrappedFlagsInputIteratorT; - - // Parameterized BlockLoad type for input data - typedef BlockLoad< - OutputT, - BLOCK_THREADS, - ITEMS_PER_THREAD, - AgentSelectIfPolicyT::LOAD_ALGORITHM> - BlockLoadT; - - // Parameterized BlockLoad type for flags - typedef BlockLoad< - FlagT, - BLOCK_THREADS, - ITEMS_PER_THREAD, - AgentSelectIfPolicyT::LOAD_ALGORITHM> - BlockLoadFlags; - - // Parameterized BlockDiscontinuity type for items - typedef BlockDiscontinuity< - OutputT, - BLOCK_THREADS> - BlockDiscontinuityT; - - // Parameterized BlockScan type - typedef BlockScan< - OffsetT, - BLOCK_THREADS, - AgentSelectIfPolicyT::SCAN_ALGORITHM> - BlockScanT; - - // Callback type for obtaining tile prefix during block scan - typedef TilePrefixCallbackOp< - OffsetT, - cub::Sum, - ScanTileStateT> - TilePrefixCallbackOpT; - - // Item exchange type - typedef OutputT ItemExchangeT[TILE_ITEMS]; - - // Shared memory type for this threadblock - union _TempStorage - { - struct - { - typename BlockScanT::TempStorage scan; // Smem needed for tile scanning - typename TilePrefixCallbackOpT::TempStorage prefix; // Smem needed for cooperative prefix callback - typename BlockDiscontinuityT::TempStorage discontinuity; // Smem needed for discontinuity detection - }; - - // Smem needed for loading items - typename BlockLoadT::TempStorage load_items; - - // Smem needed for loading values - typename BlockLoadFlags::TempStorage load_flags; - - // Smem needed for compacting items (allows non POD items in this union) - Uninitialized raw_exchange; - }; - - // Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - - //--------------------------------------------------------------------- - // Per-thread fields - //--------------------------------------------------------------------- - - _TempStorage& temp_storage; ///< Reference to temp_storage - WrappedInputIteratorT d_in; ///< Input items - SelectedOutputIteratorT d_selected_out; ///< Unique output items - WrappedFlagsInputIteratorT d_flags_in; ///< Input selection flags (if applicable) - InequalityWrapper inequality_op; ///< T inequality operator - 
SelectOpT select_op; ///< Selection operator - OffsetT num_items; ///< Total number of input items - - - //--------------------------------------------------------------------- - // Constructor - //--------------------------------------------------------------------- - - // Constructor - __device__ __forceinline__ - AgentSelectIf( - TempStorage &temp_storage, ///< Reference to temp_storage - InputIteratorT d_in, ///< Input data - FlagsInputIteratorT d_flags_in, ///< Input selection flags (if applicable) - SelectedOutputIteratorT d_selected_out, ///< Output data - SelectOpT select_op, ///< Selection operator - EqualityOpT equality_op, ///< Equality operator - OffsetT num_items) ///< Total number of input items - : - temp_storage(temp_storage.Alias()), - d_in(d_in), - d_flags_in(d_flags_in), - d_selected_out(d_selected_out), - select_op(select_op), - inequality_op(equality_op), - num_items(num_items) - {} - - - //--------------------------------------------------------------------- - // Utility methods for initializing the selections - //--------------------------------------------------------------------- - - /** - * Initialize selections (specialized for selection operator) - */ - template - __device__ __forceinline__ void InitializeSelections( - OffsetT /*tile_offset*/, - OffsetT num_tile_items, - OutputT (&items)[ITEMS_PER_THREAD], - OffsetT (&selection_flags)[ITEMS_PER_THREAD], - Int2Type /*select_method*/) - { - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - // Out-of-bounds items are selection_flags - selection_flags[ITEM] = 1; - - if (!IS_LAST_TILE || (OffsetT(threadIdx.x * ITEMS_PER_THREAD) + ITEM < num_tile_items)) - selection_flags[ITEM] = select_op(items[ITEM]); - } - } - - - /** - * Initialize selections (specialized for valid flags) - */ - template - __device__ __forceinline__ void InitializeSelections( - OffsetT tile_offset, - OffsetT num_tile_items, - OutputT (&/*items*/)[ITEMS_PER_THREAD], - OffsetT (&selection_flags)[ITEMS_PER_THREAD], - Int2Type /*select_method*/) - { - CTA_SYNC(); - - FlagT flags[ITEMS_PER_THREAD]; - - if (IS_LAST_TILE) - { - // Out-of-bounds items are selection_flags - BlockLoadFlags(temp_storage.load_flags).Load(d_flags_in + tile_offset, flags, num_tile_items, 1); - } - else - { - BlockLoadFlags(temp_storage.load_flags).Load(d_flags_in + tile_offset, flags); - } - - // Convert flag type to selection_flags type - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - selection_flags[ITEM] = flags[ITEM]; - } - } - - - /** - * Initialize selections (specialized for discontinuity detection) - */ - template - __device__ __forceinline__ void InitializeSelections( - OffsetT tile_offset, - OffsetT num_tile_items, - OutputT (&items)[ITEMS_PER_THREAD], - OffsetT (&selection_flags)[ITEMS_PER_THREAD], - Int2Type /*select_method*/) - { - if (IS_FIRST_TILE) - { - CTA_SYNC(); - - // Set head selection_flags. 
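For the discontinuity (keep-unique) method handled here, a head flag means "differs from its predecessor": the first tile flags its very first item unconditionally, while later tiles compare their item 0 against the last item of the previous tile (tile_predecessor), as the specialization continues below. A host reference (illustrative names, standalone C++):

#include <cstdio>

// Host reference for head-flag ("unique") selection.
void FlagHeadsReference(const int* in, int n, int* flags,
                        bool is_first_tile, int tile_predecessor)
{
    for (int i = 0; i < n; ++i)
    {
        int prev = (i > 0) ? in[i - 1] : tile_predecessor;
        flags[i] = (i == 0 && is_first_tile) ? 1 : (in[i] != prev);
    }
}

int main()
{
    int in[] = {3, 3, 7, 7, 7, 9};
    int flags[6];
    FlagHeadsReference(in, 6, flags, true, 0);
    for (int i = 0; i < 6; ++i) printf("%d ", flags[i]);  // 1 0 1 0 0 1
    printf("\n");
    return 0;
}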
First tile sets the first flag for the first item - BlockDiscontinuityT(temp_storage.discontinuity).FlagHeads(selection_flags, items, inequality_op); - } - else - { - OutputT tile_predecessor; - if (threadIdx.x == 0) - tile_predecessor = d_in[tile_offset - 1]; - - CTA_SYNC(); - - BlockDiscontinuityT(temp_storage.discontinuity).FlagHeads(selection_flags, items, inequality_op, tile_predecessor); - } - - // Set selection flags for out-of-bounds items - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - // Set selection_flags for out-of-bounds items - if ((IS_LAST_TILE) && (OffsetT(threadIdx.x * ITEMS_PER_THREAD) + ITEM >= num_tile_items)) - selection_flags[ITEM] = 1; - } - } - - - //--------------------------------------------------------------------- - // Scatter utility methods - //--------------------------------------------------------------------- - - /** - * Scatter flagged items to output offsets (specialized for direct scattering) - */ - template - __device__ __forceinline__ void ScatterDirect( - OutputT (&items)[ITEMS_PER_THREAD], - OffsetT (&selection_flags)[ITEMS_PER_THREAD], - OffsetT (&selection_indices)[ITEMS_PER_THREAD], - OffsetT num_selections) - { - // Scatter flagged items - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - if (selection_flags[ITEM]) - { - if ((!IS_LAST_TILE) || selection_indices[ITEM] < num_selections) - { - d_selected_out[selection_indices[ITEM]] = items[ITEM]; - } - } - } - } - - - /** - * Scatter flagged items to output offsets (specialized for two-phase scattering) - */ - template - __device__ __forceinline__ void ScatterTwoPhase( - OutputT (&items)[ITEMS_PER_THREAD], - OffsetT (&selection_flags)[ITEMS_PER_THREAD], - OffsetT (&selection_indices)[ITEMS_PER_THREAD], - int /*num_tile_items*/, ///< Number of valid items in this tile - int num_tile_selections, ///< Number of selections in this tile - OffsetT num_selections_prefix, ///< Total number of selections prior to this tile - OffsetT /*num_rejected_prefix*/, ///< Total number of rejections prior to this tile - Int2Type /*is_keep_rejects*/) ///< Marker type indicating whether to keep rejected items in the second partition - { - CTA_SYNC(); - - // Compact and scatter items - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - int local_scatter_offset = selection_indices[ITEM] - num_selections_prefix; - if (selection_flags[ITEM]) - { - temp_storage.raw_exchange.Alias()[local_scatter_offset] = items[ITEM]; - } - } - - CTA_SYNC(); - - for (int item = threadIdx.x; item < num_tile_selections; item += BLOCK_THREADS) - { - d_selected_out[num_selections_prefix + item] = temp_storage.raw_exchange.Alias()[item]; - } - } - - - /** - * Scatter flagged items to output offsets (specialized for two-phase scattering) - */ - template - __device__ __forceinline__ void ScatterTwoPhase( - OutputT (&items)[ITEMS_PER_THREAD], - OffsetT (&selection_flags)[ITEMS_PER_THREAD], - OffsetT (&selection_indices)[ITEMS_PER_THREAD], - int num_tile_items, ///< Number of valid items in this tile - int num_tile_selections, ///< Number of selections in this tile - OffsetT num_selections_prefix, ///< Total number of selections prior to this tile - OffsetT num_rejected_prefix, ///< Total number of rejections prior to this tile - Int2Type /*is_keep_rejects*/) ///< Marker type indicating whether to keep rejected items in the second partition - { - CTA_SYNC(); - - int tile_num_rejections = num_tile_items - num_tile_selections; - - // Scatter items to shared memory 
(rejections first) - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - int item_idx = (threadIdx.x * ITEMS_PER_THREAD) + ITEM; - int local_selection_idx = selection_indices[ITEM] - num_selections_prefix; - int local_rejection_idx = item_idx - local_selection_idx; - int local_scatter_offset = (selection_flags[ITEM]) ? - tile_num_rejections + local_selection_idx : - local_rejection_idx; - - temp_storage.raw_exchange.Alias()[local_scatter_offset] = items[ITEM]; - } - - CTA_SYNC(); - - // Gather items from shared memory and scatter to global - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - int item_idx = (ITEM * BLOCK_THREADS) + threadIdx.x; - int rejection_idx = item_idx; - int selection_idx = item_idx - tile_num_rejections; - OffsetT scatter_offset = (item_idx < tile_num_rejections) ? - num_items - num_rejected_prefix - rejection_idx - 1 : - num_selections_prefix + selection_idx; - - OutputT item = temp_storage.raw_exchange.Alias()[item_idx]; - - if (!IS_LAST_TILE || (item_idx < num_tile_items)) - { - d_selected_out[scatter_offset] = item; - } - } - } - - - /** - * Scatter flagged items - */ - template - __device__ __forceinline__ void Scatter( - OutputT (&items)[ITEMS_PER_THREAD], - OffsetT (&selection_flags)[ITEMS_PER_THREAD], - OffsetT (&selection_indices)[ITEMS_PER_THREAD], - int num_tile_items, ///< Number of valid items in this tile - int num_tile_selections, ///< Number of selections in this tile - OffsetT num_selections_prefix, ///< Total number of selections prior to this tile - OffsetT num_rejected_prefix, ///< Total number of rejections prior to this tile - OffsetT num_selections) ///< Total number of selections including this tile - { - // Do a two-phase scatter if (a) keeping both partitions or (b) two-phase is enabled and the average number of selection_flags items per thread is greater than one - if (KEEP_REJECTS || (TWO_PHASE_SCATTER && (num_tile_selections > BLOCK_THREADS))) - { - ScatterTwoPhase( - items, - selection_flags, - selection_indices, - num_tile_items, - num_tile_selections, - num_selections_prefix, - num_rejected_prefix, - Int2Type()); - } - else - { - ScatterDirect( - items, - selection_flags, - selection_indices, - num_selections); - } - } - - //--------------------------------------------------------------------- - // Cooperatively scan a device-wide sequence of tiles with other CTAs - //--------------------------------------------------------------------- - - - /** - * Process first tile of input (dynamic chained scan). 
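The KEEP_REJECTS ScatterTwoPhase above writes selections at the front in input order and rejects from the back in reverse input order (scatter_offset = num_items - num_rejected_prefix - rejection_idx - 1), which matches the convention cub::DevicePartition documents. A host reference of that layout (illustrative names):

#include <cstdio>

// Selections pack forward from the front; rejects fill backward from the
// end, so rejects come out in reverse input order.
void PartitionReference(const int* in, const int* flags, int n, int* out)
{
    int front = 0, back = n - 1;
    for (int i = 0; i < n; ++i)
    {
        if (flags[i]) out[front++] = in[i];
        else          out[back--]  = in[i];
    }
}

int main()
{
    int in[]    = {10, 11, 12, 13, 14};
    int flags[] = { 1,  0,  1,  0,  1};
    int out[5];
    PartitionReference(in, flags, 5, out);
    for (int i = 0; i < 5; ++i) printf("%d ", out[i]);  // 10 12 14 13 11
    printf("\n");
    return 0;
}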
Returns the running count of selections (including this tile) - */ - template - __device__ __forceinline__ OffsetT ConsumeFirstTile( - int num_tile_items, ///< Number of input items comprising this tile - OffsetT tile_offset, ///< Tile offset - ScanTileStateT& tile_state) ///< Global tile state descriptor - { - OutputT items[ITEMS_PER_THREAD]; - OffsetT selection_flags[ITEMS_PER_THREAD]; - OffsetT selection_indices[ITEMS_PER_THREAD]; - - // Load items - if (IS_LAST_TILE) - BlockLoadT(temp_storage.load_items).Load(d_in + tile_offset, items, num_tile_items); - else - BlockLoadT(temp_storage.load_items).Load(d_in + tile_offset, items); - - // Initialize selection_flags - InitializeSelections( - tile_offset, - num_tile_items, - items, - selection_flags, - Int2Type()); - - CTA_SYNC(); - - // Exclusive scan of selection_flags - OffsetT num_tile_selections; - BlockScanT(temp_storage.scan).ExclusiveSum(selection_flags, selection_indices, num_tile_selections); - - if (threadIdx.x == 0) - { - // Update tile status if this is not the last tile - if (!IS_LAST_TILE) - tile_state.SetInclusive(0, num_tile_selections); - } - - // Discount any out-of-bounds selections - if (IS_LAST_TILE) - num_tile_selections -= (TILE_ITEMS - num_tile_items); - - // Scatter flagged items - Scatter( - items, - selection_flags, - selection_indices, - num_tile_items, - num_tile_selections, - 0, - 0, - num_tile_selections); - - return num_tile_selections; - } - - - /** - * Process subsequent tile of input (dynamic chained scan). Returns the running count of selections (including this tile) - */ - template - __device__ __forceinline__ OffsetT ConsumeSubsequentTile( - int num_tile_items, ///< Number of input items comprising this tile - int tile_idx, ///< Tile index - OffsetT tile_offset, ///< Tile offset - ScanTileStateT& tile_state) ///< Global tile state descriptor - { - OutputT items[ITEMS_PER_THREAD]; - OffsetT selection_flags[ITEMS_PER_THREAD]; - OffsetT selection_indices[ITEMS_PER_THREAD]; - - // Load items - if (IS_LAST_TILE) - BlockLoadT(temp_storage.load_items).Load(d_in + tile_offset, items, num_tile_items); - else - BlockLoadT(temp_storage.load_items).Load(d_in + tile_offset, items); - - // Initialize selection_flags - InitializeSelections( - tile_offset, - num_tile_items, - items, - selection_flags, - Int2Type()); - - CTA_SYNC(); - - // Exclusive scan of values and selection_flags - TilePrefixCallbackOpT prefix_op(tile_state, temp_storage.prefix, cub::Sum(), tile_idx); - BlockScanT(temp_storage.scan).ExclusiveSum(selection_flags, selection_indices, prefix_op); - - OffsetT num_tile_selections = prefix_op.GetBlockAggregate(); - OffsetT num_selections = prefix_op.GetInclusivePrefix(); - OffsetT num_selections_prefix = prefix_op.GetExclusivePrefix(); - OffsetT num_rejected_prefix = (tile_idx * TILE_ITEMS) - num_selections_prefix; - - // Discount any out-of-bounds selections - if (IS_LAST_TILE) - { - int num_discount = TILE_ITEMS - num_tile_items; - num_selections -= num_discount; - num_tile_selections -= num_discount; - } - - // Scatter flagged items - Scatter( - items, - selection_flags, - selection_indices, - num_tile_items, - num_tile_selections, - num_selections_prefix, - num_rejected_prefix, - num_selections); - - return num_selections; - } - - - /** - * Process a tile of input - */ - template - __device__ __forceinline__ OffsetT ConsumeTile( - int num_tile_items, ///< Number of input items comprising this tile - int tile_idx, ///< Tile index - OffsetT tile_offset, ///< Tile offset - ScanTileStateT& tile_state) 
///< Global tile state descriptor - { - OffsetT num_selections; - if (tile_idx == 0) - { - num_selections = ConsumeFirstTile(num_tile_items, tile_offset, tile_state); - } - else - { - num_selections = ConsumeSubsequentTile(num_tile_items, tile_idx, tile_offset, tile_state); - } - - return num_selections; - } - - - /** - * Scan tiles of items as part of a dynamic chained scan - */ - template ///< Output iterator type for recording number of items selection_flags - __device__ __forceinline__ void ConsumeRange( - int num_tiles, ///< Total number of input tiles - ScanTileStateT& tile_state, ///< Global tile state descriptor - NumSelectedIteratorT d_num_selected_out) ///< Output total number selection_flags - { - // Blocks are launched in increasing order, so just assign one tile per block - int tile_idx = (blockIdx.x * gridDim.y) + blockIdx.y; // Current tile index - OffsetT tile_offset = tile_idx * TILE_ITEMS; // Global offset for the current tile - - if (tile_idx < num_tiles - 1) - { - // Not the last tile (full) - ConsumeTile(TILE_ITEMS, tile_idx, tile_offset, tile_state); - } - else - { - // The last tile (possibly partially-full) - OffsetT num_remaining = num_items - tile_offset; - OffsetT num_selections = ConsumeTile(num_remaining, tile_idx, tile_offset, tile_state); - - if (threadIdx.x == 0) - { - // Output the total number of items selection_flags - *d_num_selected_out = num_selections; - } - } - } - -}; - - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/agent/agent_spmv_csrt.cuh b/ml-xgboost/cub/cub/agent/agent_spmv_csrt.cuh deleted file mode 100644 index 29ab6ea..0000000 --- a/ml-xgboost/cub/cub/agent/agent_spmv_csrt.cuh +++ /dev/null @@ -1,638 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
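Stripped of tiling and look-back, the AgentSelectIf pipeline above is: flag each item, exclusive-sum the flags to obtain scatter slots, scatter the flagged items, report the total. A single-block CUDA sketch of that pipeline, assuming a launch of exactly 256 threads and n <= 256 (illustrative; the real agent replaces the naive shared-memory scan with BlockScanT and chains tiles through ScanTileStateT):

// Flag -> exclusive sum -> scatter, one block. Launch: SelectIfSketch<<<1, 256>>>.
__global__ void SelectIfSketch(const int* in, int n, int* out, int* num_selected)
{
    __shared__ int idx[256];                 // one flag/slot per thread
    int tid  = threadIdx.x;
    int flag = (tid < n && (in[tid] % 2 == 0)) ? 1 : 0;  // predicate: keep evens

    idx[tid] = flag;
    __syncthreads();

    // Naive Hillis-Steele inclusive scan of the flags.
    for (int d = 1; d < blockDim.x; d *= 2)
    {
        int v = (tid >= d) ? idx[tid - d] : 0;
        __syncthreads();
        idx[tid] += v;
        __syncthreads();
    }
    int excl = idx[tid] - flag;              // exclusive prefix = output slot

    if (flag) out[excl] = in[tid];
    if (tid == n - 1) *num_selected = idx[tid];
}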
- * - ******************************************************************************/ - -/** - * \file - * cub::AgentSpmv implements a stateful abstraction of CUDA thread blocks for participating in device-wide SpMV. - */ - -#pragma once - -#include - -#include "../util_type.cuh" -#include "../block/block_reduce.cuh" -#include "../block/block_scan.cuh" -#include "../block/block_exchange.cuh" -#include "../thread/thread_search.cuh" -#include "../thread/thread_operators.cuh" -#include "../iterator/cache_modified_input_iterator.cuh" -#include "../iterator/counting_input_iterator.cuh" -#include "../iterator/tex_ref_input_iterator.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/****************************************************************************** - * Tuning policy - ******************************************************************************/ - -/** - * Parameterizable tuning policy type for AgentSpmv - */ -template < - int _BLOCK_THREADS, ///< Threads per thread block - int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) - CacheLoadModifier _ROW_OFFSETS_SEARCH_LOAD_MODIFIER, ///< Cache load modifier for reading CSR row-offsets during search - CacheLoadModifier _ROW_OFFSETS_LOAD_MODIFIER, ///< Cache load modifier for reading CSR row-offsets - CacheLoadModifier _COLUMN_INDICES_LOAD_MODIFIER, ///< Cache load modifier for reading CSR column-indices - CacheLoadModifier _VALUES_LOAD_MODIFIER, ///< Cache load modifier for reading CSR values - CacheLoadModifier _VECTOR_VALUES_LOAD_MODIFIER, ///< Cache load modifier for reading vector values - bool _DIRECT_LOAD_NONZEROS, ///< Whether to load nonzeros directly from global during sequential merging (vs. pre-staged through shared memory) - BlockScanAlgorithm _SCAN_ALGORITHM> ///< The BlockScan algorithm to use -struct AgentSpmvPolicy -{ - enum - { - BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block - ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) - DIRECT_LOAD_NONZEROS = _DIRECT_LOAD_NONZEROS, ///< Whether to load nonzeros directly from global during sequential merging (pre-staged through shared memory) - }; - - static const CacheLoadModifier ROW_OFFSETS_SEARCH_LOAD_MODIFIER = _ROW_OFFSETS_SEARCH_LOAD_MODIFIER; ///< Cache load modifier for reading CSR row-offsets - static const CacheLoadModifier ROW_OFFSETS_LOAD_MODIFIER = _ROW_OFFSETS_LOAD_MODIFIER; ///< Cache load modifier for reading CSR row-offsets - static const CacheLoadModifier COLUMN_INDICES_LOAD_MODIFIER = _COLUMN_INDICES_LOAD_MODIFIER; ///< Cache load modifier for reading CSR column-indices - static const CacheLoadModifier VALUES_LOAD_MODIFIER = _VALUES_LOAD_MODIFIER; ///< Cache load modifier for reading CSR values - static const CacheLoadModifier VECTOR_VALUES_LOAD_MODIFIER = _VECTOR_VALUES_LOAD_MODIFIER; ///< Cache load modifier for reading vector values - static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; ///< The BlockScan algorithm to use - -}; - - -/****************************************************************************** - * Thread block abstractions - ******************************************************************************/ - -template < - typename ValueT, ///< Matrix and vector value type - typename OffsetT> ///< Signed integer type for sequence offsets -struct SpmvParams -{ - ValueT* d_values; ///< Pointer to the array of \p num_nonzeros values of the corresponding nonzero elements of matrix A. 
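The fields of SpmvParams, continuing below, describe y = alpha*A*x + beta*y over CSR data in which d_row_end_offsets[i] marks the end of row i; that is, a conventional CSR row-offsets array advanced by one entry. A host reference for exactly that convention (illustrative names, standalone):

// Row i's nonzeros occupy [row_start, row_end_offsets[i]) in values /
// column_indices, where row_start is the previous row's end (0 for row 0).
void CsrSpmvReference(const float* values, const int* row_end_offsets,
                      const int* column_indices, const float* x,
                      float* y, int num_rows, float alpha, float beta)
{
    int row_start = 0;
    for (int row = 0; row < num_rows; ++row)
    {
        float dot = 0.0f;
        for (int k = row_start; k < row_end_offsets[row]; ++k)
            dot += values[k] * x[column_indices[k]];
        y[row] = alpha * dot + beta * y[row];
        row_start = row_end_offsets[row];
    }
}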
- OffsetT* d_row_end_offsets; ///< Pointer to the array of \p m offsets demarcating the end of every row in \p d_column_indices and \p d_values - OffsetT* d_column_indices; ///< Pointer to the array of \p num_nonzeros column-indices of the corresponding nonzero elements of matrix A. (Indices are zero-valued.) - ValueT* d_vector_x; ///< Pointer to the array of \p num_cols values corresponding to the dense input vector x - ValueT* d_vector_y; ///< Pointer to the array of \p num_rows values corresponding to the dense output vector y - int num_rows; ///< Number of rows of matrix A. - int num_cols; ///< Number of columns of matrix A. - int num_nonzeros; ///< Number of nonzero elements of matrix A. - ValueT alpha; ///< Alpha multiplicand - ValueT beta; ///< Beta addend-multiplicand - - TexRefInputIterator t_vector_x; -}; - - -/** - * \brief AgentSpmv implements a stateful abstraction of CUDA thread blocks for participating in device-wide SpMV. - */ -template < - typename AgentSpmvPolicyT, ///< Parameterized AgentSpmvPolicy tuning policy type - typename ValueT, ///< Matrix and vector value type - typename OffsetT, ///< Signed integer type for sequence offsets - bool HAS_ALPHA, ///< Whether the input parameter \p alpha is 1 - bool HAS_BETA, ///< Whether the input parameter \p beta is 0 - int PTX_ARCH = CUB_PTX_ARCH> ///< PTX compute capability -struct AgentSpmv -{ - //--------------------------------------------------------------------- - // Types and constants - //--------------------------------------------------------------------- - - /// Constants - enum - { - BLOCK_THREADS = AgentSpmvPolicyT::BLOCK_THREADS, - ITEMS_PER_THREAD = AgentSpmvPolicyT::ITEMS_PER_THREAD, - TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, - }; - - /// 2D merge path coordinate type - typedef typename CubVector::Type CoordinateT; - - /// Input iterator wrapper types (for applying cache modifiers) - - typedef CacheModifiedInputIterator< - AgentSpmvPolicyT::ROW_OFFSETS_SEARCH_LOAD_MODIFIER, - OffsetT, - OffsetT> - RowOffsetsSearchIteratorT; - - typedef CacheModifiedInputIterator< - AgentSpmvPolicyT::ROW_OFFSETS_LOAD_MODIFIER, - OffsetT, - OffsetT> - RowOffsetsIteratorT; - - typedef CacheModifiedInputIterator< - AgentSpmvPolicyT::COLUMN_INDICES_LOAD_MODIFIER, - OffsetT, - OffsetT> - ColumnIndicesIteratorT; - - typedef CacheModifiedInputIterator< - AgentSpmvPolicyT::VALUES_LOAD_MODIFIER, - ValueT, - OffsetT> - ValueIteratorT; - - typedef CacheModifiedInputIterator< - AgentSpmvPolicyT::VECTOR_VALUES_LOAD_MODIFIER, - ValueT, - OffsetT> - VectorValueIteratorT; - - // Tuple type for scanning (pairs accumulated segment-value with segment-index) - typedef KeyValuePair KeyValuePairT; - - // Reduce-value-by-key scan operator - typedef ReduceByKeyOp ReduceBySegmentOpT; - - // BlockReduce specialization - typedef BlockReduce< - ValueT, - BLOCK_THREADS, - BLOCK_REDUCE_WARP_REDUCTIONS> - BlockReduceT; - - // BlockScan specialization - typedef BlockScan< - KeyValuePairT, - BLOCK_THREADS, - AgentSpmvPolicyT::SCAN_ALGORITHM> - BlockScanT; - - /// Merge item type (either a non-zero value or a row-end offset) - union MergeItem - { - // Value type to pair with index type OffsetT (NullType if loading values directly during merge) - typedef typename If::Type MergeValueT; - - OffsetT row_end_offset; - MergeValueT nonzero; - }; - - /// Shared memory type required by this thread block - struct _TempStorage - { - union { - CoordinateT tile_coord; - OffsetT turnstile; - }; - - union - { - // Smem needed for tile of merge items - MergeItem 
merge_items[ITEMS_PER_THREAD + TILE_ITEMS + 1]; - - // Smem needed for block-wide reduction - typename BlockReduceT::TempStorage reduce; - - // Smem needed for tile scanning - typename BlockScanT::TempStorage scan; - }; - }; - - /// Temporary storage type (unionable) - struct TempStorage : Uninitialized<_TempStorage> {}; - - - //--------------------------------------------------------------------- - // Per-thread fields - //--------------------------------------------------------------------- - - - _TempStorage& temp_storage; /// Reference to temp_storage - - SpmvParams& spmv_params; - - ValueIteratorT wd_values; ///< Wrapped pointer to the array of \p num_nonzeros values of the corresponding nonzero elements of matrix A. - RowOffsetsIteratorT wd_row_end_offsets; ///< Wrapped Pointer to the array of \p m offsets demarcating the end of every row in \p d_column_indices and \p d_values - ColumnIndicesIteratorT wd_column_indices; ///< Wrapped Pointer to the array of \p num_nonzeros column-indices of the corresponding nonzero elements of matrix A. (Indices are zero-valued.) - VectorValueIteratorT wd_vector_x; ///< Wrapped Pointer to the array of \p num_cols values corresponding to the dense input vector x - VectorValueIteratorT wd_vector_y; ///< Wrapped Pointer to the array of \p num_cols values corresponding to the dense input vector x - - - //--------------------------------------------------------------------- - // Interface - //--------------------------------------------------------------------- - - /** - * Constructor - */ - __device__ __forceinline__ AgentSpmv( - TempStorage& temp_storage, ///< Reference to temp_storage - SpmvParams& spmv_params) ///< SpMV input parameter bundle - : - temp_storage(temp_storage.Alias()), - spmv_params(spmv_params), - wd_values(spmv_params.d_values), - wd_row_end_offsets(spmv_params.d_row_end_offsets), - wd_column_indices(spmv_params.d_column_indices), - wd_vector_x(spmv_params.d_vector_x), - wd_vector_y(spmv_params.d_vector_y) - {} - - - - - /** - * Consume a merge tile, specialized for direct-load of nonzeros - * / - __device__ __forceinline__ KeyValuePairT ConsumeTile( - int tile_idx, - CoordinateT tile_start_coord, - CoordinateT tile_end_coord, - Int2Type is_direct_load) ///< Marker type indicating whether to load nonzeros directly during path-discovery or beforehand in batch - { - int tile_num_rows = tile_end_coord.x - tile_start_coord.x; - int tile_num_nonzeros = tile_end_coord.y - tile_start_coord.y; - OffsetT* s_tile_row_end_offsets = &temp_storage.merge_items[0].row_end_offset; - - // Gather the row end-offsets for the merge tile into shared memory - for (int item = threadIdx.x; item <= tile_num_rows; item += BLOCK_THREADS) - { - s_tile_row_end_offsets[item] = wd_row_end_offsets[tile_start_coord.x + item]; - } - - CTA_SYNC(); - - // Search for the thread's starting coordinate within the merge tile - CountingInputIterator tile_nonzero_indices(tile_start_coord.y); - CoordinateT thread_start_coord; - - MergePathSearch( - OffsetT(threadIdx.x * ITEMS_PER_THREAD), // Diagonal - s_tile_row_end_offsets, // List A - tile_nonzero_indices, // List B - tile_num_rows, - tile_num_nonzeros, - thread_start_coord); - - CTA_SYNC(); // Perf-sync - - // Compute the thread's merge path segment - CoordinateT thread_current_coord = thread_start_coord; - KeyValuePairT scan_segment[ITEMS_PER_THREAD]; - - ValueT running_total = 0.0; - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - OffsetT nonzero_idx = 
CUB_MIN(tile_nonzero_indices[thread_current_coord.y], spmv_params.num_nonzeros - 1); - OffsetT column_idx = wd_column_indices[nonzero_idx]; - ValueT value = wd_values[nonzero_idx]; - ValueT vector_value = wd_vector_x[column_idx]; - ValueT nonzero = value * vector_value; - - OffsetT row_end_offset = s_tile_row_end_offsets[thread_current_coord.x]; - - if (tile_nonzero_indices[thread_current_coord.y] < row_end_offset) - { - // Move down (accumulate) - running_total += nonzero; - scan_segment[ITEM].value = running_total; - scan_segment[ITEM].key = tile_num_rows; - ++thread_current_coord.y; - } - else - { - // Move right (reset) - scan_segment[ITEM].value = running_total; - scan_segment[ITEM].key = thread_current_coord.x; - running_total = 0.0; - ++thread_current_coord.x; - } - } - - CTA_SYNC(); - - // Block-wide reduce-value-by-segment - KeyValuePairT tile_carry; - ReduceBySegmentOpT scan_op; - KeyValuePairT scan_item; - - scan_item.value = running_total; - scan_item.key = thread_current_coord.x; - - BlockScanT(temp_storage.scan).ExclusiveScan(scan_item, scan_item, scan_op, tile_carry); - - if (tile_num_rows > 0) - { - if (threadIdx.x == 0) - scan_item.key = -1; - - // Direct scatter - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - if (scan_segment[ITEM].key < tile_num_rows) - { - if (scan_item.key == scan_segment[ITEM].key) - scan_segment[ITEM].value = scan_item.value + scan_segment[ITEM].value; - - if (HAS_ALPHA) - { - scan_segment[ITEM].value *= spmv_params.alpha; - } - - if (HAS_BETA) - { - // Update the output vector element - ValueT addend = spmv_params.beta * wd_vector_y[tile_start_coord.x + scan_segment[ITEM].key]; - scan_segment[ITEM].value += addend; - } - - // Set the output vector element - spmv_params.d_vector_y[tile_start_coord.x + scan_segment[ITEM].key] = scan_segment[ITEM].value; - } - } - } - - // Return the tile's running carry-out - return tile_carry; - } -*/ - - - /** - * Consume a merge tile, specialized for indirect load of nonzeros - * / - __device__ __forceinline__ KeyValuePairT ConsumeTile( - int tile_idx, - CoordinateT tile_start_coord, - CoordinateT tile_end_coord, - Int2Type is_direct_load) ///< Marker type indicating whether to load nonzeros directly during path-discovery or beforehand in batch - { - int tile_num_rows = tile_end_coord.x - tile_start_coord.x; - int tile_num_nonzeros = tile_end_coord.y - tile_start_coord.y; - -#if (CUB_PTX_ARCH >= 520) - - OffsetT* s_tile_row_end_offsets = &temp_storage.merge_items[0].row_end_offset; - ValueT* s_tile_nonzeros = &temp_storage.merge_items[tile_num_rows + ITEMS_PER_THREAD].nonzero; - - // Gather the nonzeros for the merge tile into shared memory - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - int nonzero_idx = threadIdx.x + (ITEM * BLOCK_THREADS); - - ValueIteratorT a = wd_values + tile_start_coord.y + nonzero_idx; - ColumnIndicesIteratorT ci = wd_column_indices + tile_start_coord.y + nonzero_idx; - ValueT* s = s_tile_nonzeros + nonzero_idx; - - if (nonzero_idx < tile_num_nonzeros) - { - - OffsetT column_idx = *ci; - ValueT value = *a; - ValueT vector_value = spmv_params.t_vector_x[column_idx]; - vector_value = wd_vector_x[column_idx]; - ValueT nonzero = value * vector_value; - *s = nonzero; - } - } - - -#else - - OffsetT* s_tile_row_end_offsets = &temp_storage.merge_items[0].row_end_offset; - ValueT* s_tile_nonzeros = &temp_storage.merge_items[tile_num_rows + ITEMS_PER_THREAD].nonzero; - - // Gather the nonzeros for the merge tile into shared memory - if 
(tile_num_nonzeros > 0) - { - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - int nonzero_idx = threadIdx.x + (ITEM * BLOCK_THREADS); - nonzero_idx = CUB_MIN(nonzero_idx, tile_num_nonzeros - 1); - - OffsetT column_idx = wd_column_indices[tile_start_coord.y + nonzero_idx]; - ValueT value = wd_values[tile_start_coord.y + nonzero_idx]; - - ValueT vector_value = wd_vector_x[column_idx]; - ValueT nonzero = value * vector_value; - - s_tile_nonzeros[nonzero_idx] = nonzero; - } - } - -#endif - - // Gather the row end-offsets for the merge tile into shared memory - #pragma unroll 1 - for (int item = threadIdx.x; item <= tile_num_rows; item += BLOCK_THREADS) - { - s_tile_row_end_offsets[item] = wd_row_end_offsets[tile_start_coord.x + item]; - } - - CTA_SYNC(); - - // Search for the thread's starting coordinate within the merge tile - CountingInputIterator tile_nonzero_indices(tile_start_coord.y); - CoordinateT thread_start_coord; - - MergePathSearch( - OffsetT(threadIdx.x * ITEMS_PER_THREAD), // Diagonal - s_tile_row_end_offsets, // List A - tile_nonzero_indices, // List B - tile_num_rows, - tile_num_nonzeros, - thread_start_coord); - - CTA_SYNC(); // Perf-sync - - // Compute the thread's merge path segment - CoordinateT thread_current_coord = thread_start_coord; - KeyValuePairT scan_segment[ITEMS_PER_THREAD]; - ValueT running_total = 0.0; - - OffsetT row_end_offset = s_tile_row_end_offsets[thread_current_coord.x]; - ValueT nonzero = s_tile_nonzeros[thread_current_coord.y]; - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - if (tile_nonzero_indices[thread_current_coord.y] < row_end_offset) - { - // Move down (accumulate) - scan_segment[ITEM].value = nonzero; - running_total += nonzero; - ++thread_current_coord.y; - nonzero = s_tile_nonzeros[thread_current_coord.y]; - } - else - { - // Move right (reset) - scan_segment[ITEM].value = 0.0; - running_total = 0.0; - ++thread_current_coord.x; - row_end_offset = s_tile_row_end_offsets[thread_current_coord.x]; - } - - scan_segment[ITEM].key = thread_current_coord.x; - } - - CTA_SYNC(); - - // Block-wide reduce-value-by-segment - KeyValuePairT tile_carry; - ReduceBySegmentOpT scan_op; - KeyValuePairT scan_item; - - scan_item.value = running_total; - scan_item.key = thread_current_coord.x; - - BlockScanT(temp_storage.scan).ExclusiveScan(scan_item, scan_item, scan_op, tile_carry); - - if (threadIdx.x == 0) - { - scan_item.key = thread_start_coord.x; - scan_item.value = 0.0; - } - - if (tile_num_rows > 0) - { - - CTA_SYNC(); - - // Scan downsweep and scatter - ValueT* s_partials = &temp_storage.merge_items[0].nonzero; - - if (scan_item.key != scan_segment[0].key) - { - s_partials[scan_item.key] = scan_item.value; - } - else - { - scan_segment[0].value += scan_item.value; - } - - #pragma unroll - for (int ITEM = 1; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - if (scan_segment[ITEM - 1].key != scan_segment[ITEM].key) - { - s_partials[scan_segment[ITEM - 1].key] = scan_segment[ITEM - 1].value; - } - else - { - scan_segment[ITEM].value += scan_segment[ITEM - 1].value; - } - } - - CTA_SYNC(); - - #pragma unroll 1 - for (int item = threadIdx.x; item < tile_num_rows; item += BLOCK_THREADS) - { - spmv_params.d_vector_y[tile_start_coord.x + item] = s_partials[item]; - } - } - - // Return the tile's running carry-out - return tile_carry; - } -*/ - - /** - * Consume input tile - */ - __device__ __forceinline__ void ConsumeTile( - int merge_items_per_block, ///< [in] Number of merge tiles per block - KeyValuePairT* 
d_tile_carry_pairs) ///< [out] Pointer to the temporary array carry-out dot product row-ids, one per block - { - // Read our starting coordinates - if (threadIdx.x == 0) - { - // Search our starting coordinates - OffsetT diagonal = blockIdx.x * merge_items_per_block; - CoordinateT tile_coord; - CountingInputIterator nonzero_indices(0); - - // Search the merge path - MergePathSearch( - diagonal, - RowOffsetsSearchIteratorT(spmv_params.d_row_end_offsets), - nonzero_indices, - spmv_params.num_rows, - spmv_params.num_nonzeros, - tile_coord); - - temp_storage.tile_coord = tile_coord; - } - - CTA_SYNC(); - - CoordinateT tile_start_coord = temp_storage.tile_coord; - - - // Mooch - __shared__ volatile OffsetT x; - x = tile_start_coord.x; - - - // Turnstile - if (threadIdx.x == 0) - { - __threadfence(); - temp_storage.turnstile = atomicAdd(spmv_params.d_row_end_offsets - 1, 1); - } - - CTA_SYNC(); - - // Last block through turnstile does fixup - if (temp_storage.turnstile == gridDim.x - 1) - { - if (threadIdx.x == 0) - { - spmv_params.d_row_end_offsets[-1] = 0; - } - - } - - - } - - -}; - - - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/agent/agent_spmv_orig.cuh b/ml-xgboost/cub/cub/agent/agent_spmv_orig.cuh deleted file mode 100644 index 5179b09..0000000 --- a/ml-xgboost/cub/cub/agent/agent_spmv_orig.cuh +++ /dev/null @@ -1,924 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::AgentSpmv implements a stateful abstraction of CUDA thread blocks for participating in device-wide SpMV. 
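[The merge-path formulation that the deleted agents rely on treats the CSR row-end offsets and the counting sequence of nonzero indices as two sorted lists, and splits their merge evenly across threads by searching cross-diagonals. A minimal sequential sketch of that diagonal search, for illustration only (the Coord type and this signature are illustrative; the device-side MergePathSearch lives in thread_search.cuh):

    // Sequential sketch of the 2D merge-path search used by the SpMV agents.
    // Assumes list A = CSR row-end offsets and list B = the counting sequence
    // 0..num_nonzeros-1, so b[i] == i and never needs to be materialized.
    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct Coord { int x; int y; };   // x: row index, y: nonzero index

    Coord MergePathSearch(int diagonal, const std::vector<int>& row_end_offsets,
                          int num_rows, int num_nonzeros)
    {
        int x_min = std::max(diagonal - num_nonzeros, 0);
        int x_max = std::min(diagonal, num_rows);
        while (x_min < x_max)
        {
            int pivot = (x_min + x_max) >> 1;
            if (row_end_offsets[pivot] <= diagonal - pivot - 1)
                x_min = pivot + 1;    // diagonal crosses below the pivot
            else
                x_max = pivot;        // diagonal crosses above the pivot
        }
        return Coord{x_min, diagonal - x_min};
    }

    int main()
    {
        // 4 rows, 7 nonzeros; row lengths 2, 0, 3, 2 -> row-end offsets:
        std::vector<int> row_end = {2, 2, 5, 7};
        for (int diag = 0; diag <= 11; ++diag)    // 11 == num_rows + num_nonzeros
        {
            Coord c = MergePathSearch(diag, row_end, 4, 7);
            std::printf("diagonal %2d -> (row %d, nonzero %d)\n", diag, c.x, c.y);
        }
        return 0;
    }

Every thread block (and every thread within a tile) starts at the coordinate where its diagonal crosses the merge path, which is what balances work even when row lengths are wildly skewed.]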
- */
-
-#pragma once
-
-#include <iterator>
-
-#include "../util_type.cuh"
-#include "../block/block_reduce.cuh"
-#include "../block/block_scan.cuh"
-#include "../block/block_exchange.cuh"
-#include "../thread/thread_search.cuh"
-#include "../thread/thread_operators.cuh"
-#include "../iterator/cache_modified_input_iterator.cuh"
-#include "../iterator/counting_input_iterator.cuh"
-#include "../iterator/tex_ref_input_iterator.cuh"
-#include "../util_namespace.cuh"
-
-/// Optional outer namespace(s)
-CUB_NS_PREFIX
-
-/// CUB namespace
-namespace cub {
-
-
-/******************************************************************************
- * Tuning policy
- ******************************************************************************/
-
-/**
- * Parameterizable tuning policy type for AgentSpmv
- */
-template <
-    int                 _BLOCK_THREADS,                     ///< Threads per thread block
-    int                 _ITEMS_PER_THREAD,                  ///< Items per thread (per tile of input)
-    CacheLoadModifier   _ROW_OFFSETS_SEARCH_LOAD_MODIFIER,  ///< Cache load modifier for reading CSR row-offsets during search
-    CacheLoadModifier   _ROW_OFFSETS_LOAD_MODIFIER,         ///< Cache load modifier for reading CSR row-offsets
-    CacheLoadModifier   _COLUMN_INDICES_LOAD_MODIFIER,      ///< Cache load modifier for reading CSR column-indices
-    CacheLoadModifier   _VALUES_LOAD_MODIFIER,              ///< Cache load modifier for reading CSR values
-    CacheLoadModifier   _VECTOR_VALUES_LOAD_MODIFIER,       ///< Cache load modifier for reading vector values
-    bool                _DIRECT_LOAD_NONZEROS,              ///< Whether to load nonzeros directly from global during sequential merging (vs. pre-staged through shared memory)
-    BlockScanAlgorithm  _SCAN_ALGORITHM>                    ///< The BlockScan algorithm to use
-struct AgentSpmvPolicy
-{
-    enum
-    {
-        BLOCK_THREADS        = _BLOCK_THREADS,          ///< Threads per thread block
-        ITEMS_PER_THREAD     = _ITEMS_PER_THREAD,       ///< Items per thread (per tile of input)
-        DIRECT_LOAD_NONZEROS = _DIRECT_LOAD_NONZEROS,   ///< Whether to load nonzeros directly from global during sequential merging (pre-staged through shared memory)
-    };
-
-    static const CacheLoadModifier  ROW_OFFSETS_SEARCH_LOAD_MODIFIER = _ROW_OFFSETS_SEARCH_LOAD_MODIFIER;   ///< Cache load modifier for reading CSR row-offsets
-    static const CacheLoadModifier  ROW_OFFSETS_LOAD_MODIFIER        = _ROW_OFFSETS_LOAD_MODIFIER;          ///< Cache load modifier for reading CSR row-offsets
-    static const CacheLoadModifier  COLUMN_INDICES_LOAD_MODIFIER     = _COLUMN_INDICES_LOAD_MODIFIER;       ///< Cache load modifier for reading CSR column-indices
-    static const CacheLoadModifier  VALUES_LOAD_MODIFIER             = _VALUES_LOAD_MODIFIER;               ///< Cache load modifier for reading CSR values
-    static const CacheLoadModifier  VECTOR_VALUES_LOAD_MODIFIER      = _VECTOR_VALUES_LOAD_MODIFIER;        ///< Cache load modifier for reading vector values
-    static const BlockScanAlgorithm SCAN_ALGORITHM                   = _SCAN_ALGORITHM;                     ///< The BlockScan algorithm to use
-
-};
-
-
-/******************************************************************************
- * Thread block abstractions
- ******************************************************************************/
-
-template <
-    typename ValueT,    ///< Matrix and vector value type
-    typename OffsetT>   ///< Signed integer type for sequence offsets
-struct SpmvParams
-{
-    ValueT*     d_values;            ///< Pointer to the array of \p num_nonzeros values of the corresponding nonzero elements of matrix A.
- OffsetT* d_row_end_offsets; ///< Pointer to the array of \p m offsets demarcating the end of every row in \p d_column_indices and \p d_values - OffsetT* d_column_indices; ///< Pointer to the array of \p num_nonzeros column-indices of the corresponding nonzero elements of matrix A. (Indices are zero-valued.) - ValueT* d_vector_x; ///< Pointer to the array of \p num_cols values corresponding to the dense input vector x - ValueT* d_vector_y; ///< Pointer to the array of \p num_rows values corresponding to the dense output vector y - int num_rows; ///< Number of rows of matrix A. - int num_cols; ///< Number of columns of matrix A. - int num_nonzeros; ///< Number of nonzero elements of matrix A. - ValueT alpha; ///< Alpha multiplicand - ValueT beta; ///< Beta addend-multiplicand - - TexRefInputIterator t_vector_x; -}; - - -/** - * \brief AgentSpmv implements a stateful abstraction of CUDA thread blocks for participating in device-wide SpMV. - */ -template < - typename AgentSpmvPolicyT, ///< Parameterized AgentSpmvPolicy tuning policy type - typename ValueT, ///< Matrix and vector value type - typename OffsetT, ///< Signed integer type for sequence offsets - bool HAS_ALPHA, ///< Whether the input parameter \p alpha is 1 - bool HAS_BETA, ///< Whether the input parameter \p beta is 0 - int PTX_ARCH = CUB_PTX_ARCH> ///< PTX compute capability -struct AgentSpmv -{ - //--------------------------------------------------------------------- - // Types and constants - //--------------------------------------------------------------------- - - /// Constants - enum - { - BLOCK_THREADS = AgentSpmvPolicyT::BLOCK_THREADS, - ITEMS_PER_THREAD = AgentSpmvPolicyT::ITEMS_PER_THREAD, - TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, - }; - - /// 2D merge path coordinate type - typedef typename CubVector::Type CoordinateT; - - /// Input iterator wrapper types (for applying cache modifiers) - - typedef CacheModifiedInputIterator< - AgentSpmvPolicyT::ROW_OFFSETS_SEARCH_LOAD_MODIFIER, - OffsetT, - OffsetT> - RowOffsetsSearchIteratorT; - - typedef CacheModifiedInputIterator< - AgentSpmvPolicyT::ROW_OFFSETS_LOAD_MODIFIER, - OffsetT, - OffsetT> - RowOffsetsIteratorT; - - typedef CacheModifiedInputIterator< - AgentSpmvPolicyT::COLUMN_INDICES_LOAD_MODIFIER, - OffsetT, - OffsetT> - ColumnIndicesIteratorT; - - typedef CacheModifiedInputIterator< - AgentSpmvPolicyT::VALUES_LOAD_MODIFIER, - ValueT, - OffsetT> - ValueIteratorT; - - typedef CacheModifiedInputIterator< - AgentSpmvPolicyT::VECTOR_VALUES_LOAD_MODIFIER, - ValueT, - OffsetT> - VectorValueIteratorT; - - // Tuple type for scanning (pairs accumulated segment-value with segment-index) - typedef KeyValuePair KeyValuePairT; - - // Reduce-value-by-segment scan operator - typedef ReduceByKeyOp ReduceBySegmentOpT; - - // BlockReduce specialization - typedef BlockReduce< - ValueT, - BLOCK_THREADS, - BLOCK_REDUCE_WARP_REDUCTIONS> - BlockReduceT; - - // BlockScan specialization - typedef BlockScan< - KeyValuePairT, - BLOCK_THREADS, - AgentSpmvPolicyT::SCAN_ALGORITHM> - BlockScanT; - - // BlockScan specialization - typedef BlockScan< - ValueT, - BLOCK_THREADS, - AgentSpmvPolicyT::SCAN_ALGORITHM> - BlockPrefixSumT; - - // BlockExchange specialization - typedef BlockExchange< - ValueT, - BLOCK_THREADS, - ITEMS_PER_THREAD> - BlockExchangeT; - - /// Merge item type (either a non-zero value or a row-end offset) - union MergeItem - { - // Value type to pair with index type OffsetT (NullType if loading values directly during merge) - typedef typename If::Type MergeValueT; - - OffsetT 
row_end_offset; - MergeValueT nonzero; - }; - - /// Shared memory type required by this thread block - struct _TempStorage - { - CoordinateT tile_coords[2]; - - union - { - // Smem needed for tile of merge items - MergeItem merge_items[ITEMS_PER_THREAD + TILE_ITEMS + 1]; - - // Smem needed for block exchange - typename BlockExchangeT::TempStorage exchange; - - // Smem needed for block-wide reduction - typename BlockReduceT::TempStorage reduce; - - // Smem needed for tile scanning - typename BlockScanT::TempStorage scan; - - // Smem needed for tile prefix sum - typename BlockPrefixSumT::TempStorage prefix_sum; - }; - }; - - /// Temporary storage type (unionable) - struct TempStorage : Uninitialized<_TempStorage> {}; - - - //--------------------------------------------------------------------- - // Per-thread fields - //--------------------------------------------------------------------- - - - _TempStorage& temp_storage; /// Reference to temp_storage - - SpmvParams& spmv_params; - - ValueIteratorT wd_values; ///< Wrapped pointer to the array of \p num_nonzeros values of the corresponding nonzero elements of matrix A. - RowOffsetsIteratorT wd_row_end_offsets; ///< Wrapped Pointer to the array of \p m offsets demarcating the end of every row in \p d_column_indices and \p d_values - ColumnIndicesIteratorT wd_column_indices; ///< Wrapped Pointer to the array of \p num_nonzeros column-indices of the corresponding nonzero elements of matrix A. (Indices are zero-valued.) - VectorValueIteratorT wd_vector_x; ///< Wrapped Pointer to the array of \p num_cols values corresponding to the dense input vector x - VectorValueIteratorT wd_vector_y; ///< Wrapped Pointer to the array of \p num_cols values corresponding to the dense input vector x - - - //--------------------------------------------------------------------- - // Interface - //--------------------------------------------------------------------- - - /** - * Constructor - */ - __device__ __forceinline__ AgentSpmv( - TempStorage& temp_storage, ///< Reference to temp_storage - SpmvParams& spmv_params) ///< SpMV input parameter bundle - : - temp_storage(temp_storage.Alias()), - spmv_params(spmv_params), - wd_values(spmv_params.d_values), - wd_row_end_offsets(spmv_params.d_row_end_offsets), - wd_column_indices(spmv_params.d_column_indices), - wd_vector_x(spmv_params.d_vector_x), - wd_vector_y(spmv_params.d_vector_y) - {} - - - - - /** - * Consume a merge tile, specialized for direct-load of nonzeros - */ - __device__ __forceinline__ KeyValuePairT ConsumeTile( - int tile_idx, - CoordinateT tile_start_coord, - CoordinateT tile_end_coord, - Int2Type is_direct_load) ///< Marker type indicating whether to load nonzeros directly during path-discovery or beforehand in batch - { - int tile_num_rows = tile_end_coord.x - tile_start_coord.x; - int tile_num_nonzeros = tile_end_coord.y - tile_start_coord.y; - OffsetT* s_tile_row_end_offsets = &temp_storage.merge_items[0].row_end_offset; - - // Gather the row end-offsets for the merge tile into shared memory - for (int item = threadIdx.x; item <= tile_num_rows; item += BLOCK_THREADS) - { - s_tile_row_end_offsets[item] = wd_row_end_offsets[tile_start_coord.x + item]; - } - - CTA_SYNC(); - - // Search for the thread's starting coordinate within the merge tile - CountingInputIterator tile_nonzero_indices(tile_start_coord.y); - CoordinateT thread_start_coord; - - MergePathSearch( - OffsetT(threadIdx.x * ITEMS_PER_THREAD), // Diagonal - s_tile_row_end_offsets, // List A - tile_nonzero_indices, // List B - 
tile_num_rows, - tile_num_nonzeros, - thread_start_coord); - - CTA_SYNC(); // Perf-sync - - // Compute the thread's merge path segment - CoordinateT thread_current_coord = thread_start_coord; - KeyValuePairT scan_segment[ITEMS_PER_THREAD]; - - ValueT running_total = 0.0; - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - OffsetT nonzero_idx = CUB_MIN(tile_nonzero_indices[thread_current_coord.y], spmv_params.num_nonzeros - 1); - OffsetT column_idx = wd_column_indices[nonzero_idx]; - ValueT value = wd_values[nonzero_idx]; - - ValueT vector_value = spmv_params.t_vector_x[column_idx]; -#if (CUB_PTX_ARCH >= 350) - vector_value = wd_vector_x[column_idx]; -#endif - ValueT nonzero = value * vector_value; - - OffsetT row_end_offset = s_tile_row_end_offsets[thread_current_coord.x]; - - if (tile_nonzero_indices[thread_current_coord.y] < row_end_offset) - { - // Move down (accumulate) - running_total += nonzero; - scan_segment[ITEM].value = running_total; - scan_segment[ITEM].key = tile_num_rows; - ++thread_current_coord.y; - } - else - { - // Move right (reset) - scan_segment[ITEM].value = running_total; - scan_segment[ITEM].key = thread_current_coord.x; - running_total = 0.0; - ++thread_current_coord.x; - } - } - - CTA_SYNC(); - - // Block-wide reduce-value-by-segment - KeyValuePairT tile_carry; - ReduceBySegmentOpT scan_op; - KeyValuePairT scan_item; - - scan_item.value = running_total; - scan_item.key = thread_current_coord.x; - - BlockScanT(temp_storage.scan).ExclusiveScan(scan_item, scan_item, scan_op, tile_carry); - - if (tile_num_rows > 0) - { - if (threadIdx.x == 0) - scan_item.key = -1; - - // Direct scatter - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - if (scan_segment[ITEM].key < tile_num_rows) - { - if (scan_item.key == scan_segment[ITEM].key) - scan_segment[ITEM].value = scan_item.value + scan_segment[ITEM].value; - - if (HAS_ALPHA) - { - scan_segment[ITEM].value *= spmv_params.alpha; - } - - if (HAS_BETA) - { - // Update the output vector element - ValueT addend = spmv_params.beta * wd_vector_y[tile_start_coord.x + scan_segment[ITEM].key]; - scan_segment[ITEM].value += addend; - } - - // Set the output vector element - spmv_params.d_vector_y[tile_start_coord.x + scan_segment[ITEM].key] = scan_segment[ITEM].value; - } - } - } - - // Return the tile's running carry-out - return tile_carry; - } - - - - /** - * Consume a merge tile, specialized for indirect load of nonzeros - */ - __device__ __forceinline__ KeyValuePairT ConsumeTile( - int tile_idx, - CoordinateT tile_start_coord, - CoordinateT tile_end_coord, - Int2Type is_direct_load) ///< Marker type indicating whether to load nonzeros directly during path-discovery or beforehand in batch - { - int tile_num_rows = tile_end_coord.x - tile_start_coord.x; - int tile_num_nonzeros = tile_end_coord.y - tile_start_coord.y; - -#if (CUB_PTX_ARCH >= 520) - -/* - OffsetT* s_tile_row_end_offsets = &temp_storage.merge_items[tile_num_nonzeros].row_end_offset; - ValueT* s_tile_nonzeros = &temp_storage.merge_items[0].nonzero; - - OffsetT col_indices[ITEMS_PER_THREAD]; - ValueT mat_values[ITEMS_PER_THREAD]; - int nonzero_indices[ITEMS_PER_THREAD]; - - // Gather the nonzeros for the merge tile into shared memory - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - nonzero_indices[ITEM] = threadIdx.x + (ITEM * BLOCK_THREADS); - - ValueIteratorT a = wd_values + tile_start_coord.y + nonzero_indices[ITEM]; - ColumnIndicesIteratorT ci = wd_column_indices + tile_start_coord.y + 
nonzero_indices[ITEM]; - - col_indices[ITEM] = (nonzero_indices[ITEM] < tile_num_nonzeros) ? *ci : 0; - mat_values[ITEM] = (nonzero_indices[ITEM] < tile_num_nonzeros) ? *a : 0.0; - } - - CTA_SYNC(); - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - VectorValueIteratorT x = wd_vector_x + col_indices[ITEM]; - mat_values[ITEM] *= *x; - } - - CTA_SYNC(); - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - ValueT *s = s_tile_nonzeros + nonzero_indices[ITEM]; - - *s = mat_values[ITEM]; - } - - CTA_SYNC(); - -*/ - - OffsetT* s_tile_row_end_offsets = &temp_storage.merge_items[0].row_end_offset; - ValueT* s_tile_nonzeros = &temp_storage.merge_items[tile_num_rows + ITEMS_PER_THREAD].nonzero; - - // Gather the nonzeros for the merge tile into shared memory - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - int nonzero_idx = threadIdx.x + (ITEM * BLOCK_THREADS); - - ValueIteratorT a = wd_values + tile_start_coord.y + nonzero_idx; - ColumnIndicesIteratorT ci = wd_column_indices + tile_start_coord.y + nonzero_idx; - ValueT* s = s_tile_nonzeros + nonzero_idx; - - if (nonzero_idx < tile_num_nonzeros) - { - - OffsetT column_idx = *ci; - ValueT value = *a; - - ValueT vector_value = spmv_params.t_vector_x[column_idx]; - vector_value = wd_vector_x[column_idx]; - - ValueT nonzero = value * vector_value; - - *s = nonzero; - } - } - - -#else - - OffsetT* s_tile_row_end_offsets = &temp_storage.merge_items[0].row_end_offset; - ValueT* s_tile_nonzeros = &temp_storage.merge_items[tile_num_rows + ITEMS_PER_THREAD].nonzero; - - // Gather the nonzeros for the merge tile into shared memory - if (tile_num_nonzeros > 0) - { - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - int nonzero_idx = threadIdx.x + (ITEM * BLOCK_THREADS); - nonzero_idx = CUB_MIN(nonzero_idx, tile_num_nonzeros - 1); - - OffsetT column_idx = wd_column_indices[tile_start_coord.y + nonzero_idx]; - ValueT value = wd_values[tile_start_coord.y + nonzero_idx]; - - ValueT vector_value = spmv_params.t_vector_x[column_idx]; -#if (CUB_PTX_ARCH >= 350) - vector_value = wd_vector_x[column_idx]; -#endif - ValueT nonzero = value * vector_value; - - s_tile_nonzeros[nonzero_idx] = nonzero; - } - } - -#endif - - // Gather the row end-offsets for the merge tile into shared memory - #pragma unroll 1 - for (int item = threadIdx.x; item <= tile_num_rows; item += BLOCK_THREADS) - { - s_tile_row_end_offsets[item] = wd_row_end_offsets[tile_start_coord.x + item]; - } - - CTA_SYNC(); - - // Search for the thread's starting coordinate within the merge tile - CountingInputIterator tile_nonzero_indices(tile_start_coord.y); - CoordinateT thread_start_coord; - - MergePathSearch( - OffsetT(threadIdx.x * ITEMS_PER_THREAD), // Diagonal - s_tile_row_end_offsets, // List A - tile_nonzero_indices, // List B - tile_num_rows, - tile_num_nonzeros, - thread_start_coord); - - CTA_SYNC(); // Perf-sync - - // Compute the thread's merge path segment - CoordinateT thread_current_coord = thread_start_coord; - KeyValuePairT scan_segment[ITEMS_PER_THREAD]; - ValueT running_total = 0.0; - - OffsetT row_end_offset = s_tile_row_end_offsets[thread_current_coord.x]; - ValueT nonzero = s_tile_nonzeros[thread_current_coord.y]; - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - if (tile_nonzero_indices[thread_current_coord.y] < row_end_offset) - { - // Move down (accumulate) - scan_segment[ITEM].value = nonzero; - running_total += nonzero; - ++thread_current_coord.y; - 
nonzero = s_tile_nonzeros[thread_current_coord.y]; - } - else - { - // Move right (reset) - scan_segment[ITEM].value = 0.0; - running_total = 0.0; - ++thread_current_coord.x; - row_end_offset = s_tile_row_end_offsets[thread_current_coord.x]; - } - - scan_segment[ITEM].key = thread_current_coord.x; - } - - CTA_SYNC(); - - // Block-wide reduce-value-by-segment - KeyValuePairT tile_carry; - ReduceBySegmentOpT scan_op; - KeyValuePairT scan_item; - - scan_item.value = running_total; - scan_item.key = thread_current_coord.x; - - BlockScanT(temp_storage.scan).ExclusiveScan(scan_item, scan_item, scan_op, tile_carry); - - if (threadIdx.x == 0) - { - scan_item.key = thread_start_coord.x; - scan_item.value = 0.0; - } - - if (tile_num_rows > 0) - { - - CTA_SYNC(); - - // Scan downsweep and scatter - ValueT* s_partials = &temp_storage.merge_items[0].nonzero; - - if (scan_item.key != scan_segment[0].key) - { - s_partials[scan_item.key] = scan_item.value; - } - else - { - scan_segment[0].value += scan_item.value; - } - - #pragma unroll - for (int ITEM = 1; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - if (scan_segment[ITEM - 1].key != scan_segment[ITEM].key) - { - s_partials[scan_segment[ITEM - 1].key] = scan_segment[ITEM - 1].value; - } - else - { - scan_segment[ITEM].value += scan_segment[ITEM - 1].value; - } - } - - CTA_SYNC(); - - #pragma unroll 1 - for (int item = threadIdx.x; item < tile_num_rows; item += BLOCK_THREADS) - { - spmv_params.d_vector_y[tile_start_coord.x + item] = s_partials[item]; - } - } - - // Return the tile's running carry-out - return tile_carry; - } - - - - - - - - /** - * Consume a merge tile, specialized for indirect load of nonzeros - * / - template - __device__ __forceinline__ KeyValuePairT ConsumeTile1( - int tile_idx, - CoordinateT tile_start_coord, - CoordinateT tile_end_coord, - IsDirectLoadT is_direct_load) ///< Marker type indicating whether to load nonzeros directly during path-discovery or beforehand in batch - { - int tile_num_rows = tile_end_coord.x - tile_start_coord.x; - int tile_num_nonzeros = tile_end_coord.y - tile_start_coord.y; - - OffsetT* s_tile_row_end_offsets = &temp_storage.merge_items[0].row_end_offset; - - int warp_idx = threadIdx.x / WARP_THREADS; - int lane_idx = LaneId(); - - // Gather the row end-offsets for the merge tile into shared memory - #pragma unroll 1 - for (int item = threadIdx.x; item <= tile_num_rows; item += BLOCK_THREADS) - { - s_tile_row_end_offsets[item] = wd_row_end_offsets[tile_start_coord.x + item]; - } - - CTA_SYNC(); - - // Search for warp start/end coords - if (lane_idx == 0) - { - MergePathSearch( - OffsetT(warp_idx * ITEMS_PER_WARP), // Diagonal - s_tile_row_end_offsets, // List A - CountingInputIterator(tile_start_coord.y), // List B - tile_num_rows, - tile_num_nonzeros, - temp_storage.warp_coords[warp_idx]); - - CoordinateT last = {tile_num_rows, tile_num_nonzeros}; - temp_storage.warp_coords[WARPS] = last; - } - - CTA_SYNC(); - - CoordinateT warp_coord = temp_storage.warp_coords[warp_idx]; - CoordinateT warp_end_coord = temp_storage.warp_coords[warp_idx + 1]; - OffsetT warp_nonzero_idx = tile_start_coord.y + warp_coord.y; - - // Consume whole rows - #pragma unroll 1 - for (; warp_coord.x < warp_end_coord.x; ++warp_coord.x) - { - ValueT row_total = 0.0; - OffsetT row_end_offset = s_tile_row_end_offsets[warp_coord.x]; - - #pragma unroll 1 - for (OffsetT nonzero_idx = warp_nonzero_idx + lane_idx; - nonzero_idx < row_end_offset; - nonzero_idx += WARP_THREADS) - { - OffsetT column_idx = wd_column_indices[nonzero_idx]; - ValueT value 
= wd_values[nonzero_idx]; - ValueT vector_value = wd_vector_x[column_idx]; - row_total += value * vector_value; - } - - // Warp reduce - row_total = WarpReduceT(temp_storage.warp_reduce[warp_idx]).Sum(row_total); - - // Output - if (lane_idx == 0) - { - spmv_params.d_vector_y[tile_start_coord.x + warp_coord.x] = row_total; - } - - warp_nonzero_idx = row_end_offset; - } - - // Consume partial portion of thread's last row - if (warp_nonzero_idx < tile_start_coord.y + warp_end_coord.y) - { - ValueT row_total = 0.0; - for (OffsetT nonzero_idx = warp_nonzero_idx + lane_idx; - nonzero_idx < tile_start_coord.y + warp_end_coord.y; - nonzero_idx += WARP_THREADS) - { - - OffsetT column_idx = wd_column_indices[nonzero_idx]; - ValueT value = wd_values[nonzero_idx]; - ValueT vector_value = wd_vector_x[column_idx]; - row_total += value * vector_value; - } - - // Warp reduce - row_total = WarpReduceT(temp_storage.warp_reduce[warp_idx]).Sum(row_total); - - // Output - if (lane_idx == 0) - { - spmv_params.d_vector_y[tile_start_coord.x + warp_coord.x] = row_total; - } - } - - // Return the tile's running carry-out - KeyValuePairT tile_carry(tile_num_rows, 0.0); - return tile_carry; - } -*/ - - - - - - - - /** - * Consume a merge tile, specialized for indirect load of nonzeros - * / - __device__ __forceinline__ KeyValuePairT ConsumeTile2( - int tile_idx, - CoordinateT tile_start_coord, - CoordinateT tile_end_coord, - Int2Type is_direct_load) ///< Marker type indicating whether to load nonzeros directly during path-discovery or beforehand in batch - { - int tile_num_rows = tile_end_coord.x - tile_start_coord.x; - int tile_num_nonzeros = tile_end_coord.y - tile_start_coord.y; - - ValueT* s_tile_nonzeros = &temp_storage.merge_items[0].nonzero; - - ValueT nonzeros[ITEMS_PER_THREAD]; - - // Gather the nonzeros for the merge tile into shared memory - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - int nonzero_idx = threadIdx.x + (ITEM * BLOCK_THREADS); - nonzero_idx = CUB_MIN(nonzero_idx, tile_num_nonzeros - 1); - - OffsetT column_idx = wd_column_indices[tile_start_coord.y + nonzero_idx]; - ValueT value = wd_values[tile_start_coord.y + nonzero_idx]; - - ValueT vector_value = spmv_params.t_vector_x[column_idx]; -#if (CUB_PTX_ARCH >= 350) - vector_value = wd_vector_x[column_idx]; -#endif - - nonzeros[ITEM] = value * vector_value; - } - - // Exchange striped->blocked - BlockExchangeT(temp_storage.exchange).StripedToBlocked(nonzeros); - - CTA_SYNC(); - - // Compute an inclusive prefix sum - BlockPrefixSumT(temp_storage.prefix_sum).InclusiveSum(nonzeros, nonzeros); - - CTA_SYNC(); - - if (threadIdx.x == 0) - s_tile_nonzeros[0] = 0.0; - - // Scatter back to smem - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - int item_idx = (threadIdx.x * ITEMS_PER_THREAD) + ITEM + 1; - s_tile_nonzeros[item_idx] = nonzeros[ITEM]; - } - - CTA_SYNC(); - - // Gather the row end-offsets for the merge tile into shared memory - #pragma unroll 1 - for (int item = threadIdx.x; item < tile_num_rows; item += BLOCK_THREADS) - { - OffsetT start = CUB_MAX(wd_row_end_offsets[tile_start_coord.x + item - 1], tile_start_coord.y); - OffsetT end = wd_row_end_offsets[tile_start_coord.x + item]; - - start -= tile_start_coord.y; - end -= tile_start_coord.y; - - ValueT row_partial = s_tile_nonzeros[end] - s_tile_nonzeros[start]; - - spmv_params.d_vector_y[tile_start_coord.x + item] = row_partial; - } - - // Get the tile's carry-out - KeyValuePairT tile_carry; - if (threadIdx.x == 0) - { - tile_carry.key 
= tile_num_rows; - - OffsetT start = CUB_MAX(wd_row_end_offsets[tile_end_coord.x - 1], tile_start_coord.y); - start -= tile_start_coord.y; - OffsetT end = tile_num_nonzeros; - - tile_carry.value = s_tile_nonzeros[end] - s_tile_nonzeros[start]; - } - - // Return the tile's running carry-out - return tile_carry; - } -*/ - - - /** - * Consume input tile - */ - __device__ __forceinline__ void ConsumeTile( - CoordinateT* d_tile_coordinates, ///< [in] Pointer to the temporary array of tile starting coordinates - KeyValuePairT* d_tile_carry_pairs, ///< [out] Pointer to the temporary array carry-out dot product row-ids, one per block - int num_merge_tiles) ///< [in] Number of merge tiles - { - int tile_idx = (blockIdx.x * gridDim.y) + blockIdx.y; // Current tile index - - if (tile_idx >= num_merge_tiles) - return; - - // Read our starting coordinates - if (threadIdx.x < 2) - { - if (d_tile_coordinates == NULL) - { - // Search our starting coordinates - OffsetT diagonal = (tile_idx + threadIdx.x) * TILE_ITEMS; - CoordinateT tile_coord; - CountingInputIterator nonzero_indices(0); - - // Search the merge path - MergePathSearch( - diagonal, - RowOffsetsSearchIteratorT(spmv_params.d_row_end_offsets), - nonzero_indices, - spmv_params.num_rows, - spmv_params.num_nonzeros, - tile_coord); - - temp_storage.tile_coords[threadIdx.x] = tile_coord; - } - else - { - temp_storage.tile_coords[threadIdx.x] = d_tile_coordinates[tile_idx + threadIdx.x]; - } - } - - CTA_SYNC(); - - CoordinateT tile_start_coord = temp_storage.tile_coords[0]; - CoordinateT tile_end_coord = temp_storage.tile_coords[1]; - - // Consume multi-segment tile - KeyValuePairT tile_carry = ConsumeTile( - tile_idx, - tile_start_coord, - tile_end_coord, - Int2Type()); - - // Output the tile's carry-out - if (threadIdx.x == 0) - { - if (HAS_ALPHA) - tile_carry.value *= spmv_params.alpha; - - tile_carry.key += tile_start_coord.x; - d_tile_carry_pairs[tile_idx] = tile_carry; - } - } - - -}; - - - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/agent/agent_spmv_row_based.cuh b/ml-xgboost/cub/cub/agent/agent_spmv_row_based.cuh deleted file mode 100644 index cc2bd88..0000000 --- a/ml-xgboost/cub/cub/agent/agent_spmv_row_based.cuh +++ /dev/null @@ -1,470 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::AgentSpmv implements a stateful abstraction of CUDA thread blocks for participating in device-wide SpMV. - */ - -#pragma once - -#include - -#include "../util_type.cuh" -#include "../block/block_reduce.cuh" -#include "../block/block_scan.cuh" -#include "../block/block_exchange.cuh" -#include "../thread/thread_search.cuh" -#include "../thread/thread_operators.cuh" -#include "../iterator/cache_modified_input_iterator.cuh" -#include "../iterator/counting_input_iterator.cuh" -#include "../iterator/tex_ref_input_iterator.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/****************************************************************************** - * Tuning policy - ******************************************************************************/ - -/** - * Parameterizable tuning policy type for AgentSpmv - */ -template < - int _BLOCK_THREADS, ///< Threads per thread block - int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) - CacheLoadModifier _ROW_OFFSETS_SEARCH_LOAD_MODIFIER, ///< Cache load modifier for reading CSR row-offsets during search - CacheLoadModifier _ROW_OFFSETS_LOAD_MODIFIER, ///< Cache load modifier for reading CSR row-offsets - CacheLoadModifier _COLUMN_INDICES_LOAD_MODIFIER, ///< Cache load modifier for reading CSR column-indices - CacheLoadModifier _VALUES_LOAD_MODIFIER, ///< Cache load modifier for reading CSR values - CacheLoadModifier _VECTOR_VALUES_LOAD_MODIFIER, ///< Cache load modifier for reading vector values - bool _DIRECT_LOAD_NONZEROS, ///< Whether to load nonzeros directly from global during sequential merging (vs. 
pre-staged through shared memory) - BlockScanAlgorithm _SCAN_ALGORITHM> ///< The BlockScan algorithm to use -struct AgentSpmvPolicy -{ - enum - { - BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block - ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) - DIRECT_LOAD_NONZEROS = _DIRECT_LOAD_NONZEROS, ///< Whether to load nonzeros directly from global during sequential merging (pre-staged through shared memory) - }; - - static const CacheLoadModifier ROW_OFFSETS_SEARCH_LOAD_MODIFIER = _ROW_OFFSETS_SEARCH_LOAD_MODIFIER; ///< Cache load modifier for reading CSR row-offsets - static const CacheLoadModifier ROW_OFFSETS_LOAD_MODIFIER = _ROW_OFFSETS_LOAD_MODIFIER; ///< Cache load modifier for reading CSR row-offsets - static const CacheLoadModifier COLUMN_INDICES_LOAD_MODIFIER = _COLUMN_INDICES_LOAD_MODIFIER; ///< Cache load modifier for reading CSR column-indices - static const CacheLoadModifier VALUES_LOAD_MODIFIER = _VALUES_LOAD_MODIFIER; ///< Cache load modifier for reading CSR values - static const CacheLoadModifier VECTOR_VALUES_LOAD_MODIFIER = _VECTOR_VALUES_LOAD_MODIFIER; ///< Cache load modifier for reading vector values - static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; ///< The BlockScan algorithm to use - -}; - - -/****************************************************************************** - * Thread block abstractions - ******************************************************************************/ - -template < - typename ValueT, ///< Matrix and vector value type - typename OffsetT> ///< Signed integer type for sequence offsets -struct SpmvParams -{ - ValueT* d_values; ///< Pointer to the array of \p num_nonzeros values of the corresponding nonzero elements of matrix A. - OffsetT* d_row_end_offsets; ///< Pointer to the array of \p m offsets demarcating the end of every row in \p d_column_indices and \p d_values - OffsetT* d_column_indices; ///< Pointer to the array of \p num_nonzeros column-indices of the corresponding nonzero elements of matrix A. (Indices are zero-valued.) - ValueT* d_vector_x; ///< Pointer to the array of \p num_cols values corresponding to the dense input vector x - ValueT* d_vector_y; ///< Pointer to the array of \p num_rows values corresponding to the dense output vector y - int num_rows; ///< Number of rows of matrix A. - int num_cols; ///< Number of columns of matrix A. - int num_nonzeros; ///< Number of nonzero elements of matrix A. - ValueT alpha; ///< Alpha multiplicand - ValueT beta; ///< Beta addend-multiplicand - - TexRefInputIterator t_vector_x; -}; - - -/** - * \brief AgentSpmv implements a stateful abstraction of CUDA thread blocks for participating in device-wide SpMV. 
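[It helps to pin down what the SpmvParams bundle above is meant to compute before reading the agent itself. A minimal host-side sketch of the intended semantics, assuming row-END offsets (row i spans nonzeros [row_end[i-1], row_end[i]), with an implicit leading zero); the function and variable names here are illustrative, not part of CUB:

    // Reference semantics for SpmvParams: y = alpha * A * x + beta * y over a
    // CSR matrix described by values / row-end offsets / column indices.
    #include <cstdio>
    #include <vector>

    void SpmvReference(const std::vector<double>& values,
                       const std::vector<int>&    row_end_offsets,
                       const std::vector<int>&    column_indices,
                       const std::vector<double>& x,
                       std::vector<double>&       y,
                       double alpha, double beta)
    {
        int row_start = 0;   // row_end_offsets[-1] is an implicit 0
        for (size_t row = 0; row < row_end_offsets.size(); ++row)
        {
            double dot = 0.0;
            for (int nz = row_start; nz < row_end_offsets[row]; ++nz)
                dot += values[nz] * x[column_indices[nz]];
            y[row] = alpha * dot + beta * y[row];
            row_start = row_end_offsets[row];
        }
    }

    int main()
    {
        // 3x3 example: [[1,0,2],[0,3,0],[4,0,5]]
        std::vector<double> vals    = {1, 2, 3, 4, 5};
        std::vector<int>    row_end = {2, 3, 5};
        std::vector<int>    cols    = {0, 2, 1, 0, 2};
        std::vector<double> x = {1, 1, 1}, y = {0, 0, 0};
        SpmvReference(vals, row_end, cols, x, y, 1.0, 0.0);
        std::printf("y = [%g, %g, %g]\n", y[0], y[1], y[2]);   // expect [3, 3, 9]
        return 0;
    }

The HAS_ALPHA / HAS_BETA template flags on the agent exist so the alpha multiply and beta update can be compiled out when alpha == 1 and beta == 0.]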
- */ -template < - typename AgentSpmvPolicyT, ///< Parameterized AgentSpmvPolicy tuning policy type - typename ValueT, ///< Matrix and vector value type - typename OffsetT, ///< Signed integer type for sequence offsets - bool HAS_ALPHA, ///< Whether the input parameter \p alpha is 1 - bool HAS_BETA, ///< Whether the input parameter \p beta is 0 - int PTX_ARCH = CUB_PTX_ARCH> ///< PTX compute capability -struct AgentSpmv -{ - //--------------------------------------------------------------------- - // Types and constants - //--------------------------------------------------------------------- - - /// Constants - enum - { - BLOCK_THREADS = AgentSpmvPolicyT::BLOCK_THREADS, - ITEMS_PER_THREAD = AgentSpmvPolicyT::ITEMS_PER_THREAD, - TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, - }; - - /// 2D merge path coordinate type - typedef typename CubVector::Type CoordinateT; - - /// Input iterator wrapper types (for applying cache modifiers) - - typedef CacheModifiedInputIterator< - AgentSpmvPolicyT::ROW_OFFSETS_SEARCH_LOAD_MODIFIER, - OffsetT, - OffsetT> - RowOffsetsSearchIteratorT; - - typedef CacheModifiedInputIterator< - AgentSpmvPolicyT::ROW_OFFSETS_LOAD_MODIFIER, - OffsetT, - OffsetT> - RowOffsetsIteratorT; - - typedef CacheModifiedInputIterator< - AgentSpmvPolicyT::COLUMN_INDICES_LOAD_MODIFIER, - OffsetT, - OffsetT> - ColumnIndicesIteratorT; - - typedef CacheModifiedInputIterator< - AgentSpmvPolicyT::VALUES_LOAD_MODIFIER, - ValueT, - OffsetT> - ValueIteratorT; - - typedef CacheModifiedInputIterator< - AgentSpmvPolicyT::VECTOR_VALUES_LOAD_MODIFIER, - ValueT, - OffsetT> - VectorValueIteratorT; - - // Tuple type for scanning (pairs accumulated segment-value with segment-index) - typedef KeyValuePair KeyValuePairT; - - // Reduce-value-by-segment scan operator - typedef ReduceBySegmentOp ReduceBySegmentOpT; - - // Prefix functor type - typedef BlockScanRunningPrefixOp PrefixOpT; - - // BlockScan specialization - typedef BlockScan< - KeyValuePairT, - BLOCK_THREADS, - AgentSpmvPolicyT::SCAN_ALGORITHM> - BlockScanT; - - /// Shared memory type required by this thread block - struct _TempStorage - { - OffsetT tile_nonzero_idx; - OffsetT tile_nonzero_idx_end; - - // Smem needed for tile scanning - typename BlockScanT::TempStorage scan; - - // Smem needed for tile of merge items - ValueT nonzeros[TILE_ITEMS + 1]; - - }; - - /// Temporary storage type (unionable) - struct TempStorage : Uninitialized<_TempStorage> {}; - - - //--------------------------------------------------------------------- - // Per-thread fields - //--------------------------------------------------------------------- - - - _TempStorage& temp_storage; /// Reference to temp_storage - - SpmvParams& spmv_params; - - ValueIteratorT wd_values; ///< Wrapped pointer to the array of \p num_nonzeros values of the corresponding nonzero elements of matrix A. - RowOffsetsIteratorT wd_row_end_offsets; ///< Wrapped Pointer to the array of \p m offsets demarcating the end of every row in \p d_column_indices and \p d_values - ColumnIndicesIteratorT wd_column_indices; ///< Wrapped Pointer to the array of \p num_nonzeros column-indices of the corresponding nonzero elements of matrix A. (Indices are zero-valued.) 
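[The ReduceBySegmentOpT typedef above is what lets a single block-wide scan reduce many rows at once: keys count segment boundaries, and a boundary on the right-hand operand blocks the value from accumulating across it. A hedged sequential sketch of that combining rule (the Pair type and flag encoding are illustrative stand-ins for cub::KeyValuePair with cub::Sum):

    #include <cstdio>

    struct Pair { int key; double value; };

    // Mirrors reduce-by-segment scan semantics: the running sum is carried
    // forward only while the right operand does not start a new segment.
    Pair ScanOp(const Pair& a, const Pair& b)
    {
        Pair out;
        out.key   = a.key + b.key;
        out.value = b.key ? b.value : a.value + b.value;
        return out;
    }

    int main()
    {
        // Values 1..6 in two segments {1,2,3 | 4,5,6}; key=1 flags a segment start.
        Pair items[] = {{0,1},{0,2},{0,3},{1,4},{0,5},{0,6}};
        Pair running = items[0];
        for (int i = 1; i < 6; ++i)
        {
            running = ScanOp(running, items[i]);
            std::printf("after item %d: key=%d value=%g\n", i, running.key, running.value);
        }
        // Final value is 15 (= 4+5+6), the reduction of the last open segment.
        return 0;
    }

Applied inside the agent, the "segments" are matrix rows, so the scan's carry-out is exactly the partial dot product of the row that straddles the tile boundary.]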
- VectorValueIteratorT wd_vector_x; ///< Wrapped Pointer to the array of \p num_cols values corresponding to the dense input vector x - VectorValueIteratorT wd_vector_y; ///< Wrapped Pointer to the array of \p num_cols values corresponding to the dense input vector x - - - //--------------------------------------------------------------------- - // Interface - //--------------------------------------------------------------------- - - /** - * Constructor - */ - __device__ __forceinline__ AgentSpmv( - TempStorage& temp_storage, ///< Reference to temp_storage - SpmvParams& spmv_params) ///< SpMV input parameter bundle - : - temp_storage(temp_storage.Alias()), - spmv_params(spmv_params), - wd_values(spmv_params.d_values), - wd_row_end_offsets(spmv_params.d_row_end_offsets), - wd_column_indices(spmv_params.d_column_indices), - wd_vector_x(spmv_params.d_vector_x), - wd_vector_y(spmv_params.d_vector_y) - {} - - - __device__ __forceinline__ void InitNan(double& nan_token) - { - long long NAN_BITS = 0xFFF0000000000001; - nan_token = reinterpret_cast(NAN_BITS); // ValueT(0.0) / ValueT(0.0); - } - - - __device__ __forceinline__ void InitNan(float& nan_token) - { - int NAN_BITS = 0xFF800001; - nan_token = reinterpret_cast(NAN_BITS); // ValueT(0.0) / ValueT(0.0); - } - - - /** - * - */ - template - __device__ __forceinline__ void ConsumeStrip( - PrefixOpT& prefix_op, - ReduceBySegmentOpT& scan_op, - ValueT& row_total, - ValueT& row_start, - OffsetT& tile_nonzero_idx, - OffsetT tile_nonzero_idx_end, - OffsetT row_nonzero_idx, - OffsetT row_nonzero_idx_end) - { - ValueT NAN_TOKEN; - InitNan(NAN_TOKEN); - - - // - // Gather a strip of nonzeros into shared memory - // - - #pragma unroll - for (int ITEM = 0; ITEM < NNZ_PER_THREAD; ++ITEM) - { - - ValueT nonzero = 0.0; - - OffsetT local_nonzero_idx = (ITEM * BLOCK_THREADS) + threadIdx.x; - OffsetT nonzero_idx = tile_nonzero_idx + local_nonzero_idx; - - bool in_range = nonzero_idx < tile_nonzero_idx_end; - - OffsetT nonzero_idx2 = (in_range) ? - nonzero_idx : - tile_nonzero_idx_end - 1; - - OffsetT column_idx = wd_column_indices[nonzero_idx2]; - ValueT value = wd_values[nonzero_idx2]; - ValueT vector_value = wd_vector_x[column_idx]; - nonzero = value * vector_value; - - if (!in_range) - nonzero = 0.0; - - temp_storage.nonzeros[local_nonzero_idx] = nonzero; - } - - CTA_SYNC(); - - // - // Swap in NANs at local row start offsets - // - - OffsetT local_row_nonzero_idx = row_nonzero_idx - tile_nonzero_idx; - if ((local_row_nonzero_idx >= 0) && (local_row_nonzero_idx < TILE_ITEMS)) - { - // Thread's row starts in this strip - row_start = temp_storage.nonzeros[local_row_nonzero_idx]; - temp_storage.nonzeros[local_row_nonzero_idx] = NAN_TOKEN; - } - - CTA_SYNC(); - - // - // Segmented scan - // - - // Read strip of nonzeros into thread-blocked order, setup segment flags - KeyValuePairT scan_items[NNZ_PER_THREAD]; - for (int ITEM = 0; ITEM < NNZ_PER_THREAD; ++ITEM) - { - int local_nonzero_idx = (threadIdx.x * NNZ_PER_THREAD) + ITEM; - ValueT value = temp_storage.nonzeros[local_nonzero_idx]; - bool is_nan = (value != value); - - scan_items[ITEM].value = (is_nan) ? 
0.0 : value; - scan_items[ITEM].key = is_nan; - } - - KeyValuePairT tile_aggregate; - KeyValuePairT scan_items_out[NNZ_PER_THREAD]; - - BlockScanT(temp_storage.scan).ExclusiveScan(scan_items, scan_items_out, scan_op, tile_aggregate, prefix_op); - - // Save the inclusive sum for the last row - if (threadIdx.x == 0) - { - temp_storage.nonzeros[TILE_ITEMS] = prefix_op.running_total.value; - } - - // Store segment totals - for (int ITEM = 0; ITEM < NNZ_PER_THREAD; ++ITEM) - { - int local_nonzero_idx = (threadIdx.x * NNZ_PER_THREAD) + ITEM; - - if (scan_items[ITEM].key) - temp_storage.nonzeros[local_nonzero_idx] = scan_items_out[ITEM].value; - } - - CTA_SYNC(); - - // - // Update row totals - // - - OffsetT local_row_nonzero_idx_end = row_nonzero_idx_end - tile_nonzero_idx; - if ((local_row_nonzero_idx_end >= 0) && (local_row_nonzero_idx_end < TILE_ITEMS)) - { - // Thread's row ends in this strip - row_total = temp_storage.nonzeros[local_row_nonzero_idx_end]; - } - - tile_nonzero_idx += NNZ_PER_THREAD * BLOCK_THREADS; - } - - - - /** - * Consume input tile - */ - __device__ __forceinline__ void ConsumeTile( - int tile_idx, - int rows_per_tile) - { - // - // Read in tile of row ranges - // - - // Row range for the thread block - OffsetT tile_row_idx = tile_idx * rows_per_tile; - OffsetT tile_row_idx_end = CUB_MIN(tile_row_idx + rows_per_tile, spmv_params.num_rows); - - // Thread's row - OffsetT row_idx = tile_row_idx + threadIdx.x; - ValueT row_total = 0.0; - ValueT row_start = 0.0; - - // Nonzero range for the thread's row - OffsetT row_nonzero_idx = -1; - OffsetT row_nonzero_idx_end = -1; - - if (row_idx < tile_row_idx_end) - { - row_nonzero_idx = wd_row_end_offsets[row_idx - 1]; - row_nonzero_idx_end = wd_row_end_offsets[row_idx]; - - // Share block's starting nonzero offset - if (threadIdx.x == 0) - temp_storage.tile_nonzero_idx = row_nonzero_idx; - - // Share block's ending nonzero offset - if (row_idx == tile_row_idx_end - 1) - temp_storage.tile_nonzero_idx_end = row_nonzero_idx_end; - - // Zero-length rows don't participate - if (row_nonzero_idx == row_nonzero_idx_end) - { - row_nonzero_idx = -1; - row_nonzero_idx_end = -1; - } - } - - CTA_SYNC(); - - // - // Process strips of nonzeros - // - - // Nonzero range for the thread block - OffsetT tile_nonzero_idx = temp_storage.tile_nonzero_idx; - OffsetT tile_nonzero_idx_end = temp_storage.tile_nonzero_idx_end; - - KeyValuePairT tile_prefix(0, 0.0); - ReduceBySegmentOpT scan_op; - PrefixOpT prefix_op(tile_prefix, scan_op); - - #pragma unroll 1 - while (tile_nonzero_idx < tile_nonzero_idx_end) - { - ConsumeStrip(prefix_op, scan_op, row_total, row_start, - tile_nonzero_idx, tile_nonzero_idx_end, row_nonzero_idx, row_nonzero_idx_end); - - CTA_SYNC(); - } - - // - // Output to y - // - - if (row_idx < tile_row_idx_end) - { - if (row_nonzero_idx_end == tile_nonzero_idx_end) - { - // Last row grabs the inclusive sum - row_total = temp_storage.nonzeros[TILE_ITEMS]; - } - - spmv_params.d_vector_y[row_idx] = row_start + row_total; - } - } - - -}; - - - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/agent/single_pass_scan_operators.cuh b/ml-xgboost/cub/cub/agent/single_pass_scan_operators.cuh deleted file mode 100644 index 1674318..0000000 --- a/ml-xgboost/cub/cub/agent/single_pass_scan_operators.cuh +++ /dev/null @@ -1,792 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. 
- * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * Callback operator types for supplying BlockScan prefixes - */ - -#pragma once - -#include - -#include "../thread/thread_load.cuh" -#include "../thread/thread_store.cuh" -#include "../warp/warp_reduce.cuh" -#include "../util_arch.cuh" -#include "../util_device.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/****************************************************************************** - * Prefix functor type for maintaining a running prefix while scanning a - * region independent of other thread blocks - ******************************************************************************/ - -/** - * Stateful callback operator type for supplying BlockScan prefixes. - * Maintains a running prefix that can be applied to consecutive - * BlockScan operations. - */ -template < - typename T, ///< BlockScan value type - typename ScanOpT> ///< Wrapped scan operator type -struct BlockScanRunningPrefixOp -{ - ScanOpT op; ///< Wrapped scan operator - T running_total; ///< Running block-wide prefix - - /// Constructor - __device__ __forceinline__ BlockScanRunningPrefixOp(ScanOpT op) - : - op(op) - {} - - /// Constructor - __device__ __forceinline__ BlockScanRunningPrefixOp( - T starting_prefix, - ScanOpT op) - : - op(op), - running_total(starting_prefix) - {} - - /** - * Prefix callback operator. Returns the block-wide running_total in thread-0. 
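[The operator() that follows has a small but easy-to-miss contract: it is invoked once per consecutive block scan, must return the exclusive prefix for the current tile, and must fold the tile's aggregate into the running total for the next call. A sequential sketch of a functor with that shape (illustrative names; the real type is the BlockScanRunningPrefixOp defined here):

    #include <cstdio>

    template <typename T, typename Op>
    struct RunningPrefixOp
    {
        Op op;               // wrapped scan operator
        T  running_total;    // running prefix across tiles

        T operator()(T block_aggregate)
        {
            T prefix = running_total;                      // exclusive prefix for this tile
            running_total = op(running_total, block_aggregate);
            return prefix;
        }
    };

    int main()
    {
        auto add = [](int a, int b) { return a + b; };
        RunningPrefixOp<int, decltype(add)> prefix_op{add, 0};
        int tile_aggregates[] = {10, 7, 5};                // one aggregate per scanned tile
        for (int agg : tile_aggregates)
            std::printf("tile prefix = %d\n", prefix_op(agg));   // prints 0, 10, 17
        return 0;
    }

This is how the row-based agent above strings its per-strip segmented scans into one logical scan over the whole tile of nonzeros.]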
- */ - __device__ __forceinline__ T operator()( - const T &block_aggregate) ///< The aggregate sum of the BlockScan inputs - { - T retval = running_total; - running_total = op(running_total, block_aggregate); - return retval; - } -}; - - -/****************************************************************************** - * Generic tile status interface types for block-cooperative scans - ******************************************************************************/ - -/** - * Enumerations of tile status - */ -enum ScanTileStatus -{ - SCAN_TILE_OOB, // Out-of-bounds (e.g., padding) - SCAN_TILE_INVALID = 99, // Not yet processed - SCAN_TILE_PARTIAL, // Tile aggregate is available - SCAN_TILE_INCLUSIVE, // Inclusive tile prefix is available -}; - - -/** - * Tile status interface. - */ -template < - typename T, - bool SINGLE_WORD = Traits::PRIMITIVE> -struct ScanTileState; - - -/** - * Tile status interface specialized for scan status and value types - * that can be combined into one machine word that can be - * read/written coherently in a single access. - */ -template -struct ScanTileState -{ - // Status word type - typedef typename If<(sizeof(T) == 8), - long long, - typename If<(sizeof(T) == 4), - int, - typename If<(sizeof(T) == 2), - short, - char>::Type>::Type>::Type StatusWord; - - - // Unit word type - typedef typename If<(sizeof(T) == 8), - longlong2, - typename If<(sizeof(T) == 4), - int2, - typename If<(sizeof(T) == 2), - int, - uchar2>::Type>::Type>::Type TxnWord; - - - // Device word type - struct TileDescriptor - { - StatusWord status; - T value; - }; - - - // Constants - enum - { - TILE_STATUS_PADDING = CUB_PTX_WARP_THREADS, - }; - - - // Device storage - TileDescriptor *d_tile_status; - - - /// Constructor - __host__ __device__ __forceinline__ - ScanTileState() - : - d_tile_status(NULL) - {} - - - /// Initializer - __host__ __device__ __forceinline__ - cudaError_t Init( - int /*num_tiles*/, ///< [in] Number of tiles - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
- size_t /*temp_storage_bytes*/) ///< [in] Size in bytes of \t d_temp_storage allocation - { - d_tile_status = reinterpret_cast(d_temp_storage); - return cudaSuccess; - } - - - /** - * Compute device memory needed for tile status - */ - __host__ __device__ __forceinline__ - static cudaError_t AllocationSize( - int num_tiles, ///< [in] Number of tiles - size_t &temp_storage_bytes) ///< [out] Size in bytes of \t d_temp_storage allocation - { - temp_storage_bytes = (num_tiles + TILE_STATUS_PADDING) * sizeof(TileDescriptor); // bytes needed for tile status descriptors - return cudaSuccess; - } - - - /** - * Initialize (from device) - */ - __device__ __forceinline__ void InitializeStatus(int num_tiles) - { - int tile_idx = (blockIdx.x * blockDim.x) + threadIdx.x; - if (tile_idx < num_tiles) - { - // Not-yet-set - d_tile_status[TILE_STATUS_PADDING + tile_idx].status = StatusWord(SCAN_TILE_INVALID); - } - - if ((blockIdx.x == 0) && (threadIdx.x < TILE_STATUS_PADDING)) - { - // Padding - d_tile_status[threadIdx.x].status = StatusWord(SCAN_TILE_OOB); - } - } - - - /** - * Update the specified tile's inclusive value and corresponding status - */ - __device__ __forceinline__ void SetInclusive(int tile_idx, T tile_inclusive) - { - TileDescriptor tile_descriptor; - tile_descriptor.status = SCAN_TILE_INCLUSIVE; - tile_descriptor.value = tile_inclusive; - - TxnWord alias; - *reinterpret_cast(&alias) = tile_descriptor; - ThreadStore(reinterpret_cast(d_tile_status + TILE_STATUS_PADDING + tile_idx), alias); - } - - - /** - * Update the specified tile's partial value and corresponding status - */ - __device__ __forceinline__ void SetPartial(int tile_idx, T tile_partial) - { - TileDescriptor tile_descriptor; - tile_descriptor.status = SCAN_TILE_PARTIAL; - tile_descriptor.value = tile_partial; - - TxnWord alias; - *reinterpret_cast(&alias) = tile_descriptor; - ThreadStore(reinterpret_cast(d_tile_status + TILE_STATUS_PADDING + tile_idx), alias); - } - - /** - * Wait for the corresponding tile to become non-invalid - */ - __device__ __forceinline__ void WaitForValid( - int tile_idx, - StatusWord &status, - T &value) - { - TileDescriptor tile_descriptor; - do - { - __threadfence_block(); // prevent hoisting loads from loop - TxnWord alias = ThreadLoad(reinterpret_cast(d_tile_status + TILE_STATUS_PADDING + tile_idx)); - tile_descriptor = reinterpret_cast(alias); - - } while (WARP_ANY((tile_descriptor.status == SCAN_TILE_INVALID), 0xffffffff)); - - status = tile_descriptor.status; - value = tile_descriptor.value; - } - -}; - - - -/** - * Tile status interface specialized for scan status and value types that - * cannot be combined into one machine word. - */ -template -struct ScanTileState -{ - // Status word type - typedef char StatusWord; - - // Constants - enum - { - TILE_STATUS_PADDING = CUB_PTX_WARP_THREADS, - }; - - // Device storage - StatusWord *d_tile_status; - T *d_tile_partial; - T *d_tile_inclusive; - - /// Constructor - __host__ __device__ __forceinline__ - ScanTileState() - : - d_tile_status(NULL), - d_tile_partial(NULL), - d_tile_inclusive(NULL) - {} - - - /// Initializer - __host__ __device__ __forceinline__ - cudaError_t Init( - int num_tiles, ///< [in] Number of tiles - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
-/**
- * Tile status interface specialized for scan status and value types that
- * cannot be combined into one machine word.
- */
-template <typename T>
-struct ScanTileState<T, false>
-{
-    // Status word type
-    typedef char StatusWord;
-
-    // Constants
-    enum
-    {
-        TILE_STATUS_PADDING = CUB_PTX_WARP_THREADS,
-    };
-
-    // Device storage
-    StatusWord  *d_tile_status;
-    T           *d_tile_partial;
-    T           *d_tile_inclusive;
-
-    /// Constructor
-    __host__ __device__ __forceinline__
-    ScanTileState()
-    :
-        d_tile_status(NULL),
-        d_tile_partial(NULL),
-        d_tile_inclusive(NULL)
-    {}
-
-
-    /// Initializer
-    __host__ __device__ __forceinline__
-    cudaError_t Init(
-        int     num_tiles,                      ///< [in] Number of tiles
-        void    *d_temp_storage,                ///< [in] %Device-accessible allocation of temporary storage.  When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
-        size_t  temp_storage_bytes)             ///< [in] Size in bytes of \p d_temp_storage allocation
-    {
-        cudaError_t error = cudaSuccess;
-        do
-        {
-            void*   allocations[3];
-            size_t  allocation_sizes[3];
-
-            allocation_sizes[0] = (num_tiles + TILE_STATUS_PADDING) * sizeof(StatusWord);         // bytes needed for tile status descriptors
-            allocation_sizes[1] = (num_tiles + TILE_STATUS_PADDING) * sizeof(Uninitialized<T>);   // bytes needed for partials
-            allocation_sizes[2] = (num_tiles + TILE_STATUS_PADDING) * sizeof(Uninitialized<T>);   // bytes needed for inclusives
-
-            // Compute allocation pointers into the single storage blob
-            if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break;
-
-            // Alias the offsets
-            d_tile_status       = reinterpret_cast<StatusWord*>(allocations[0]);
-            d_tile_partial      = reinterpret_cast<T*>(allocations[1]);
-            d_tile_inclusive    = reinterpret_cast<T*>(allocations[2]);
-        }
-        while (0);
-
-        return error;
-    }
-
-
-    /**
-     * Compute device memory needed for tile status
-     */
-    __host__ __device__ __forceinline__
-    static cudaError_t AllocationSize(
-        int     num_tiles,                      ///< [in] Number of tiles
-        size_t  &temp_storage_bytes)            ///< [out] Size in bytes of \p d_temp_storage allocation
-    {
-        // Specify storage allocation requirements
-        size_t  allocation_sizes[3];
-        allocation_sizes[0] = (num_tiles + TILE_STATUS_PADDING) * sizeof(StatusWord);         // bytes needed for tile status descriptors
-        allocation_sizes[1] = (num_tiles + TILE_STATUS_PADDING) * sizeof(Uninitialized<T>);   // bytes needed for partials
-        allocation_sizes[2] = (num_tiles + TILE_STATUS_PADDING) * sizeof(Uninitialized<T>);   // bytes needed for inclusives
-
-        // Set the necessary size of the blob
-        void* allocations[3];
-        return CubDebug(AliasTemporaries(NULL, temp_storage_bytes, allocations, allocation_sizes));
-    }
-
-
-    /**
-     * Initialize (from device)
-     */
-    __device__ __forceinline__ void InitializeStatus(int num_tiles)
-    {
-        int tile_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
-        if (tile_idx < num_tiles)
-        {
-            // Not-yet-set
-            d_tile_status[TILE_STATUS_PADDING + tile_idx] = StatusWord(SCAN_TILE_INVALID);
-        }
-
-        if ((blockIdx.x == 0) && (threadIdx.x < TILE_STATUS_PADDING))
-        {
-            // Padding
-            d_tile_status[threadIdx.x] = StatusWord(SCAN_TILE_OOB);
-        }
-    }
-
-
-    /**
-     * Update the specified tile's inclusive value and corresponding status
-     */
-    __device__ __forceinline__ void SetInclusive(int tile_idx, T tile_inclusive)
-    {
-        // Update tile inclusive value
-        ThreadStore<STORE_CG>(d_tile_inclusive + TILE_STATUS_PADDING + tile_idx, tile_inclusive);
-
-        // Fence
-        __threadfence();
-
-        // Update tile status
-        ThreadStore<STORE_CG>(d_tile_status + TILE_STATUS_PADDING + tile_idx, StatusWord(SCAN_TILE_INCLUSIVE));
-    }
-
-
-    /**
-     * Update the specified tile's partial value and corresponding status
-     */
-    __device__ __forceinline__ void SetPartial(int tile_idx, T tile_partial)
-    {
-        // Update tile partial value
-        ThreadStore<STORE_CG>(d_tile_partial + TILE_STATUS_PADDING + tile_idx, tile_partial);
-
-        // Fence
-        __threadfence();
-
-        // Update tile status
-        ThreadStore<STORE_CG>(d_tile_status + TILE_STATUS_PADDING + tile_idx, StatusWord(SCAN_TILE_PARTIAL));
-    }
-
-    /**
-     * Wait for the corresponding tile to become non-invalid
-     */
-    __device__ __forceinline__ void WaitForValid(
-        int         tile_idx,
-        StatusWord  &status,
-        T           &value)
-    {
-        do {
-            status = ThreadLoad<LOAD_CG>(d_tile_status + TILE_STATUS_PADDING + tile_idx);
-
-            __threadfence();    // prevent hoisting loads from loop or loads below above this one
-
-        } while (status == SCAN_TILE_INVALID);
-
-        if (status == StatusWord(SCAN_TILE_PARTIAL))
-            value = ThreadLoad<LOAD_CG>(d_tile_partial + TILE_STATUS_PADDING + tile_idx);
-        else
-            value = ThreadLoad<LOAD_CG>(d_tile_inclusive + TILE_STATUS_PADDING + tile_idx);
-    }
-};
-
-
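When the value type is too wide to share a word with its status, the variant above publishes in two stores separated by __threadfence(), and consumers poll the status word before touching the payload; keeping separate partial and inclusive slots means a reader can never pair a status with the wrong payload. A minimal sketch of that ordering, with hypothetical names and volatile accesses standing in for ThreadStore<STORE_CG>/ThreadLoad<LOAD_CG>:

#include <cstdio>

__device__ int    d_status;   // 0 = invalid, 1 = ready (zero-initialized)
__device__ double d_value;    // too wide to pack next to a status byte

__global__ void Publish(double v)
{
    // Payload first ...
    *reinterpret_cast<volatile double*>(&d_value) = v;
    // ... fence so the payload is visible device-wide before the flag ...
    __threadfence();
    // ... then raise the flag: a consumer that sees it set is guaranteed by
    // the fence to read the new payload rather than stale data.
    *reinterpret_cast<volatile int*>(&d_status) = 1;
}

__global__ void Consume()
{
    while (*reinterpret_cast<volatile int*>(&d_status) == 0) { /* spin */ }
    __threadfence();              // order the flag load before the payload load
    printf("value = %f\n", *reinterpret_cast<volatile double*>(&d_value));
}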
-/******************************************************************************
- * ReduceByKey tile status interface types for block-cooperative scans
- ******************************************************************************/
-
-/**
- * Tile status interface for reduction by key.
- *
- */
-template <
-    typename    ValueT,
-    typename    KeyT,
-    bool        SINGLE_WORD = (Traits<ValueT>::PRIMITIVE) && (sizeof(ValueT) + sizeof(KeyT) < 16)>
-struct ReduceByKeyScanTileState;
-
-
-/**
- * Tile status interface for reduction by key, specialized for scan status and value types that
- * cannot be combined into one machine word.
- */
-template <
-    typename    ValueT,
-    typename    KeyT>
-struct ReduceByKeyScanTileState<ValueT, KeyT, false> :
-    ScanTileState<KeyValuePair<KeyT, ValueT> >
-{
-    typedef ScanTileState<KeyValuePair<KeyT, ValueT> > SuperClass;
-
-    /// Constructor
-    __host__ __device__ __forceinline__
-    ReduceByKeyScanTileState() : SuperClass() {}
-};
-
-
-/**
- * Tile status interface for reduction by key, specialized for scan status and value types that
- * can be combined into one machine word that can be read/written coherently in a single access.
- */
-template <
-    typename    ValueT,
-    typename    KeyT>
-struct ReduceByKeyScanTileState<ValueT, KeyT, true>
-{
-    typedef KeyValuePair<KeyT, ValueT> KeyValuePairT;
-
-    // Constants
-    enum
-    {
-        PAIR_SIZE           = sizeof(ValueT) + sizeof(KeyT),
-        TXN_WORD_SIZE       = 1 << Log2<PAIR_SIZE + 1>::VALUE,
-        STATUS_WORD_SIZE    = TXN_WORD_SIZE - PAIR_SIZE,
-
-        TILE_STATUS_PADDING = CUB_PTX_WARP_THREADS,
-    };
-
-    // Status word type
-    typedef typename If<(STATUS_WORD_SIZE == 8),
-        long long,
-        typename If<(STATUS_WORD_SIZE == 4),
-            int,
-            typename If<(STATUS_WORD_SIZE == 2),
-                short,
-                char>::Type>::Type>::Type StatusWord;
-
-    // Status word type
-    typedef typename If<(TXN_WORD_SIZE == 16),
-        longlong2,
-        typename If<(TXN_WORD_SIZE == 8),
-            long long,
-            int>::Type>::Type TxnWord;
-
-    // Device word type (for when sizeof(ValueT) == sizeof(KeyT))
-    struct TileDescriptorBigStatus
-    {
-        KeyT        key;
-        ValueT      value;
-        StatusWord  status;
-    };
-
-    // Device word type (for when sizeof(ValueT) != sizeof(KeyT))
-    struct TileDescriptorLittleStatus
-    {
-        ValueT      value;
-        StatusWord  status;
-        KeyT        key;
-    };
-
-    // Device word type
-    typedef typename If<
-            (sizeof(ValueT) == sizeof(KeyT)),
-            TileDescriptorBigStatus,
-            TileDescriptorLittleStatus>::Type
-        TileDescriptor;
-
-
-    // Device storage
-    TileDescriptor *d_tile_status;
-
-
-    /// Constructor
-    __host__ __device__ __forceinline__
-    ReduceByKeyScanTileState()
-    :
-        d_tile_status(NULL)
-    {}
-
-
-    /// Initializer
-    __host__ __device__ __forceinline__
-    cudaError_t Init(
-        int     /*num_tiles*/,                  ///< [in] Number of tiles
-        void    *d_temp_storage,                ///< [in] %Device-accessible allocation of temporary storage.  When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
-        size_t  /*temp_storage_bytes*/)         ///< [in] Size in bytes of \p d_temp_storage allocation
-    {
-        d_tile_status = reinterpret_cast<TileDescriptor*>(d_temp_storage);
-        return cudaSuccess;
-    }
-
-
-    /**
-     * Compute device memory needed for tile status
-     */
-    __host__ __device__ __forceinline__
-    static cudaError_t AllocationSize(
-        int     num_tiles,                      ///< [in] Number of tiles
-        size_t  &temp_storage_bytes)            ///< [out] Size in bytes of \p d_temp_storage allocation
-    {
-        temp_storage_bytes = (num_tiles + TILE_STATUS_PADDING) * sizeof(TileDescriptor);   // bytes needed for tile status descriptors
-        return cudaSuccess;
-    }
-
-
-    /**
-     * Initialize (from device)
-     */
-    __device__ __forceinline__ void InitializeStatus(int num_tiles)
-    {
-        int tile_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
-        if (tile_idx < num_tiles)
-        {
-            // Not-yet-set
-            d_tile_status[TILE_STATUS_PADDING + tile_idx].status = StatusWord(SCAN_TILE_INVALID);
-        }
-
-        if ((blockIdx.x == 0) && (threadIdx.x < TILE_STATUS_PADDING))
-        {
-            // Padding
-            d_tile_status[threadIdx.x].status = StatusWord(SCAN_TILE_OOB);
-        }
-    }
-
-
-    /**
-     * Update the specified tile's inclusive value and corresponding status
-     */
-    __device__ __forceinline__ void SetInclusive(int tile_idx, KeyValuePairT tile_inclusive)
-    {
-        TileDescriptor tile_descriptor;
-        tile_descriptor.status = SCAN_TILE_INCLUSIVE;
-        tile_descriptor.value = tile_inclusive.value;
-        tile_descriptor.key = tile_inclusive.key;
-
-        TxnWord alias;
-        *reinterpret_cast<TileDescriptor*>(&alias) = tile_descriptor;
-        ThreadStore<STORE_CG>(reinterpret_cast<TxnWord*>(d_tile_status + TILE_STATUS_PADDING + tile_idx), alias);
-    }
-
-
-    /**
-     * Update the specified tile's partial value and corresponding status
-     */
-    __device__ __forceinline__ void SetPartial(int tile_idx, KeyValuePairT tile_partial)
-    {
-        TileDescriptor tile_descriptor;
-        tile_descriptor.status = SCAN_TILE_PARTIAL;
-        tile_descriptor.value = tile_partial.value;
-        tile_descriptor.key = tile_partial.key;
-
-        TxnWord alias;
-        *reinterpret_cast<TileDescriptor*>(&alias) = tile_descriptor;
-        ThreadStore<STORE_CG>(reinterpret_cast<TxnWord*>(d_tile_status + TILE_STATUS_PADDING + tile_idx), alias);
-    }
-
-    /**
-     * Wait for the corresponding tile to become non-invalid
-     */
-    __device__ __forceinline__ void WaitForValid(
-        int             tile_idx,
-        StatusWord      &status,
-        KeyValuePairT   &value)
-    {
-        TxnWord         alias           = ThreadLoad<LOAD_CG>(reinterpret_cast<TxnWord*>(d_tile_status + TILE_STATUS_PADDING + tile_idx));
-        TileDescriptor  tile_descriptor = reinterpret_cast<TileDescriptor&>(alias);
-
-        while (tile_descriptor.status == SCAN_TILE_INVALID)
-        {
-            __threadfence_block(); // prevent hoisting loads from loop
-
-            alias = ThreadLoad<LOAD_CG>(reinterpret_cast<TxnWord*>(d_tile_status + TILE_STATUS_PADDING + tile_idx));
-            tile_descriptor = reinterpret_cast<TileDescriptor&>(alias);
-        }
-
-        status = tile_descriptor.status;
-        value.value = tile_descriptor.value;
-        value.key = tile_descriptor.key;
-    }
-
-};
-
-
-/******************************************************************************
- * Prefix call-back operator for coupling local block scan within a
- * block-cooperative scan
- ******************************************************************************/
-
-/**
- * Stateful block-scan prefix functor.  Provides the running prefix for
- * the current tile by using the call-back warp to wait on
- * aggregates/prefixes from predecessor tiles to become available.
- */
-template <
-    typename    T,
-    typename    ScanOpT,
-    typename    ScanTileStateT,
-    int         PTX_ARCH = CUB_PTX_ARCH>
-struct TilePrefixCallbackOp
-{
-    // Parameterized warp reduce
-    typedef WarpReduce<T, CUB_PTX_WARP_THREADS, PTX_ARCH> WarpReduceT;
-
-    // Temporary storage type
-    struct _TempStorage
-    {
-        typename WarpReduceT::TempStorage   warp_reduce;
-        T                                   exclusive_prefix;
-        T                                   inclusive_prefix;
-        T                                   block_aggregate;
-    };
-
-    // Alias wrapper allowing temporary storage to be unioned
-    struct TempStorage : Uninitialized<_TempStorage> {};
-
-    // Type of status word
-    typedef typename ScanTileStateT::StatusWord StatusWord;
-
-    // Fields
-    _TempStorage&               temp_storage;       ///< Reference to temporary storage
-    ScanTileStateT&             tile_status;        ///< Interface to tile status
-    ScanOpT                     scan_op;            ///< Binary scan operator
-    int                         tile_idx;           ///< The current tile index
-    T                           exclusive_prefix;   ///< Exclusive prefix for the tile
-    T                           inclusive_prefix;   ///< Inclusive prefix for the tile
-
-    // Constructor
-    __device__ __forceinline__
-    TilePrefixCallbackOp(
-        ScanTileStateT  &tile_status,
-        TempStorage     &temp_storage,
-        ScanOpT         scan_op,
-        int             tile_idx)
-    :
-        temp_storage(temp_storage.Alias()),
-        tile_status(tile_status),
-        scan_op(scan_op),
-        tile_idx(tile_idx) {}
-
-
-    // Block until all predecessors within the warp-wide window have non-invalid status
-    __device__ __forceinline__
-    void ProcessWindow(
-        int         predecessor_idx,        ///< Preceding tile index to inspect
-        StatusWord  &predecessor_status,    ///< [out] Preceding tile status
-        T           &window_aggregate)      ///< [out] Relevant partial reduction from this window of preceding tiles
-    {
-        T value;
-        tile_status.WaitForValid(predecessor_idx, predecessor_status, value);
-
-        // Perform a segmented reduction to get the prefix for the current window.
-        // Use the swizzled scan operator because we are now scanning *down* towards thread0.
-
-        int tail_flag = (predecessor_status == StatusWord(SCAN_TILE_INCLUSIVE));
-        window_aggregate = WarpReduceT(temp_storage.warp_reduce).TailSegmentedReduce(
-            value,
-            tail_flag,
-            SwizzleScanOp<ScanOpT>(scan_op));
-    }
-
-
-    // BlockScan prefix callback functor (called by the first warp)
-    __device__ __forceinline__
-    T operator()(T block_aggregate)
-    {
-
-        // Update our status with our tile-aggregate
-        if (threadIdx.x == 0)
-        {
-            temp_storage.block_aggregate = block_aggregate;
-            tile_status.SetPartial(tile_idx, block_aggregate);
-        }
-
-        int         predecessor_idx = tile_idx - threadIdx.x - 1;
-        StatusWord  predecessor_status;
-        T           window_aggregate;
-
-        // Wait for the warp-wide window of predecessor tiles to become valid
-        ProcessWindow(predecessor_idx, predecessor_status, window_aggregate);
-
-        // The exclusive tile prefix starts out as the current window aggregate
-        exclusive_prefix = window_aggregate;
-
-        // Keep sliding the window back until we come across a tile whose inclusive prefix is known
-        while (WARP_ALL((predecessor_status != StatusWord(SCAN_TILE_INCLUSIVE)), 0xffffffff))
-        {
-            predecessor_idx -= CUB_PTX_WARP_THREADS;
-
-            // Update exclusive tile prefix with the window prefix
-            ProcessWindow(predecessor_idx, predecessor_status, window_aggregate);
-            exclusive_prefix = scan_op(window_aggregate, exclusive_prefix);
-        }
-
-        // Compute the inclusive tile prefix and update the status for this tile
-        if (threadIdx.x == 0)
-        {
-            inclusive_prefix = scan_op(exclusive_prefix, block_aggregate);
-            tile_status.SetInclusive(tile_idx, inclusive_prefix);
-
-            temp_storage.exclusive_prefix = exclusive_prefix;
-            temp_storage.inclusive_prefix = inclusive_prefix;
-        }
-
-        // Return exclusive_prefix
-        return exclusive_prefix;
-    }
-
-    // Get the exclusive prefix stored in temporary storage
-    __device__ __forceinline__
-    T GetExclusivePrefix()
-    {
-        return temp_storage.exclusive_prefix;
-    }
-
-    // Get the inclusive prefix stored in temporary storage
-    __device__ __forceinline__
-    T GetInclusivePrefix()
-    {
-        return temp_storage.inclusive_prefix;
-    }
-
-    // Get the block aggregate stored in temporary storage
-    __device__ __forceinline__
-    T GetBlockAggregate()
-    {
-        return temp_storage.block_aggregate;
-    }
-
-};
-
-
-}               // CUB namespace
-CUB_NS_POSTFIX  // Optional outer namespace(s)
-
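TilePrefixCallbackOp above is the decoupled look-back that couples these tile descriptors into a single-pass scan: a tile publishes its partial aggregate, then slides a warp-wide window backward until it meets a predecessor whose inclusive prefix is known. A much-simplified sketch of the same idea for a float sum scan, run by one thread per block (hypothetical names; the deleted code instead polls 32 predecessors at once and folds the window with a tail-segmented warp reduction):

enum TileFlag { INVALID = 0, PARTIAL = 1, INCLUSIVE = 2 };

// Separate partial/inclusive slots, as in ScanTileState<T, false>, so a
// reader can never pair a stale flag with the wrong payload.
struct Tile { int flag; float partial; float inclusive; };

// Called by thread 0 of block blockIdx.x with the block's local aggregate;
// tiles[] is assumed initialized to INVALID before the scan kernel launches.
__device__ float LookBackPrefix(volatile Tile *tiles, float block_sum)
{
    // 1. Publish our partial so successor blocks can make progress.
    tiles[blockIdx.x].partial = block_sum;
    __threadfence();
    tiles[blockIdx.x].flag = PARTIAL;

    // 2. Walk backward, accumulating partials, until a tile with a known
    //    inclusive prefix terminates the look-back.
    float exclusive = 0.0f;
    for (int pred = blockIdx.x - 1; pred >= 0; --pred)
    {
        int flag;
        while ((flag = tiles[pred].flag) == INVALID) { /* spin */ }
        __threadfence();                // order flag load before payload loads
        if (flag == INCLUSIVE)
        {
            exclusive = tiles[pred].inclusive + exclusive;
            break;
        }
        exclusive = tiles[pred].partial + exclusive;
    }

    // 3. Publish our inclusive prefix so later tiles can stop here.
    tiles[blockIdx.x].inclusive = exclusive + block_sum;
    __threadfence();
    tiles[blockIdx.x].flag = INCLUSIVE;

    return exclusive;                   // running prefix for this tile
}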
diff --git a/ml-xgboost/cub/cub/block/block_adjacent_difference.cuh b/ml-xgboost/cub/cub/block/block_adjacent_difference.cuh
deleted file mode 100644
index 7aeaaab..0000000
--- a/ml-xgboost/cub/cub/block/block_adjacent_difference.cuh
+++ /dev/null
@@ -1,596 +0,0 @@
-/******************************************************************************
- * Copyright (c) 2011, Duane Merrill.  All rights reserved.
- * Copyright (c) 2011-2016, NVIDIA CORPORATION.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of the NVIDIA CORPORATION nor the
- *       names of its contributors may be used to endorse or promote products
- *       derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- ******************************************************************************/
-
-/**
- * \file
- * The cub::BlockAdjacentDifference class provides [collective](index.html#sec0) methods for flagging discontinuities within an ordered set of items partitioned across a CUDA thread block.
- */
-
-#pragma once
-
-#include "../util_type.cuh"
-#include "../util_ptx.cuh"
-#include "../util_namespace.cuh"
-
-/// Optional outer namespace(s)
-CUB_NS_PREFIX
-
-/// CUB namespace
-namespace cub {
-
-template <
-    typename    T,
-    int         BLOCK_DIM_X,
-    int         BLOCK_DIM_Y     = 1,
-    int         BLOCK_DIM_Z     = 1,
-    int         PTX_ARCH        = CUB_PTX_ARCH>
-class BlockAdjacentDifference
-{
-private:
-
-    /******************************************************************************
-     * Constants and type definitions
-     ******************************************************************************/
-
-    /// Constants
-    enum
-    {
-        /// The thread block size in threads
-        BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,
-    };
-
-
-    /// Shared memory storage layout type (last element from each thread's input)
-    struct _TempStorage
-    {
-        T first_items[BLOCK_THREADS];
-        T last_items[BLOCK_THREADS];
-    };
-
-
-    /******************************************************************************
-     * Utility methods
-     ******************************************************************************/
-
-    /// Internal storage allocator
-    __device__ __forceinline__ _TempStorage& PrivateStorage()
-    {
-        __shared__ _TempStorage private_storage;
-        return private_storage;
-    }
-
-
-    /// Specialization for when FlagOp has third index param
-    template <typename FlagOp, bool HAS_PARAM = BinaryOpHasIdxParam<T, FlagOp>::HAS_PARAM>
-    struct ApplyOp
-    {
-        // Apply flag operator
-        static __device__ __forceinline__ T FlagT(FlagOp flag_op, const T &a, const T &b, int idx)
-        {
-            return flag_op(b, a, idx);
-        }
-    };
-
-    /// Specialization for when FlagOp does not have a third index param
-    template <typename FlagOp>
-    struct ApplyOp<FlagOp, false>
-    {
-        // Apply flag operator
-        static __device__ __forceinline__ T FlagT(FlagOp flag_op, const T &a, const T &b, int /*idx*/)
-        {
-            return flag_op(b, a);
-        }
-    };
-
-    /// Templated unrolling of item comparison (inductive case)
-    template <int ITERATION, int MAX_ITERATIONS>
-    struct Iterate
-    {
-        // Head flags
-        template <
-            int             ITEMS_PER_THREAD,
-            typename        FlagT,
-            typename        FlagOp>
-        static __device__ __forceinline__ void FlagHeads(
-            int                     linear_tid,
-            FlagT                   (&flags)[ITEMS_PER_THREAD],         ///< [out] Calling thread's discontinuity head_flags
-            T                       (&input)[ITEMS_PER_THREAD],         ///< [in] Calling thread's input items
-            T                       (&preds)[ITEMS_PER_THREAD],         ///< [out] Calling thread's predecessor items
-            FlagOp                  flag_op)                            ///< [in] Binary boolean flag predicate
-        {
-            preds[ITERATION] = input[ITERATION - 1];
-
-            flags[ITERATION] = ApplyOp<FlagOp>::FlagT(
-                flag_op,
-                preds[ITERATION],
-                input[ITERATION],
-                (linear_tid * ITEMS_PER_THREAD) + ITERATION);
-
-            Iterate<ITERATION + 1, MAX_ITERATIONS>::FlagHeads(linear_tid, flags, input, preds, flag_op);
-        }
-
-        // Tail flags
-        template <
-            int             ITEMS_PER_THREAD,
-            typename        FlagT,
-            typename        FlagOp>
-        static __device__ __forceinline__ void FlagTails(
-            int                     linear_tid,
-            FlagT                   (&flags)[ITEMS_PER_THREAD],         ///< [out] Calling thread's discontinuity head_flags
-            T                       (&input)[ITEMS_PER_THREAD],         ///< [in] Calling thread's input items
-            FlagOp                  flag_op)                            ///< [in] Binary boolean flag predicate
-        {
-            flags[ITERATION] = ApplyOp<FlagOp>::FlagT(
-                flag_op,
-                input[ITERATION],
-                input[ITERATION + 1],
-                (linear_tid * ITEMS_PER_THREAD) + ITERATION + 1);
-
-            Iterate<ITERATION + 1, MAX_ITERATIONS>::FlagTails(linear_tid, flags, input, flag_op);
-        }
-
-    };
-
-    /// Templated unrolling of item comparison (termination case)
-    template <int MAX_ITERATIONS>
-    struct Iterate<MAX_ITERATIONS, MAX_ITERATIONS>
-    {
-        // Head flags
-        template <
-            int             ITEMS_PER_THREAD,
-            typename        FlagT,
-            typename        FlagOp>
-        static __device__ __forceinline__ void FlagHeads(
-            int                     /*linear_tid*/,
-            FlagT                   (&/*flags*/)[ITEMS_PER_THREAD],     ///< [out] Calling thread's discontinuity head_flags
-            T                       (&/*input*/)[ITEMS_PER_THREAD],     ///< [in] Calling thread's input items
-            T                       (&/*preds*/)[ITEMS_PER_THREAD],     ///< [out] Calling thread's predecessor items
-            FlagOp                  /*flag_op*/)                        ///< [in] Binary boolean flag predicate
-        {}
-
-        // Tail flags
-        template <
-            int             ITEMS_PER_THREAD,
-            typename        FlagT,
-            typename        FlagOp>
-        static __device__ __forceinline__ void FlagTails(
-            int                     /*linear_tid*/,
-            FlagT                   (&/*flags*/)[ITEMS_PER_THREAD],     ///< [out] Calling thread's discontinuity head_flags
-            T                       (&/*input*/)[ITEMS_PER_THREAD],     ///< [in] Calling thread's input items
-            FlagOp                  /*flag_op*/)                        ///< [in] Binary boolean flag predicate
-        {}
-    };
-
-
-    /******************************************************************************
-     * Thread fields
-     ******************************************************************************/
-
-    /// Shared storage reference
-    _TempStorage &temp_storage;
-
-    /// Linear thread-id
-    unsigned int linear_tid;
-
-
-public:
-
-    /// \smemstorage{BlockAdjacentDifference}
-    struct TempStorage : Uninitialized<_TempStorage> {};
-
-
-    /******************************************************************//**
-     * \name Collective constructors
-     *********************************************************************/
-    //@{
-
-    /**
-     * \brief Collective constructor using a private static allocation of shared memory as temporary storage.
-     */
-    __device__ __forceinline__ BlockAdjacentDifference()
-    :
-        temp_storage(PrivateStorage()),
-        linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
-    {}
-
-
-    /**
-     * \brief Collective constructor using the specified memory allocation as temporary storage.
- */ - __device__ __forceinline__ BlockAdjacentDifference( - TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage - : - temp_storage(temp_storage.Alias()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) - {} - - - //@} end member group - /******************************************************************//** - * \name Head flag operations - *********************************************************************/ - //@{ - - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - - template < - int ITEMS_PER_THREAD, - typename FlagT, - typename FlagOp> - __device__ __forceinline__ void FlagHeads( - FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - T (&preds)[ITEMS_PER_THREAD], ///< [out] Calling thread's predecessor items - FlagOp flag_op) ///< [in] Binary boolean flag predicate - { - // Share last item - temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; - - CTA_SYNC(); - - if (linear_tid == 0) - { - // Set flag for first thread-item (preds[0] is undefined) - head_flags[0] = 1; - } - else - { - preds[0] = temp_storage.last_items[linear_tid - 1]; - head_flags[0] = ApplyOp::FlagT(flag_op, preds[0], input[0], linear_tid * ITEMS_PER_THREAD); - } - - // Set head_flags for remaining items - Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op); - } - - template < - int ITEMS_PER_THREAD, - typename FlagT, - typename FlagOp> - __device__ __forceinline__ void FlagHeads( - FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - T (&preds)[ITEMS_PER_THREAD], ///< [out] Calling thread's predecessor items - FlagOp flag_op, ///< [in] Binary boolean flag predicate - T tile_predecessor_item) ///< [in] [thread0 only] Item with which to compare the first tile item (input0 from thread0). - { - // Share last item - temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; - - CTA_SYNC(); - - // Set flag for first thread-item - preds[0] = (linear_tid == 0) ? - tile_predecessor_item : // First thread - temp_storage.last_items[linear_tid - 1]; - - head_flags[0] = ApplyOp::FlagT(flag_op, preds[0], input[0], linear_tid * ITEMS_PER_THREAD); - - // Set head_flags for remaining items - Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op); - } - -#endif // DOXYGEN_SHOULD_SKIP_THIS - - - template < - int ITEMS_PER_THREAD, - typename FlagT, - typename FlagOp> - __device__ __forceinline__ void FlagHeads( - FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - FlagOp flag_op) ///< [in] Binary boolean flag predicate - { - T preds[ITEMS_PER_THREAD]; - FlagHeads(head_flags, input, preds, flag_op); - } - - - template < - int ITEMS_PER_THREAD, - typename FlagT, - typename FlagOp> - __device__ __forceinline__ void FlagHeads( - FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - FlagOp flag_op, ///< [in] Binary boolean flag predicate - T tile_predecessor_item) ///< [in] [thread0 only] Item with which to compare the first tile item (input0 from thread0). 
- { - T preds[ITEMS_PER_THREAD]; - FlagHeads(head_flags, input, preds, flag_op, tile_predecessor_item); - } - - - - template < - int ITEMS_PER_THREAD, - typename FlagT, - typename FlagOp> - __device__ __forceinline__ void FlagTails( - FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - FlagOp flag_op) ///< [in] Binary boolean flag predicate - { - // Share first item - temp_storage.first_items[linear_tid] = input[0]; - - CTA_SYNC(); - - // Set flag for last thread-item - tail_flags[ITEMS_PER_THREAD - 1] = (linear_tid == BLOCK_THREADS - 1) ? - 1 : // Last thread - ApplyOp::FlagT( - flag_op, - input[ITEMS_PER_THREAD - 1], - temp_storage.first_items[linear_tid + 1], - (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); - - // Set tail_flags for remaining items - Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op); - } - - - template < - int ITEMS_PER_THREAD, - typename FlagT, - typename FlagOp> - __device__ __forceinline__ void FlagTails( - FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - FlagOp flag_op, ///< [in] Binary boolean flag predicate - T tile_successor_item) ///< [in] [threadBLOCK_THREADS-1 only] Item with which to compare the last tile item (inputITEMS_PER_THREAD-1 from threadBLOCK_THREADS-1). - { - // Share first item - temp_storage.first_items[linear_tid] = input[0]; - - CTA_SYNC(); - - // Set flag for last thread-item - T successor_item = (linear_tid == BLOCK_THREADS - 1) ? - tile_successor_item : // Last thread - temp_storage.first_items[linear_tid + 1]; - - tail_flags[ITEMS_PER_THREAD - 1] = ApplyOp::FlagT( - flag_op, - input[ITEMS_PER_THREAD - 1], - successor_item, - (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); - - // Set tail_flags for remaining items - Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op); - } - - - template < - int ITEMS_PER_THREAD, - typename FlagT, - typename FlagOp> - __device__ __forceinline__ void FlagHeadsAndTails( - FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags - FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - FlagOp flag_op) ///< [in] Binary boolean flag predicate - { - // Share first and last items - temp_storage.first_items[linear_tid] = input[0]; - temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; - - CTA_SYNC(); - - T preds[ITEMS_PER_THREAD]; - - // Set flag for first thread-item - preds[0] = temp_storage.last_items[linear_tid - 1]; - if (linear_tid == 0) - { - head_flags[0] = 1; - } - else - { - head_flags[0] = ApplyOp::FlagT( - flag_op, - preds[0], - input[0], - linear_tid * ITEMS_PER_THREAD); - } - - - // Set flag for last thread-item - tail_flags[ITEMS_PER_THREAD - 1] = (linear_tid == BLOCK_THREADS - 1) ? 
- 1 : // Last thread - ApplyOp::FlagT( - flag_op, - input[ITEMS_PER_THREAD - 1], - temp_storage.first_items[linear_tid + 1], - (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); - - // Set head_flags for remaining items - Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op); - - // Set tail_flags for remaining items - Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op); - } - - - template < - int ITEMS_PER_THREAD, - typename FlagT, - typename FlagOp> - __device__ __forceinline__ void FlagHeadsAndTails( - FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags - FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags - T tile_successor_item, ///< [in] [threadBLOCK_THREADS-1 only] Item with which to compare the last tile item (inputITEMS_PER_THREAD-1 from threadBLOCK_THREADS-1). - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - FlagOp flag_op) ///< [in] Binary boolean flag predicate - { - // Share first and last items - temp_storage.first_items[linear_tid] = input[0]; - temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; - - CTA_SYNC(); - - T preds[ITEMS_PER_THREAD]; - - // Set flag for first thread-item - if (linear_tid == 0) - { - head_flags[0] = 1; - } - else - { - preds[0] = temp_storage.last_items[linear_tid - 1]; - head_flags[0] = ApplyOp::FlagT( - flag_op, - preds[0], - input[0], - linear_tid * ITEMS_PER_THREAD); - } - - // Set flag for last thread-item - T successor_item = (linear_tid == BLOCK_THREADS - 1) ? - tile_successor_item : // Last thread - temp_storage.first_items[linear_tid + 1]; - - tail_flags[ITEMS_PER_THREAD - 1] = ApplyOp::FlagT( - flag_op, - input[ITEMS_PER_THREAD - 1], - successor_item, - (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); - - // Set head_flags for remaining items - Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op); - - // Set tail_flags for remaining items - Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op); - } - - template < - int ITEMS_PER_THREAD, - typename FlagT, - typename FlagOp> - __device__ __forceinline__ void FlagHeadsAndTails( - FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags - T tile_predecessor_item, ///< [in] [thread0 only] Item with which to compare the first tile item (input0 from thread0). - FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - FlagOp flag_op) ///< [in] Binary boolean flag predicate - { - // Share first and last items - temp_storage.first_items[linear_tid] = input[0]; - temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; - - CTA_SYNC(); - - T preds[ITEMS_PER_THREAD]; - - // Set flag for first thread-item - preds[0] = (linear_tid == 0) ? - tile_predecessor_item : // First thread - temp_storage.last_items[linear_tid - 1]; - - head_flags[0] = ApplyOp::FlagT( - flag_op, - preds[0], - input[0], - linear_tid * ITEMS_PER_THREAD); - - // Set flag for last thread-item - tail_flags[ITEMS_PER_THREAD - 1] = (linear_tid == BLOCK_THREADS - 1) ? 
- 1 : // Last thread - ApplyOp::FlagT( - flag_op, - input[ITEMS_PER_THREAD - 1], - temp_storage.first_items[linear_tid + 1], - (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); - - // Set head_flags for remaining items - Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op); - - // Set tail_flags for remaining items - Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op); - } - - - template < - int ITEMS_PER_THREAD, - typename FlagT, - typename FlagOp> - __device__ __forceinline__ void FlagHeadsAndTails( - FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags - T tile_predecessor_item, ///< [in] [thread0 only] Item with which to compare the first tile item (input0 from thread0). - FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags - T tile_successor_item, ///< [in] [threadBLOCK_THREADS-1 only] Item with which to compare the last tile item (inputITEMS_PER_THREAD-1 from threadBLOCK_THREADS-1). - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - FlagOp flag_op) ///< [in] Binary boolean flag predicate - { - // Share first and last items - temp_storage.first_items[linear_tid] = input[0]; - temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; - - CTA_SYNC(); - - T preds[ITEMS_PER_THREAD]; - - // Set flag for first thread-item - preds[0] = (linear_tid == 0) ? - tile_predecessor_item : // First thread - temp_storage.last_items[linear_tid - 1]; - - head_flags[0] = ApplyOp::FlagT( - flag_op, - preds[0], - input[0], - linear_tid * ITEMS_PER_THREAD); - - // Set flag for last thread-item - T successor_item = (linear_tid == BLOCK_THREADS - 1) ? - tile_successor_item : // Last thread - temp_storage.first_items[linear_tid + 1]; - - tail_flags[ITEMS_PER_THREAD - 1] = ApplyOp::FlagT( - flag_op, - input[ITEMS_PER_THREAD - 1], - successor_item, - (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); - - // Set head_flags for remaining items - Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op); - - // Set tail_flags for remaining items - Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op); - } - - - -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/block/block_discontinuity.cuh b/ml-xgboost/cub/cub/block/block_discontinuity.cuh deleted file mode 100644 index 2f3bb79..0000000 --- a/ml-xgboost/cub/cub/block/block_discontinuity.cuh +++ /dev/null @@ -1,1148 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. 
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- ******************************************************************************/
-
-/**
- * \file
- * The cub::BlockDiscontinuity class provides [collective](index.html#sec0) methods for flagging discontinuities within an ordered set of items partitioned across a CUDA thread block.
- */
-
-#pragma once
-
-#include "../util_type.cuh"
-#include "../util_ptx.cuh"
-#include "../util_namespace.cuh"
-
-/// Optional outer namespace(s)
-CUB_NS_PREFIX
-
-/// CUB namespace
-namespace cub {
-
-/**
- * \brief The BlockDiscontinuity class provides [collective](index.html#sec0) methods for flagging discontinuities within an ordered set of items partitioned across a CUDA thread block. ![](discont_logo.png)
- * \ingroup BlockModule
- *
- * \tparam T                The data type to be flagged.
- * \tparam BLOCK_DIM_X      The thread block length in threads along the X dimension
- * \tparam BLOCK_DIM_Y      [optional] The thread block length in threads along the Y dimension (default: 1)
- * \tparam BLOCK_DIM_Z      [optional] The thread block length in threads along the Z dimension (default: 1)
- * \tparam PTX_ARCH         [optional] \ptxversion
- *
- * \par Overview
- * - A set of "head flags" (or "tail flags") is often used to indicate corresponding items
- *   that differ from their predecessors (or successors).  For example, head flags are convenient
- *   for demarcating disjoint data segments as part of a segmented scan or reduction.
- * - \blocked
- *
- * \par Performance Considerations
- * - \granularity
- *
- * \par A Simple Example
- * \blockcollective{BlockDiscontinuity}
- * \par
- * The code snippet below illustrates the head flagging of 512 integer items that
- * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads
- * where each thread owns 4 consecutive items.
- * \par
- * \code
- * #include <cub/cub.cuh>   // or equivalently <cub/block/block_discontinuity.cuh>
- *
- * __global__ void ExampleKernel(...)
- * {
- *     // Specialize BlockDiscontinuity for a 1D block of 128 threads on type int
- *     typedef cub::BlockDiscontinuity<int, 128> BlockDiscontinuity;
- *
- *     // Allocate shared memory for BlockDiscontinuity
- *     __shared__ typename BlockDiscontinuity::TempStorage temp_storage;
- *
- *     // Obtain a segment of consecutive items that are blocked across threads
- *     int thread_data[4];
- *     ...
- *
- *     // Collectively compute head flags for discontinuities in the segment
- *     int head_flags[4];
- *     BlockDiscontinuity(temp_storage).FlagHeads(head_flags, thread_data, cub::Inequality());
- *
- * \endcode
- * \par
- * Suppose the set of input \p thread_data across the block of threads is
- * { [0,0,1,1], [1,1,1,1], [2,3,3,3], [3,4,4,4], ... }.
- * The corresponding output \p head_flags in those threads will be
- * { [1,0,1,0], [0,0,0,0], [1,1,0,0], [0,1,0,0], ... }.
- *
- * \par Performance Considerations
- * - Incurs zero bank conflicts for most types
- *
- */
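The snippet in the class documentation elides data movement; for reference, a complete hypothetical kernel built on the interface declared below loads four items per thread, flags the first item of every run of equal values, and writes the flags back out (assuming the vendored cub/cub.cuh is on the include path):

#include <cub/cub.cuh>

// 128 threads x 4 items: mark the first occurrence in each run of equal values.
__global__ void FlagRunHeads(const int *d_in, int *d_flags)
{
    typedef cub::BlockDiscontinuity<int, 128> BlockDiscontinuityT;
    __shared__ typename BlockDiscontinuityT::TempStorage temp_storage;

    // Blocked arrangement: thread t owns items [4*t, 4*t + 3].
    int thread_data[4];
    for (int i = 0; i < 4; ++i)
        thread_data[i] = d_in[threadIdx.x * 4 + i];

    int head_flags[4];
    BlockDiscontinuityT(temp_storage).FlagHeads(head_flags, thread_data, cub::Inequality());

    for (int i = 0; i < 4; ++i)
        d_flags[threadIdx.x * 4 + i] = head_flags[i];
}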
-template <
-    typename    T,
-    int         BLOCK_DIM_X,
-    int         BLOCK_DIM_Y     = 1,
-    int         BLOCK_DIM_Z     = 1,
-    int         PTX_ARCH        = CUB_PTX_ARCH>
-class BlockDiscontinuity
-{
-private:
-
-    /******************************************************************************
-     * Constants and type definitions
-     ******************************************************************************/
-
-    /// Constants
-    enum
-    {
-        /// The thread block size in threads
-        BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,
-    };
-
-
-    /// Shared memory storage layout type (last element from each thread's input)
-    struct _TempStorage
-    {
-        T first_items[BLOCK_THREADS];
-        T last_items[BLOCK_THREADS];
-    };
-
-
-    /******************************************************************************
-     * Utility methods
-     ******************************************************************************/
-
-    /// Internal storage allocator
-    __device__ __forceinline__ _TempStorage& PrivateStorage()
-    {
-        __shared__ _TempStorage private_storage;
-        return private_storage;
-    }
-
-
-    /// Specialization for when FlagOp has third index param
-    template <typename FlagOp, bool HAS_PARAM = BinaryOpHasIdxParam<T, FlagOp>::HAS_PARAM>
-    struct ApplyOp
-    {
-        // Apply flag operator
-        static __device__ __forceinline__ bool FlagT(FlagOp flag_op, const T &a, const T &b, int idx)
-        {
-            return flag_op(a, b, idx);
-        }
-    };
-
-    /// Specialization for when FlagOp does not have a third index param
-    template <typename FlagOp>
-    struct ApplyOp<FlagOp, false>
-    {
-        // Apply flag operator
-        static __device__ __forceinline__ bool FlagT(FlagOp flag_op, const T &a, const T &b, int /*idx*/)
-        {
-            return flag_op(a, b);
-        }
-    };
-
-    /// Templated unrolling of item comparison (inductive case)
-    template <int ITERATION, int MAX_ITERATIONS>
-    struct Iterate
-    {
-        // Head flags
-        template <
-            int             ITEMS_PER_THREAD,
-            typename        FlagT,
-            typename        FlagOp>
-        static __device__ __forceinline__ void FlagHeads(
-            int                     linear_tid,
-            FlagT                   (&flags)[ITEMS_PER_THREAD],         ///< [out] Calling thread's discontinuity head_flags
-            T                       (&input)[ITEMS_PER_THREAD],         ///< [in] Calling thread's input items
-            T                       (&preds)[ITEMS_PER_THREAD],         ///< [out] Calling thread's predecessor items
-            FlagOp                  flag_op)                            ///< [in] Binary boolean flag predicate
-        {
-            preds[ITERATION] = input[ITERATION - 1];
-
-            flags[ITERATION] = ApplyOp<FlagOp>::FlagT(
-                flag_op,
-                preds[ITERATION],
-                input[ITERATION],
-                (linear_tid * ITEMS_PER_THREAD) + ITERATION);
-
-            Iterate<ITERATION + 1, MAX_ITERATIONS>::FlagHeads(linear_tid, flags, input, preds, flag_op);
-        }
-
-        // Tail flags
-        template <
-            int             ITEMS_PER_THREAD,
-            typename        FlagT,
-            typename        FlagOp>
-        static __device__ __forceinline__ void FlagTails(
-            int                     linear_tid,
-            FlagT                   (&flags)[ITEMS_PER_THREAD],         ///< [out] Calling thread's discontinuity head_flags
-            T                       (&input)[ITEMS_PER_THREAD],         ///< [in] Calling thread's input items
-            FlagOp                  flag_op)                            ///< [in] Binary boolean flag predicate
-        {
-            flags[ITERATION] = ApplyOp<FlagOp>::FlagT(
-                flag_op,
-                input[ITERATION],
-                input[ITERATION + 1],
-                (linear_tid * ITEMS_PER_THREAD) + ITERATION + 1);
-
-            Iterate<ITERATION + 1, MAX_ITERATIONS>::FlagTails(linear_tid, flags, input, flag_op);
-        }
-
-    };
-
-    /// Templated unrolling of item comparison (termination case)
-    template <int MAX_ITERATIONS>
-    struct Iterate<MAX_ITERATIONS, MAX_ITERATIONS>
-    {
-        // Head flags
-        template <
-            int             ITEMS_PER_THREAD,
-            typename        FlagT,
-            typename        FlagOp>
-        static __device__ __forceinline__ void FlagHeads(
-            int                     /*linear_tid*/,
-            FlagT                   (&/*flags*/)[ITEMS_PER_THREAD],     ///< [out] Calling thread's discontinuity head_flags
-            T                       (&/*input*/)[ITEMS_PER_THREAD],     ///< [in] Calling thread's input items
-            T                       (&/*preds*/)[ITEMS_PER_THREAD],     ///< [out] Calling thread's predecessor items
-            FlagOp                  /*flag_op*/)                        ///< [in] Binary boolean flag predicate
-        {}
-
-        // Tail flags
-        template <
-            int             ITEMS_PER_THREAD,
-            typename        FlagT,
-            typename        FlagOp>
-        static __device__ __forceinline__ void FlagTails(
-            int                     /*linear_tid*/,
-            FlagT                   (&/*flags*/)[ITEMS_PER_THREAD],     ///< [out] Calling thread's discontinuity head_flags
-            T                       (&/*input*/)[ITEMS_PER_THREAD],     ///< [in] Calling thread's input items
-            FlagOp                  /*flag_op*/)                        ///< [in] Binary boolean flag predicate
-        {}
-    };
-
-
-    /******************************************************************************
-     * Thread fields
-     ******************************************************************************/
-
-    /// Shared storage reference
-    _TempStorage &temp_storage;
-
-    /// Linear thread-id
-    unsigned int linear_tid;
-
-
-public:
-
-    /// \smemstorage{BlockDiscontinuity}
-    struct TempStorage : Uninitialized<_TempStorage> {};
-
-
-    /******************************************************************//**
-     * \name Collective constructors
-     *********************************************************************/
-    //@{
-
-    /**
-     * \brief Collective constructor using a private static allocation of shared memory as temporary storage.
-     */
-    __device__ __forceinline__ BlockDiscontinuity()
-    :
-        temp_storage(PrivateStorage()),
-        linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
-    {}
-
-
-    /**
-     * \brief Collective constructor using the specified memory allocation as temporary storage.
-     */
-    __device__ __forceinline__ BlockDiscontinuity(
-        TempStorage &temp_storage)  ///< [in] Reference to memory allocation having layout type TempStorage
-    :
-        temp_storage(temp_storage.Alias()),
-        linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
-    {}
-
-
-    //@}  end member group
-    /******************************************************************//**
-     * \name Head flag operations
-     *********************************************************************/
-    //@{
-
-
-#ifndef DOXYGEN_SHOULD_SKIP_THIS    // Do not document
-
-    template <
-        int             ITEMS_PER_THREAD,
-        typename        FlagT,
-        typename        FlagOp>
-    __device__ __forceinline__ void FlagHeads(
-        FlagT           (&head_flags)[ITEMS_PER_THREAD],    ///< [out] Calling thread's discontinuity head_flags
-        T               (&input)[ITEMS_PER_THREAD],         ///< [in] Calling thread's input items
-        T               (&preds)[ITEMS_PER_THREAD],         ///< [out] Calling thread's predecessor items
-        FlagOp          flag_op)                            ///< [in] Binary boolean flag predicate
-    {
-        // Share last item
-        temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1];
-
-        CTA_SYNC();
-
-        if (linear_tid == 0)
-        {
-            // Set flag for first thread-item (preds[0] is undefined)
-            head_flags[0] = 1;
-        }
-        else
-        {
-            preds[0] = temp_storage.last_items[linear_tid - 1];
-            head_flags[0] = ApplyOp<FlagOp>::FlagT(flag_op, preds[0], input[0], linear_tid * ITEMS_PER_THREAD);
-        }
-
-        // Set head_flags for remaining items
-        Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op);
-    }
-
-    template <
-        int             ITEMS_PER_THREAD,
-        typename        FlagT,
-        typename        FlagOp>
-    __device__ __forceinline__ void FlagHeads(
-        FlagT           (&head_flags)[ITEMS_PER_THREAD],    ///< [out] Calling thread's discontinuity head_flags
-        T               (&input)[ITEMS_PER_THREAD],         ///< [in] Calling thread's input items
-        T               (&preds)[ITEMS_PER_THREAD],         ///< [out] Calling thread's predecessor items
-        FlagOp          flag_op,                            ///< [in] Binary boolean flag predicate
-        T               tile_predecessor_item)              ///< [in] [thread0 only] Item with which to compare the first tile item (input0 from thread0).
- { - // Share last item - temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; - - CTA_SYNC(); - - // Set flag for first thread-item - preds[0] = (linear_tid == 0) ? - tile_predecessor_item : // First thread - temp_storage.last_items[linear_tid - 1]; - - head_flags[0] = ApplyOp::FlagT(flag_op, preds[0], input[0], linear_tid * ITEMS_PER_THREAD); - - // Set head_flags for remaining items - Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op); - } - -#endif // DOXYGEN_SHOULD_SKIP_THIS - - - /** - * \brief Sets head flags indicating discontinuities between items partitioned across the thread block, for which the first item has no reference and is always flagged. - * - * \par - * - The flag head_flagsi is set for item - * inputi when - * flag_op(previous-item, inputi) - * returns \p true (where previous-item is either the preceding item - * in the same thread or the last item in the previous thread). - * - For thread0, item input0 is always flagged. - * - \blocked - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates the head-flagging of 512 integer items that - * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads - * where each thread owns 4 consecutive items. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockDiscontinuity for a 1D block of 128 threads on type int - * typedef cub::BlockDiscontinuity BlockDiscontinuity; - * - * // Allocate shared memory for BlockDiscontinuity - * __shared__ typename BlockDiscontinuity::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * ... - * - * // Collectively compute head flags for discontinuities in the segment - * int head_flags[4]; - * BlockDiscontinuity(temp_storage).FlagHeads(head_flags, thread_data, cub::Inequality()); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is - * { [0,0,1,1], [1,1,1,1], [2,3,3,3], [3,4,4,4], ... }. - * The corresponding output \p head_flags in those threads will be - * { [1,0,1,0], [0,0,0,0], [1,1,0,0], [0,1,0,0], ... }. - * - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - * \tparam FlagT [inferred] The flag type (must be an integer type) - * \tparam FlagOp [inferred] Binary predicate functor type having member T operator()(const T &a, const T &b) or member T operator()(const T &a, const T &b, unsigned int b_index), and returning \p true if a discontinuity exists between \p a and \p b, otherwise \p false. \p b_index is the rank of b in the aggregate tile of data. - */ - template < - int ITEMS_PER_THREAD, - typename FlagT, - typename FlagOp> - __device__ __forceinline__ void FlagHeads( - FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - FlagOp flag_op) ///< [in] Binary boolean flag predicate - { - T preds[ITEMS_PER_THREAD]; - FlagHeads(head_flags, input, preds, flag_op); - } - - - /** - * \brief Sets head flags indicating discontinuities between items partitioned across the thread block. - * - * \par - * - The flag head_flagsi is set for item - * inputi when - * flag_op(previous-item, inputi) - * returns \p true (where previous-item is either the preceding item - * in the same thread or the last item in the previous thread). 
- * - For thread0, item input0 is compared - * against \p tile_predecessor_item. - * - \blocked - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates the head-flagging of 512 integer items that - * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads - * where each thread owns 4 consecutive items. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockDiscontinuity for a 1D block of 128 threads on type int - * typedef cub::BlockDiscontinuity BlockDiscontinuity; - * - * // Allocate shared memory for BlockDiscontinuity - * __shared__ typename BlockDiscontinuity::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * ... - * - * // Have thread0 obtain the predecessor item for the entire tile - * int tile_predecessor_item; - * if (threadIdx.x == 0) tile_predecessor_item == ... - * - * // Collectively compute head flags for discontinuities in the segment - * int head_flags[4]; - * BlockDiscontinuity(temp_storage).FlagHeads( - * head_flags, thread_data, cub::Inequality(), tile_predecessor_item); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is - * { [0,0,1,1], [1,1,1,1], [2,3,3,3], [3,4,4,4], ... }, - * and that \p tile_predecessor_item is \p 0. The corresponding output \p head_flags in those threads will be - * { [0,0,1,0], [0,0,0,0], [1,1,0,0], [0,1,0,0], ... }. - * - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - * \tparam FlagT [inferred] The flag type (must be an integer type) - * \tparam FlagOp [inferred] Binary predicate functor type having member T operator()(const T &a, const T &b) or member T operator()(const T &a, const T &b, unsigned int b_index), and returning \p true if a discontinuity exists between \p a and \p b, otherwise \p false. \p b_index is the rank of b in the aggregate tile of data. - */ - template < - int ITEMS_PER_THREAD, - typename FlagT, - typename FlagOp> - __device__ __forceinline__ void FlagHeads( - FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - FlagOp flag_op, ///< [in] Binary boolean flag predicate - T tile_predecessor_item) ///< [in] [thread0 only] Item with which to compare the first tile item (input0 from thread0). - { - T preds[ITEMS_PER_THREAD]; - FlagHeads(head_flags, input, preds, flag_op, tile_predecessor_item); - } - - - - //@} end member group - /******************************************************************//** - * \name Tail flag operations - *********************************************************************/ - //@{ - - - /** - * \brief Sets tail flags indicating discontinuities between items partitioned across the thread block, for which the last item has no reference and is always flagged. - * - * \par - * - The flag tail_flagsi is set for item - * inputi when - * flag_op(inputi, next-item) - * returns \p true (where next-item is either the next item - * in the same thread or the first item in the next thread). - * - For threadBLOCK_THREADS-1, item - * inputITEMS_PER_THREAD-1 is always flagged. 
- * - \blocked - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates the tail-flagging of 512 integer items that - * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads - * where each thread owns 4 consecutive items. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockDiscontinuity for a 1D block of 128 threads on type int - * typedef cub::BlockDiscontinuity BlockDiscontinuity; - * - * // Allocate shared memory for BlockDiscontinuity - * __shared__ typename BlockDiscontinuity::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * ... - * - * // Collectively compute tail flags for discontinuities in the segment - * int tail_flags[4]; - * BlockDiscontinuity(temp_storage).FlagTails(tail_flags, thread_data, cub::Inequality()); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is - * { [0,0,1,1], [1,1,1,1], [2,3,3,3], ..., [124,125,125,125] }. - * The corresponding output \p tail_flags in those threads will be - * { [0,1,0,0], [0,0,0,1], [1,0,0,...], ..., [1,0,0,1] }. - * - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - * \tparam FlagT [inferred] The flag type (must be an integer type) - * \tparam FlagOp [inferred] Binary predicate functor type having member T operator()(const T &a, const T &b) or member T operator()(const T &a, const T &b, unsigned int b_index), and returning \p true if a discontinuity exists between \p a and \p b, otherwise \p false. \p b_index is the rank of b in the aggregate tile of data. - */ - template < - int ITEMS_PER_THREAD, - typename FlagT, - typename FlagOp> - __device__ __forceinline__ void FlagTails( - FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - FlagOp flag_op) ///< [in] Binary boolean flag predicate - { - // Share first item - temp_storage.first_items[linear_tid] = input[0]; - - CTA_SYNC(); - - // Set flag for last thread-item - tail_flags[ITEMS_PER_THREAD - 1] = (linear_tid == BLOCK_THREADS - 1) ? - 1 : // Last thread - ApplyOp::FlagT( - flag_op, - input[ITEMS_PER_THREAD - 1], - temp_storage.first_items[linear_tid + 1], - (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); - - // Set tail_flags for remaining items - Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op); - } - - - /** - * \brief Sets tail flags indicating discontinuities between items partitioned across the thread block. - * - * \par - * - The flag tail_flagsi is set for item - * inputi when - * flag_op(inputi, next-item) - * returns \p true (where next-item is either the next item - * in the same thread or the first item in the next thread). - * - For threadBLOCK_THREADS-1, item - * inputITEMS_PER_THREAD-1 is compared - * against \p tile_successor_item. - * - \blocked - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates the tail-flagging of 512 integer items that - * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads - * where each thread owns 4 consecutive items. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) 
- * { - * // Specialize BlockDiscontinuity for a 1D block of 128 threads on type int - * typedef cub::BlockDiscontinuity BlockDiscontinuity; - * - * // Allocate shared memory for BlockDiscontinuity - * __shared__ typename BlockDiscontinuity::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * ... - * - * // Have thread127 obtain the successor item for the entire tile - * int tile_successor_item; - * if (threadIdx.x == 127) tile_successor_item == ... - * - * // Collectively compute tail flags for discontinuities in the segment - * int tail_flags[4]; - * BlockDiscontinuity(temp_storage).FlagTails( - * tail_flags, thread_data, cub::Inequality(), tile_successor_item); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is - * { [0,0,1,1], [1,1,1,1], [2,3,3,3], ..., [124,125,125,125] } - * and that \p tile_successor_item is \p 125. The corresponding output \p tail_flags in those threads will be - * { [0,1,0,0], [0,0,0,1], [1,0,0,...], ..., [1,0,0,0] }. - * - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - * \tparam FlagT [inferred] The flag type (must be an integer type) - * \tparam FlagOp [inferred] Binary predicate functor type having member T operator()(const T &a, const T &b) or member T operator()(const T &a, const T &b, unsigned int b_index), and returning \p true if a discontinuity exists between \p a and \p b, otherwise \p false. \p b_index is the rank of b in the aggregate tile of data. - */ - template < - int ITEMS_PER_THREAD, - typename FlagT, - typename FlagOp> - __device__ __forceinline__ void FlagTails( - FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - FlagOp flag_op, ///< [in] Binary boolean flag predicate - T tile_successor_item) ///< [in] [threadBLOCK_THREADS-1 only] Item with which to compare the last tile item (inputITEMS_PER_THREAD-1 from threadBLOCK_THREADS-1). - { - // Share first item - temp_storage.first_items[linear_tid] = input[0]; - - CTA_SYNC(); - - // Set flag for last thread-item - T successor_item = (linear_tid == BLOCK_THREADS - 1) ? - tile_successor_item : // Last thread - temp_storage.first_items[linear_tid + 1]; - - tail_flags[ITEMS_PER_THREAD - 1] = ApplyOp::FlagT( - flag_op, - input[ITEMS_PER_THREAD - 1], - successor_item, - (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); - - // Set tail_flags for remaining items - Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op); - } - - - //@} end member group - /******************************************************************//** - * \name Head & tail flag operations - *********************************************************************/ - //@{ - - - /** - * \brief Sets both head and tail flags indicating discontinuities between items partitioned across the thread block. - * - * \par - * - The flag head_flagsi is set for item - * inputi when - * flag_op(previous-item, inputi) - * returns \p true (where previous-item is either the preceding item - * in the same thread or the last item in the previous thread). - * - For thread0, item input0 is always flagged. - * - The flag tail_flagsi is set for item - * inputi when - * flag_op(inputi, next-item) - * returns \p true (where next-item is either the next item - * in the same thread or the first item in the next thread). 
-/******************************************************************//**
- * \name Head & tail flag operations
- *********************************************************************/
-//@{
-
-
-/**
- * \brief Sets both head and tail flags indicating discontinuities between items partitioned across the thread block.
- *
- * \par
- * - The flag head_flags[i] is set for item
- *   input[i] when
- *   flag_op(previous-item, input[i])
- *   returns \p true (where previous-item is either the preceding item
- *   in the same thread or the last item in the previous thread).
- * - For thread 0, item input[0] is always flagged.
- * - The flag tail_flags[i] is set for item
- *   input[i] when
- *   flag_op(input[i], next-item)
- *   returns \p true (where next-item is either the next item
- *   in the same thread or the first item in the next thread).
- * - For thread BLOCK_THREADS-1, item
- *   input[ITEMS_PER_THREAD-1] is always flagged.
- * - \blocked
- * - \granularity
- * - \smemreuse
- *
- * \par Snippet
- * The code snippet below illustrates the head- and tail-flagging of 512 integer items that
- * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads
- * where each thread owns 4 consecutive items.
- * \par
- * \code
- * #include <cub/cub.cuh>   // or equivalently <cub/block/block_discontinuity.cuh>
- *
- * __global__ void ExampleKernel(...)
- * {
- *     // Specialize BlockDiscontinuity for a 1D block of 128 threads on type int
- *     typedef cub::BlockDiscontinuity<int, 128> BlockDiscontinuity;
- *
- *     // Allocate shared memory for BlockDiscontinuity
- *     __shared__ typename BlockDiscontinuity::TempStorage temp_storage;
- *
- *     // Obtain a segment of consecutive items that are blocked across threads
- *     int thread_data[4];
- *     ...
- *
- *     // Collectively compute head and tail flags for discontinuities in the segment
- *     int head_flags[4];
- *     int tail_flags[4];
- *     BlockDiscontinuity(temp_storage).FlagHeadsAndTails(
- *         head_flags, tail_flags, thread_data, cub::Inequality());
- *
- * \endcode
- * \par
- * Suppose the set of input \p thread_data across the block of threads is
- * { [0,0,1,1], [1,1,1,1], [2,3,3,3], ..., [124,125,125,125] }.
- * The corresponding output \p head_flags in those threads will be
- * { [1,0,1,0], [0,0,0,0], [1,1,0,0], [0,1,0,0], ... },
- * and the corresponding output \p tail_flags in those threads will be
- * { [0,1,0,0], [0,0,0,1], [1,0,0,...], ..., [1,0,0,1] }.
- *
- * \tparam ITEMS_PER_THREAD   [inferred] The number of consecutive items partitioned onto each thread.
- * \tparam FlagT              [inferred] The flag type (must be an integer type)
- * \tparam FlagOp             [inferred] Binary predicate functor type having member T operator()(const T &a, const T &b) or member T operator()(const T &a, const T &b, unsigned int b_index), and returning \p true if a discontinuity exists between \p a and \p b, otherwise \p false. \p b_index is the rank of b in the aggregate tile of data.
- */
-template <
-    int             ITEMS_PER_THREAD,
-    typename        FlagT,
-    typename        FlagOp>
-__device__ __forceinline__ void FlagHeadsAndTails(
-    FlagT           (&head_flags)[ITEMS_PER_THREAD],    ///< [out] Calling thread's discontinuity head_flags
-    FlagT           (&tail_flags)[ITEMS_PER_THREAD],    ///< [out] Calling thread's discontinuity tail_flags
-    T               (&input)[ITEMS_PER_THREAD],         ///< [in] Calling thread's input items
-    FlagOp          flag_op)                            ///< [in] Binary boolean flag predicate
-{
-    // Share first and last items
-    temp_storage.first_items[linear_tid] = input[0];
-    temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1];
-
-    CTA_SYNC();
-
-    T preds[ITEMS_PER_THREAD];
-
-    // Set flag for first thread-item
-    preds[0] = temp_storage.last_items[linear_tid - 1];
-    if (linear_tid == 0)
-    {
-        head_flags[0] = 1;
-    }
-    else
-    {
-        head_flags[0] = ApplyOp<FlagOp>::FlagT(
-            flag_op,
-            preds[0],
-            input[0],
-            linear_tid * ITEMS_PER_THREAD);
-    }
-
-    // Set flag for last thread-item
-    tail_flags[ITEMS_PER_THREAD - 1] = (linear_tid == BLOCK_THREADS - 1) ?
-        1 :                             // Last thread
-        ApplyOp<FlagOp>::FlagT(
-            flag_op,
-            input[ITEMS_PER_THREAD - 1],
-            temp_storage.first_items[linear_tid + 1],
-            (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD);
-
-    // Set head_flags for remaining items
-    Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op);
-
-    // Set tail_flags for remaining items
-    Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op);
-}
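-
-    // Illustrative sketch (editor's addition): the two flag arrays are often combined per
-    // item; e.g. an item that is both a head and a tail forms a single-item run
-    // ('is_singleton' is a hypothetical local array).
-    //
-    //     int head_flags[4];
-    //     int tail_flags[4];
-    //     BlockDiscontinuity(temp_storage).FlagHeadsAndTails(
-    //         head_flags, tail_flags, thread_data, cub::Inequality());
-    //
-    //     bool is_singleton[4];
-    //     #pragma unroll
-    //     for (int ITEM = 0; ITEM < 4; ++ITEM)
-    //         is_singleton[ITEM] = head_flags[ITEM] && tail_flags[ITEM];
-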
-
-
-/**
- * \brief Sets both head and tail flags indicating discontinuities between items partitioned across the thread block.
- *
- * \par
- * - The flag head_flags[i] is set for item
- *   input[i] when
- *   flag_op(previous-item, input[i])
- *   returns \p true (where previous-item is either the preceding item
- *   in the same thread or the last item in the previous thread).
- * - For thread 0, item input[0] is always flagged.
- * - The flag tail_flags[i] is set for item
- *   input[i] when
- *   flag_op(input[i], next-item)
- *   returns \p true (where next-item is either the next item
- *   in the same thread or the first item in the next thread).
- * - For thread BLOCK_THREADS-1, item
- *   input[ITEMS_PER_THREAD-1] is compared
- *   against \p tile_successor_item.
- * - \blocked
- * - \granularity
- * - \smemreuse
- *
- * \par Snippet
- * The code snippet below illustrates the head- and tail-flagging of 512 integer items that
- * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads
- * where each thread owns 4 consecutive items.
- * \par
- * \code
- * #include <cub/cub.cuh>   // or equivalently <cub/block/block_discontinuity.cuh>
- *
- * __global__ void ExampleKernel(...)
- * {
- *     // Specialize BlockDiscontinuity for a 1D block of 128 threads on type int
- *     typedef cub::BlockDiscontinuity<int, 128> BlockDiscontinuity;
- *
- *     // Allocate shared memory for BlockDiscontinuity
- *     __shared__ typename BlockDiscontinuity::TempStorage temp_storage;
- *
- *     // Obtain a segment of consecutive items that are blocked across threads
- *     int thread_data[4];
- *     ...
- *
- *     // Have thread 127 obtain the successor item for the entire tile
- *     int tile_successor_item;
- *     if (threadIdx.x == 127) tile_successor_item = ...
- *
- *     // Collectively compute head and tail flags for discontinuities in the segment
- *     int head_flags[4];
- *     int tail_flags[4];
- *     BlockDiscontinuity(temp_storage).FlagHeadsAndTails(
- *         head_flags, tail_flags, tile_successor_item, thread_data, cub::Inequality());
- *
- * \endcode
- * \par
- * Suppose the set of input \p thread_data across the block of threads is
- * { [0,0,1,1], [1,1,1,1], [2,3,3,3], ..., [124,125,125,125] }
- * and that the \p tile_successor_item is \p 125. The corresponding output \p head_flags
- * in those threads will be { [1,0,1,0], [0,0,0,0], [1,1,0,0], [0,1,0,0], ... },
- * and the corresponding output \p tail_flags in those threads will be
- * { [0,1,0,0], [0,0,0,1], [1,0,0,...], ..., [1,0,0,0] }.
- *
- * \tparam ITEMS_PER_THREAD   [inferred] The number of consecutive items partitioned onto each thread.
- * \tparam FlagT              [inferred] The flag type (must be an integer type)
- * \tparam FlagOp             [inferred] Binary predicate functor type having member T operator()(const T &a, const T &b) or member T operator()(const T &a, const T &b, unsigned int b_index), and returning \p true if a discontinuity exists between \p a and \p b, otherwise \p false. \p b_index is the rank of b in the aggregate tile of data.
- */
-template <
-    int             ITEMS_PER_THREAD,
-    typename        FlagT,
-    typename        FlagOp>
-__device__ __forceinline__ void FlagHeadsAndTails(
-    FlagT           (&head_flags)[ITEMS_PER_THREAD],    ///< [out] Calling thread's discontinuity head_flags
-    FlagT           (&tail_flags)[ITEMS_PER_THREAD],    ///< [out] Calling thread's discontinuity tail_flags
-    T               tile_successor_item,                ///< [in] [thread BLOCK_THREADS-1 only] Item with which to compare the last tile item (input[ITEMS_PER_THREAD-1] from thread BLOCK_THREADS-1).
-    T               (&input)[ITEMS_PER_THREAD],         ///< [in] Calling thread's input items
-    FlagOp          flag_op)                            ///< [in] Binary boolean flag predicate
-{
-    // Share first and last items
-    temp_storage.first_items[linear_tid] = input[0];
-    temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1];
-
-    CTA_SYNC();
-
-    T preds[ITEMS_PER_THREAD];
-
-    // Set flag for first thread-item
-    if (linear_tid == 0)
-    {
-        head_flags[0] = 1;
-    }
-    else
-    {
-        preds[0] = temp_storage.last_items[linear_tid - 1];
-        head_flags[0] = ApplyOp<FlagOp>::FlagT(
-            flag_op,
-            preds[0],
-            input[0],
-            linear_tid * ITEMS_PER_THREAD);
-    }
-
-    // Set flag for last thread-item
-    T successor_item = (linear_tid == BLOCK_THREADS - 1) ?
-        tile_successor_item :              // Last thread
-        temp_storage.first_items[linear_tid + 1];
-
-    tail_flags[ITEMS_PER_THREAD - 1] = ApplyOp<FlagOp>::FlagT(
-        flag_op,
-        input[ITEMS_PER_THREAD - 1],
-        successor_item,
-        (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD);
-
-    // Set head_flags for remaining items
-    Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op);
-
-    // Set tail_flags for remaining items
-    Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op);
-}
-
-
-/**
- * \brief Sets both head and tail flags indicating discontinuities between items partitioned across the thread block.
- *
- * \par
- * - The flag head_flags[i] is set for item
- *   input[i] when
- *   flag_op(previous-item, input[i])
- *   returns \p true (where previous-item is either the preceding item
- *   in the same thread or the last item in the previous thread).
- * - For thread 0, item input[0] is compared
- *   against \p tile_predecessor_item.
- * - The flag tail_flags[i] is set for item
- *   input[i] when
- *   flag_op(input[i], next-item)
- *   returns \p true (where next-item is either the next item
- *   in the same thread or the first item in the next thread).
- * - For thread BLOCK_THREADS-1, item
- *   input[ITEMS_PER_THREAD-1] is always flagged.
- * - \blocked
- * - \granularity
- * - \smemreuse
- *
- * \par Snippet
- * The code snippet below illustrates the head- and tail-flagging of 512 integer items that
- * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads
- * where each thread owns 4 consecutive items.
- * \par
- * \code
- * #include <cub/cub.cuh>   // or equivalently <cub/block/block_discontinuity.cuh>
- *
- * __global__ void ExampleKernel(...)
- * {
- *     // Specialize BlockDiscontinuity for a 1D block of 128 threads on type int
- *     typedef cub::BlockDiscontinuity<int, 128> BlockDiscontinuity;
- *
- *     // Allocate shared memory for BlockDiscontinuity
- *     __shared__ typename BlockDiscontinuity::TempStorage temp_storage;
- *
- *     // Obtain a segment of consecutive items that are blocked across threads
- *     int thread_data[4];
- *     ...
- *
- *     // Have thread 0 obtain the predecessor item for the entire tile
- *     int tile_predecessor_item;
- *     if (threadIdx.x == 0) tile_predecessor_item = ...
- *
- *     // Collectively compute head and tail flags for discontinuities in the segment
- *     int head_flags[4];
- *     int tail_flags[4];
- *     BlockDiscontinuity(temp_storage).FlagHeadsAndTails(
- *         head_flags, tile_predecessor_item, tail_flags, thread_data, cub::Inequality());
- *
- * \endcode
- * \par
- * Suppose the set of input \p thread_data across the block of threads is
- * { [0,0,1,1], [1,1,1,1], [2,3,3,3], ..., [124,125,125,125] }
- * and that the \p tile_predecessor_item is \p 0. The corresponding output \p head_flags
- * in those threads will be { [0,0,1,0], [0,0,0,0], [1,1,0,0], [0,1,0,0], ... },
- * and the corresponding output \p tail_flags in those threads will be
- * { [0,1,0,0], [0,0,0,1], [1,0,0,...], ..., [1,0,0,1] }.
- *
- * \tparam ITEMS_PER_THREAD   [inferred] The number of consecutive items partitioned onto each thread.
- * \tparam FlagT              [inferred] The flag type (must be an integer type)
- * \tparam FlagOp             [inferred] Binary predicate functor type having member T operator()(const T &a, const T &b) or member T operator()(const T &a, const T &b, unsigned int b_index), and returning \p true if a discontinuity exists between \p a and \p b, otherwise \p false. \p b_index is the rank of b in the aggregate tile of data.
- */
-template <
-    int             ITEMS_PER_THREAD,
-    typename        FlagT,
-    typename        FlagOp>
-__device__ __forceinline__ void FlagHeadsAndTails(
-    FlagT           (&head_flags)[ITEMS_PER_THREAD],    ///< [out] Calling thread's discontinuity head_flags
-    T               tile_predecessor_item,              ///< [in] [thread 0 only] Item with which to compare the first tile item (input[0] from thread 0).
-    FlagT           (&tail_flags)[ITEMS_PER_THREAD],    ///< [out] Calling thread's discontinuity tail_flags
-    T               (&input)[ITEMS_PER_THREAD],         ///< [in] Calling thread's input items
-    FlagOp          flag_op)                            ///< [in] Binary boolean flag predicate
-{
-    // Share first and last items
-    temp_storage.first_items[linear_tid] = input[0];
-    temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1];
-
-    CTA_SYNC();
-
-    T preds[ITEMS_PER_THREAD];
-
-    // Set flag for first thread-item
-    preds[0] = (linear_tid == 0) ?
-        tile_predecessor_item :              // First thread
-        temp_storage.last_items[linear_tid - 1];
-
-    head_flags[0] = ApplyOp<FlagOp>::FlagT(
-        flag_op,
-        preds[0],
-        input[0],
-        linear_tid * ITEMS_PER_THREAD);
-
-    // Set flag for last thread-item
-    tail_flags[ITEMS_PER_THREAD - 1] = (linear_tid == BLOCK_THREADS - 1) ?
-        1 :                             // Last thread
-        ApplyOp<FlagOp>::FlagT(
-            flag_op,
-            input[ITEMS_PER_THREAD - 1],
-            temp_storage.first_items[linear_tid + 1],
-            (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD);
-
-    // Set head_flags for remaining items
-    Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op);
-
-    // Set tail_flags for remaining items
-    Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op);
-}
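-
-    // Illustrative sketch (editor's addition): seeding \p tile_predecessor_item when
-    // consecutive thread blocks process adjacent tiles of one sequence ('d_in' and
-    // 'block_offset' are hypothetical):
-    //
-    //     int tile_predecessor_item = 0;
-    //     if (threadIdx.x == 0 && blockIdx.x > 0)
-    //         tile_predecessor_item = d_in[block_offset - 1];   // last item of the previous tile
-    //
-    //     int head_flags[4];
-    //     int tail_flags[4];
-    //     BlockDiscontinuity(temp_storage).FlagHeadsAndTails(
-    //         head_flags, tile_predecessor_item, tail_flags, thread_data, cub::Inequality());
-    //
-    // Note that this overload always compares input[0] of thread 0 against the seed, so the
-    // very first tile should instead use an overload above that flags its first item
-    // unconditionally.
-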
-
-
-/**
- * \brief Sets both head and tail flags indicating discontinuities between items partitioned across the thread block.
- *
- * \par
- * - The flag head_flags[i] is set for item
- *   input[i] when
- *   flag_op(previous-item, input[i])
- *   returns \p true (where previous-item is either the preceding item
- *   in the same thread or the last item in the previous thread).
- * - For thread 0, item input[0] is compared
- *   against \p tile_predecessor_item.
- * - The flag tail_flags[i] is set for item
- *   input[i] when
- *   flag_op(input[i], next-item)
- *   returns \p true (where next-item is either the next item
- *   in the same thread or the first item in the next thread).
- * - For thread BLOCK_THREADS-1, item
- *   input[ITEMS_PER_THREAD-1] is compared
- *   against \p tile_successor_item.
- * - \blocked
- * - \granularity
- * - \smemreuse
- *
- * \par Snippet
- * The code snippet below illustrates the head- and tail-flagging of 512 integer items that
- * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads
- * where each thread owns 4 consecutive items.
- * \par
- * \code
- * #include <cub/cub.cuh>   // or equivalently <cub/block/block_discontinuity.cuh>
- *
- * __global__ void ExampleKernel(...)
- * {
- *     // Specialize BlockDiscontinuity for a 1D block of 128 threads on type int
- *     typedef cub::BlockDiscontinuity<int, 128> BlockDiscontinuity;
- *
- *     // Allocate shared memory for BlockDiscontinuity
- *     __shared__ typename BlockDiscontinuity::TempStorage temp_storage;
- *
- *     // Obtain a segment of consecutive items that are blocked across threads
- *     int thread_data[4];
- *     ...
- *
- *     // Have thread 0 obtain the predecessor item for the entire tile
- *     int tile_predecessor_item;
- *     if (threadIdx.x == 0) tile_predecessor_item = ...
- *
- *     // Have thread 127 obtain the successor item for the entire tile
- *     int tile_successor_item;
- *     if (threadIdx.x == 127) tile_successor_item = ...
- *
- *     // Collectively compute head and tail flags for discontinuities in the segment
- *     int head_flags[4];
- *     int tail_flags[4];
- *     BlockDiscontinuity(temp_storage).FlagHeadsAndTails(
- *         head_flags, tile_predecessor_item, tail_flags, tile_successor_item,
- *         thread_data, cub::Inequality());
- *
- * \endcode
- * \par
- * Suppose the set of input \p thread_data across the block of threads is
- * { [0,0,1,1], [1,1,1,1], [2,3,3,3], ..., [124,125,125,125] },
- * that the \p tile_predecessor_item is \p 0, and that the
- * \p tile_successor_item is \p 125. The corresponding output \p head_flags
- * in those threads will be { [0,0,1,0], [0,0,0,0], [1,1,0,0], [0,1,0,0], ... },
- * and the corresponding output \p tail_flags in those threads will be
- * { [0,1,0,0], [0,0,0,1], [1,0,0,...], ..., [1,0,0,0] }.
- *
- * \tparam ITEMS_PER_THREAD   [inferred] The number of consecutive items partitioned onto each thread.
- * \tparam FlagT              [inferred] The flag type (must be an integer type)
- * \tparam FlagOp             [inferred] Binary predicate functor type having member T operator()(const T &a, const T &b) or member T operator()(const T &a, const T &b, unsigned int b_index), and returning \p true if a discontinuity exists between \p a and \p b, otherwise \p false. \p b_index is the rank of b in the aggregate tile of data.
- */
-template <
-    int             ITEMS_PER_THREAD,
-    typename        FlagT,
-    typename        FlagOp>
-__device__ __forceinline__ void FlagHeadsAndTails(
-    FlagT           (&head_flags)[ITEMS_PER_THREAD],    ///< [out] Calling thread's discontinuity head_flags
-    T               tile_predecessor_item,              ///< [in] [thread 0 only] Item with which to compare the first tile item (input[0] from thread 0).
-    FlagT           (&tail_flags)[ITEMS_PER_THREAD],    ///< [out] Calling thread's discontinuity tail_flags
-    T               tile_successor_item,                ///< [in] [thread BLOCK_THREADS-1 only] Item with which to compare the last tile item (input[ITEMS_PER_THREAD-1] from thread BLOCK_THREADS-1).
- T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - FlagOp flag_op) ///< [in] Binary boolean flag predicate - { - // Share first and last items - temp_storage.first_items[linear_tid] = input[0]; - temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; - - CTA_SYNC(); - - T preds[ITEMS_PER_THREAD]; - - // Set flag for first thread-item - preds[0] = (linear_tid == 0) ? - tile_predecessor_item : // First thread - temp_storage.last_items[linear_tid - 1]; - - head_flags[0] = ApplyOp::FlagT( - flag_op, - preds[0], - input[0], - linear_tid * ITEMS_PER_THREAD); - - // Set flag for last thread-item - T successor_item = (linear_tid == BLOCK_THREADS - 1) ? - tile_successor_item : // Last thread - temp_storage.first_items[linear_tid + 1]; - - tail_flags[ITEMS_PER_THREAD - 1] = ApplyOp::FlagT( - flag_op, - input[ITEMS_PER_THREAD - 1], - successor_item, - (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); - - // Set head_flags for remaining items - Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op); - - // Set tail_flags for remaining items - Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op); - } - - - - - //@} end member group - -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/block/block_exchange.cuh b/ml-xgboost/cub/cub/block/block_exchange.cuh deleted file mode 100644 index 5217976..0000000 --- a/ml-xgboost/cub/cub/block/block_exchange.cuh +++ /dev/null @@ -1,1248 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * The cub::BlockExchange class provides [collective](index.html#sec0) methods for rearranging data partitioned across a CUDA thread block. 
- */ - -#pragma once - -#include "../util_ptx.cuh" -#include "../util_arch.cuh" -#include "../util_macro.cuh" -#include "../util_type.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/** - * \brief The BlockExchange class provides [collective](index.html#sec0) methods for rearranging data partitioned across a CUDA thread block. ![](transpose_logo.png) - * \ingroup BlockModule - * - * \tparam T The data type to be exchanged. - * \tparam BLOCK_DIM_X The thread block length in threads along the X dimension - * \tparam ITEMS_PER_THREAD The number of items partitioned onto each thread. - * \tparam WARP_TIME_SLICING [optional] When \p true, only use enough shared memory for a single warp's worth of tile data, time-slicing the block-wide exchange over multiple synchronized rounds. Yields a smaller memory footprint at the expense of decreased parallelism. (Default: false) - * \tparam BLOCK_DIM_Y [optional] The thread block length in threads along the Y dimension (default: 1) - * \tparam BLOCK_DIM_Z [optional] The thread block length in threads along the Z dimension (default: 1) - * \tparam PTX_ARCH [optional] \ptxversion - * - * \par Overview - * - It is commonplace for blocks of threads to rearrange data items between - * threads. For example, the device-accessible memory subsystem prefers access patterns - * where data items are "striped" across threads (where consecutive threads access consecutive items), - * yet most block-wide operations prefer a "blocked" partitioning of items across threads - * (where consecutive items belong to a single thread). - * - BlockExchange supports the following types of data exchanges: - * - Transposing between [blocked](index.html#sec5sec3) and [striped](index.html#sec5sec3) arrangements - * - Transposing between [blocked](index.html#sec5sec3) and [warp-striped](index.html#sec5sec3) arrangements - * - Scattering ranked items to a [blocked arrangement](index.html#sec5sec3) - * - Scattering ranked items to a [striped arrangement](index.html#sec5sec3) - * - \rowmajor - * - * \par A Simple Example - * \blockcollective{BlockExchange} - * \par - * The code snippet below illustrates the conversion from a "blocked" to a "striped" arrangement - * of 512 integer items partitioned across 128 threads where each thread owns 4 items. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(int *d_data, ...) - * { - * // Specialize BlockExchange for a 1D block of 128 threads owning 4 integer items each - * typedef cub::BlockExchange BlockExchange; - * - * // Allocate shared memory for BlockExchange - * __shared__ typename BlockExchange::TempStorage temp_storage; - * - * // Load a tile of data striped across threads - * int thread_data[4]; - * cub::LoadDirectStriped<128>(threadIdx.x, d_data, thread_data); - * - * // Collectively exchange data into a blocked arrangement across threads - * BlockExchange(temp_storage).StripedToBlocked(thread_data); - * - * \endcode - * \par - * Suppose the set of striped input \p thread_data across the block of threads is - * { [0,128,256,384], [1,129,257,385], ..., [127,255,383,511] }. - * The corresponding output \p thread_data in those threads will be - * { [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }. - * - * \par Performance Considerations - * - Proper device-specific padding ensures zero bank conflicts for most types. 
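- *
- * \par
- * As a hedged usage sketch (not from the original header), the ScatterTo* methods below
- * place each item at a caller-supplied rank rather than at a rank implied by the thread id:
- * \par
- * \code
- * int thread_data[4];
- * int ranks[4];    // hypothetical per-item scatter ranks, e.g. produced by cub::BlockRadixRank
- * ...
- * BlockExchange(temp_storage).ScatterToStriped(thread_data, thread_data, ranks);
- * \endcode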
- * - */ -template < - typename InputT, - int BLOCK_DIM_X, - int ITEMS_PER_THREAD, - bool WARP_TIME_SLICING = false, - int BLOCK_DIM_Y = 1, - int BLOCK_DIM_Z = 1, - int PTX_ARCH = CUB_PTX_ARCH> -class BlockExchange -{ -private: - - /****************************************************************************** - * Constants - ******************************************************************************/ - - /// Constants - enum - { - /// The thread block size in threads - BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, - - LOG_WARP_THREADS = CUB_LOG_WARP_THREADS(PTX_ARCH), - WARP_THREADS = 1 << LOG_WARP_THREADS, - WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS, - - LOG_SMEM_BANKS = CUB_LOG_SMEM_BANKS(PTX_ARCH), - SMEM_BANKS = 1 << LOG_SMEM_BANKS, - - TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, - - TIME_SLICES = (WARP_TIME_SLICING) ? WARPS : 1, - - TIME_SLICED_THREADS = (WARP_TIME_SLICING) ? CUB_MIN(BLOCK_THREADS, WARP_THREADS) : BLOCK_THREADS, - TIME_SLICED_ITEMS = TIME_SLICED_THREADS * ITEMS_PER_THREAD, - - WARP_TIME_SLICED_THREADS = CUB_MIN(BLOCK_THREADS, WARP_THREADS), - WARP_TIME_SLICED_ITEMS = WARP_TIME_SLICED_THREADS * ITEMS_PER_THREAD, - - // Insert padding to avoid bank conflicts during raking when items per thread is a power of two and > 4 (otherwise we can typically use 128b loads) - INSERT_PADDING = (ITEMS_PER_THREAD > 4) && (PowerOfTwo::VALUE), - PADDING_ITEMS = (INSERT_PADDING) ? (TIME_SLICED_ITEMS >> LOG_SMEM_BANKS) : 0, - }; - - /****************************************************************************** - * Type definitions - ******************************************************************************/ - - /// Shared memory storage layout type - struct __align__(16) _TempStorage - { - InputT buff[TIME_SLICED_ITEMS + PADDING_ITEMS]; - }; - -public: - - /// \smemstorage{BlockExchange} - struct TempStorage : Uninitialized<_TempStorage> {}; - -private: - - - /****************************************************************************** - * Thread fields - ******************************************************************************/ - - /// Shared storage reference - _TempStorage &temp_storage; - - /// Linear thread-id - unsigned int linear_tid; - unsigned int lane_id; - unsigned int warp_id; - unsigned int warp_offset; - - - /****************************************************************************** - * Utility methods - ******************************************************************************/ - - /// Internal storage allocator - __device__ __forceinline__ _TempStorage& PrivateStorage() - { - __shared__ _TempStorage private_storage; - return private_storage; - } - - - /** - * Transposes data items from blocked arrangement to striped arrangement. Specialized for no timeslicing. - */ - template - __device__ __forceinline__ void BlockedToStriped( - InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between blocked and striped arrangements. - OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between blocked and striped arrangements. 
- Int2Type /*time_slicing*/) - { - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = (linear_tid * ITEMS_PER_THREAD) + ITEM; - if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; - temp_storage.buff[item_offset] = input_items[ITEM]; - } - - CTA_SYNC(); - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = int(ITEM * BLOCK_THREADS) + linear_tid; - if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; - output_items[ITEM] = temp_storage.buff[item_offset]; - } - } - - - /** - * Transposes data items from blocked arrangement to striped arrangement. Specialized for warp-timeslicing. - */ - template - __device__ __forceinline__ void BlockedToStriped( - InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between blocked and striped arrangements. - OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between blocked and striped arrangements. - Int2Type /*time_slicing*/) - { - InputT temp_items[ITEMS_PER_THREAD]; - - #pragma unroll - for (int SLICE = 0; SLICE < TIME_SLICES; SLICE++) - { - const int SLICE_OFFSET = SLICE * TIME_SLICED_ITEMS; - const int SLICE_OOB = SLICE_OFFSET + TIME_SLICED_ITEMS; - - CTA_SYNC(); - - if (warp_id == SLICE) - { - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = (lane_id * ITEMS_PER_THREAD) + ITEM; - if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; - temp_storage.buff[item_offset] = input_items[ITEM]; - } - } - - CTA_SYNC(); - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - // Read a strip of items - const int STRIP_OFFSET = ITEM * BLOCK_THREADS; - const int STRIP_OOB = STRIP_OFFSET + BLOCK_THREADS; - - if ((SLICE_OFFSET < STRIP_OOB) && (SLICE_OOB > STRIP_OFFSET)) - { - int item_offset = STRIP_OFFSET + linear_tid - SLICE_OFFSET; - if ((item_offset >= 0) && (item_offset < TIME_SLICED_ITEMS)) - { - if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; - temp_items[ITEM] = temp_storage.buff[item_offset]; - } - } - } - } - - // Copy - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - output_items[ITEM] = temp_items[ITEM]; - } - } - - - /** - * Transposes data items from blocked arrangement to warp-striped arrangement. Specialized for no timeslicing - */ - template - __device__ __forceinline__ void BlockedToWarpStriped( - InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between blocked and striped arrangements. - OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between blocked and striped arrangements. - Int2Type /*time_slicing*/) - { - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = warp_offset + ITEM + (lane_id * ITEMS_PER_THREAD); - if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; - temp_storage.buff[item_offset] = input_items[ITEM]; - } - - WARP_SYNC(0xffffffff); - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = warp_offset + (ITEM * WARP_TIME_SLICED_THREADS) + lane_id; - if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; - output_items[ITEM] = temp_storage.buff[item_offset]; - } - } - - /** - * Transposes data items from blocked arrangement to warp-striped arrangement. 
Specialized for warp-timeslicing - */ - template - __device__ __forceinline__ void BlockedToWarpStriped( - InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between blocked and striped arrangements. - OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between blocked and striped arrangements. - Int2Type /*time_slicing*/) - { - if (warp_id == 0) - { - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = ITEM + (lane_id * ITEMS_PER_THREAD); - if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; - temp_storage.buff[item_offset] = input_items[ITEM]; - } - - WARP_SYNC(0xffffffff); - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = (ITEM * WARP_TIME_SLICED_THREADS) + lane_id; - if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; - output_items[ITEM] = temp_storage.buff[item_offset]; - } - } - - #pragma unroll - for (unsigned int SLICE = 1; SLICE < TIME_SLICES; ++SLICE) - { - CTA_SYNC(); - - if (warp_id == SLICE) - { - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = ITEM + (lane_id * ITEMS_PER_THREAD); - if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; - temp_storage.buff[item_offset] = input_items[ITEM]; - } - - WARP_SYNC(0xffffffff); - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = (ITEM * WARP_TIME_SLICED_THREADS) + lane_id; - if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; - output_items[ITEM] = temp_storage.buff[item_offset]; - } - } - } - } - - - /** - * Transposes data items from striped arrangement to blocked arrangement. Specialized for no timeslicing. - */ - template - __device__ __forceinline__ void StripedToBlocked( - InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between blocked and striped arrangements. - OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between blocked and striped arrangements. - Int2Type /*time_slicing*/) - { - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = int(ITEM * BLOCK_THREADS) + linear_tid; - if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; - temp_storage.buff[item_offset] = input_items[ITEM]; - } - - CTA_SYNC(); - - // No timeslicing - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = (linear_tid * ITEMS_PER_THREAD) + ITEM; - if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; - output_items[ITEM] = temp_storage.buff[item_offset]; - } - } - - - /** - * Transposes data items from striped arrangement to blocked arrangement. Specialized for warp-timeslicing. - */ - template - __device__ __forceinline__ void StripedToBlocked( - InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between blocked and striped arrangements. - OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between blocked and striped arrangements. 
- Int2Type /*time_slicing*/) - { - // Warp time-slicing - InputT temp_items[ITEMS_PER_THREAD]; - - #pragma unroll - for (int SLICE = 0; SLICE < TIME_SLICES; SLICE++) - { - const int SLICE_OFFSET = SLICE * TIME_SLICED_ITEMS; - const int SLICE_OOB = SLICE_OFFSET + TIME_SLICED_ITEMS; - - CTA_SYNC(); - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - // Write a strip of items - const int STRIP_OFFSET = ITEM * BLOCK_THREADS; - const int STRIP_OOB = STRIP_OFFSET + BLOCK_THREADS; - - if ((SLICE_OFFSET < STRIP_OOB) && (SLICE_OOB > STRIP_OFFSET)) - { - int item_offset = STRIP_OFFSET + linear_tid - SLICE_OFFSET; - if ((item_offset >= 0) && (item_offset < TIME_SLICED_ITEMS)) - { - if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; - temp_storage.buff[item_offset] = input_items[ITEM]; - } - } - } - - CTA_SYNC(); - - if (warp_id == SLICE) - { - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = (lane_id * ITEMS_PER_THREAD) + ITEM; - if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; - temp_items[ITEM] = temp_storage.buff[item_offset]; - } - } - } - - // Copy - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - output_items[ITEM] = temp_items[ITEM]; - } - } - - - /** - * Transposes data items from warp-striped arrangement to blocked arrangement. Specialized for no timeslicing - */ - template - __device__ __forceinline__ void WarpStripedToBlocked( - InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between blocked and striped arrangements. - OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between blocked and striped arrangements. - Int2Type /*time_slicing*/) - { - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = warp_offset + (ITEM * WARP_TIME_SLICED_THREADS) + lane_id; - if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; - temp_storage.buff[item_offset] = input_items[ITEM]; - } - - WARP_SYNC(0xffffffff); - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = warp_offset + ITEM + (lane_id * ITEMS_PER_THREAD); - if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; - output_items[ITEM] = temp_storage.buff[item_offset]; - } - } - - - /** - * Transposes data items from warp-striped arrangement to blocked arrangement. Specialized for warp-timeslicing - */ - template - __device__ __forceinline__ void WarpStripedToBlocked( - InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between blocked and striped arrangements. - OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between blocked and striped arrangements. 
- Int2Type /*time_slicing*/) - { - #pragma unroll - for (unsigned int SLICE = 0; SLICE < TIME_SLICES; ++SLICE) - { - CTA_SYNC(); - - if (warp_id == SLICE) - { - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = (ITEM * WARP_TIME_SLICED_THREADS) + lane_id; - if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; - temp_storage.buff[item_offset] = input_items[ITEM]; - } - - WARP_SYNC(0xffffffff); - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = ITEM + (lane_id * ITEMS_PER_THREAD); - if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; - output_items[ITEM] = temp_storage.buff[item_offset]; - } - } - } - } - - - /** - * Exchanges data items annotated by rank into blocked arrangement. Specialized for no timeslicing. - */ - template - __device__ __forceinline__ void ScatterToBlocked( - InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between blocked and striped arrangements. - OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between blocked and striped arrangements. - OffsetT ranks[ITEMS_PER_THREAD], ///< [in] Corresponding scatter ranks - Int2Type /*time_slicing*/) - { - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = ranks[ITEM]; - if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); - temp_storage.buff[item_offset] = input_items[ITEM]; - } - - CTA_SYNC(); - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = (linear_tid * ITEMS_PER_THREAD) + ITEM; - if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); - output_items[ITEM] = temp_storage.buff[item_offset]; - } - } - - /** - * Exchanges data items annotated by rank into blocked arrangement. Specialized for warp-timeslicing. - */ - template - __device__ __forceinline__ void ScatterToBlocked( - InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between blocked and striped arrangements. - OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between blocked and striped arrangements. - OffsetT ranks[ITEMS_PER_THREAD], ///< [in] Corresponding scatter ranks - Int2Type /*time_slicing*/) - { - InputT temp_items[ITEMS_PER_THREAD]; - - #pragma unroll - for (int SLICE = 0; SLICE < TIME_SLICES; SLICE++) - { - CTA_SYNC(); - - const int SLICE_OFFSET = TIME_SLICED_ITEMS * SLICE; - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = ranks[ITEM] - SLICE_OFFSET; - if ((item_offset >= 0) && (item_offset < WARP_TIME_SLICED_ITEMS)) - { - if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); - temp_storage.buff[item_offset] = input_items[ITEM]; - } - } - - CTA_SYNC(); - - if (warp_id == SLICE) - { - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = (lane_id * ITEMS_PER_THREAD) + ITEM; - if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); - temp_items[ITEM] = temp_storage.buff[item_offset]; - } - } - } - - // Copy - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - output_items[ITEM] = temp_items[ITEM]; - } - } - - - /** - * Exchanges data items annotated by rank into striped arrangement. Specialized for no timeslicing. 
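- * (Illustrative note: with INSERT_PADDING the scatter offsets below are skewed from x to
- * x + (x >> LOG_SMEM_BANKS); e.g. with 32 banks (LOG_SMEM_BANKS == 5) an offset of 37
- * becomes 37 + 1 = 38, so strides that would repeatedly hit the same bank are spread out.)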
- */ - template - __device__ __forceinline__ void ScatterToStriped( - InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between blocked and striped arrangements. - OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between blocked and striped arrangements. - OffsetT ranks[ITEMS_PER_THREAD], ///< [in] Corresponding scatter ranks - Int2Type /*time_slicing*/) - { - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = ranks[ITEM]; - if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); - temp_storage.buff[item_offset] = input_items[ITEM]; - } - - CTA_SYNC(); - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = int(ITEM * BLOCK_THREADS) + linear_tid; - if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); - output_items[ITEM] = temp_storage.buff[item_offset]; - } - } - - - /** - * Exchanges data items annotated by rank into striped arrangement. Specialized for warp-timeslicing. - */ - template - __device__ __forceinline__ void ScatterToStriped( - InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between blocked and striped arrangements. - OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items to exchange, converting between blocked and striped arrangements. - OffsetT ranks[ITEMS_PER_THREAD], ///< [in] Corresponding scatter ranks - Int2Type /*time_slicing*/) - { - InputT temp_items[ITEMS_PER_THREAD]; - - #pragma unroll - for (int SLICE = 0; SLICE < TIME_SLICES; SLICE++) - { - const int SLICE_OFFSET = SLICE * TIME_SLICED_ITEMS; - const int SLICE_OOB = SLICE_OFFSET + TIME_SLICED_ITEMS; - - CTA_SYNC(); - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = ranks[ITEM] - SLICE_OFFSET; - if ((item_offset >= 0) && (item_offset < WARP_TIME_SLICED_ITEMS)) - { - if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); - temp_storage.buff[item_offset] = input_items[ITEM]; - } - } - - CTA_SYNC(); - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - // Read a strip of items - const int STRIP_OFFSET = ITEM * BLOCK_THREADS; - const int STRIP_OOB = STRIP_OFFSET + BLOCK_THREADS; - - if ((SLICE_OFFSET < STRIP_OOB) && (SLICE_OOB > STRIP_OFFSET)) - { - int item_offset = STRIP_OFFSET + linear_tid - SLICE_OFFSET; - if ((item_offset >= 0) && (item_offset < TIME_SLICED_ITEMS)) - { - if (INSERT_PADDING) item_offset += item_offset >> LOG_SMEM_BANKS; - temp_items[ITEM] = temp_storage.buff[item_offset]; - } - } - } - } - - // Copy - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - output_items[ITEM] = temp_items[ITEM]; - } - } - - -public: - - /******************************************************************//** - * \name Collective constructors - *********************************************************************/ - //@{ - - /** - * \brief Collective constructor using a private static allocation of shared memory as temporary storage. - */ - __device__ __forceinline__ BlockExchange() - : - temp_storage(PrivateStorage()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)), - warp_id((WARPS == 1) ? 0 : linear_tid / WARP_THREADS), - lane_id(LaneId()), - warp_offset(warp_id * WARP_TIME_SLICED_ITEMS) - {} - - - /** - * \brief Collective constructor using the specified memory allocation as temporary storage. 
- */
-__device__ __forceinline__ BlockExchange(
-    TempStorage &temp_storage)             ///< [in] Reference to memory allocation having layout type TempStorage
-:
-    temp_storage(temp_storage.Alias()),
-    linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)),
-    lane_id(LaneId()),
-    warp_id((WARPS == 1) ? 0 : linear_tid / WARP_THREADS),
-    warp_offset(warp_id * WARP_TIME_SLICED_ITEMS)
-{}
-
-
-//@}  end member group
-/******************************************************************//**
- * \name Structured exchanges
- *********************************************************************/
-//@{
-
-/**
- * \brief Transposes data items from striped arrangement to blocked arrangement.
- *
- * \par
- * - \smemreuse
- *
- * \par Snippet
- * The code snippet below illustrates the conversion from a "striped" to a "blocked" arrangement
- * of 512 integer items partitioned across 128 threads where each thread owns 4 items.
- * \par
- * \code
- * #include <cub/cub.cuh>   // or equivalently <cub/block/block_exchange.cuh>
- *
- * __global__ void ExampleKernel(int *d_data, ...)
- * {
- *     // Specialize BlockExchange for a 1D block of 128 threads owning 4 integer items each
- *     typedef cub::BlockExchange<int, 128, 4> BlockExchange;
- *
- *     // Allocate shared memory for BlockExchange
- *     __shared__ typename BlockExchange::TempStorage temp_storage;
- *
- *     // Load a tile of ordered data into a striped arrangement across block threads
- *     int thread_data[4];
- *     cub::LoadDirectStriped<128>(threadIdx.x, d_data, thread_data);
- *
- *     // Collectively exchange data into a blocked arrangement across threads
- *     BlockExchange(temp_storage).StripedToBlocked(thread_data, thread_data);
- *
- * \endcode
- * \par
- * Suppose the set of striped input \p thread_data across the block of threads is
- * { [0,128,256,384], [1,129,257,385], ..., [127,255,383,511] } after loading from device-accessible memory.
- * The corresponding output \p thread_data in those threads will be
- * { [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }.
- *
- */
-template <typename OutputT>
-__device__ __forceinline__ void StripedToBlocked(
-    InputT      input_items[ITEMS_PER_THREAD],    ///< [in] Items to exchange, converting between striped and blocked arrangements.
-    OutputT     output_items[ITEMS_PER_THREAD])   ///< [out] Items from exchange, converting between striped and blocked arrangements.
-{
-    StripedToBlocked(input_items, output_items, Int2Type<WARP_TIME_SLICING>());
-}
-
-
-/**
- * \brief Transposes data items from blocked arrangement to striped arrangement.
- *
- * \par
- * - \smemreuse
- *
- * \par Snippet
- * The code snippet below illustrates the conversion from a "blocked" to a "striped" arrangement
- * of 512 integer items partitioned across 128 threads where each thread owns 4 items.
- * \par
- * \code
- * #include <cub/cub.cuh>   // or equivalently <cub/block/block_exchange.cuh>
- *
- * __global__ void ExampleKernel(int *d_data, ...)
- * {
- *     // Specialize BlockExchange for a 1D block of 128 threads owning 4 integer items each
- *     typedef cub::BlockExchange<int, 128, 4> BlockExchange;
- *
- *     // Allocate shared memory for BlockExchange
- *     __shared__ typename BlockExchange::TempStorage temp_storage;
- *
- *     // Obtain a segment of consecutive items that are blocked across threads
- *     int thread_data[4];
- *     ...
- *
- *     // Collectively exchange data into a striped arrangement across threads
- *     BlockExchange(temp_storage).BlockedToStriped(thread_data, thread_data);
- *
- *     // Store data striped across block threads into an ordered tile
- *     cub::StoreDirectStriped<128>(threadIdx.x, d_data, thread_data);
- *
- * \endcode
- * \par
- * Suppose the set of blocked input \p thread_data across the block of threads is
- * { [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }.
- * The corresponding output \p thread_data in those threads will be
- * { [0,128,256,384], [1,129,257,385], ..., [127,255,383,511] } in
- * preparation for storing to device-accessible memory.
- *
- */
-template <typename OutputT>
-__device__ __forceinline__ void BlockedToStriped(
-    InputT      input_items[ITEMS_PER_THREAD],    ///< [in] Items to exchange, converting between striped and blocked arrangements.
-    OutputT     output_items[ITEMS_PER_THREAD])   ///< [out] Items from exchange, converting between striped and blocked arrangements.
-{
-    BlockedToStriped(input_items, output_items, Int2Type<WARP_TIME_SLICING>());
-}
-
-
-/**
- * \brief Transposes data items from warp-striped arrangement to blocked arrangement.
- *
- * \par
- * - \smemreuse
- *
- * \par Snippet
- * The code snippet below illustrates the conversion from a "warp-striped" to a "blocked" arrangement
- * of 512 integer items partitioned across 128 threads where each thread owns 4 items.
- * \par
- * \code
- * #include <cub/cub.cuh>   // or equivalently <cub/block/block_exchange.cuh>
- *
- * __global__ void ExampleKernel(int *d_data, ...)
- * {
- *     // Specialize BlockExchange for a 1D block of 128 threads owning 4 integer items each
- *     typedef cub::BlockExchange<int, 128, 4> BlockExchange;
- *
- *     // Allocate shared memory for BlockExchange
- *     __shared__ typename BlockExchange::TempStorage temp_storage;
- *
- *     // Load a tile of ordered data into a warp-striped arrangement across warp threads
- *     int thread_data[4];
- *     cub::LoadDirectWarpStriped(threadIdx.x, d_data, thread_data);
- *
- *     // Collectively exchange data into a blocked arrangement across threads
- *     BlockExchange(temp_storage).WarpStripedToBlocked(thread_data, thread_data);
- *
- * \endcode
- * \par
- * Suppose the set of warp-striped input \p thread_data across the block of threads is
- * { [0,32,64,96], [1,33,65,97], [2,34,66,98], ..., [415,447,479,511] }
- * after loading from device-accessible memory.  (The first 128 items are striped across
- * the first warp of 32 threads, the second 128 items are striped across the second warp, etc.)
- * The corresponding output \p thread_data in those threads will be
- * { [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }.
- *
- */
-template <typename OutputT>
-__device__ __forceinline__ void WarpStripedToBlocked(
-    InputT      input_items[ITEMS_PER_THREAD],    ///< [in] Items to exchange, converting between striped and blocked arrangements.
-    OutputT     output_items[ITEMS_PER_THREAD])   ///< [out] Items from exchange, converting between striped and blocked arrangements.
-{
-    WarpStripedToBlocked(input_items, output_items, Int2Type<WARP_TIME_SLICING>());
-}
-
-
-/**
- * \brief Transposes data items from blocked arrangement to warp-striped arrangement.
- *
- * \par
- * - \smemreuse
- *
- * \par Snippet
- * The code snippet below illustrates the conversion from a "blocked" to a "warp-striped" arrangement
- * of 512 integer items partitioned across 128 threads where each thread owns 4 items.
- * \par
- * \code
- * #include <cub/cub.cuh>   // or equivalently <cub/block/block_exchange.cuh>
- *
- * __global__ void ExampleKernel(int *d_data, ...)
- * { - * // Specialize BlockExchange for a 1D block of 128 threads owning 4 integer items each - * typedef cub::BlockExchange BlockExchange; - * - * // Allocate shared memory for BlockExchange - * __shared__ typename BlockExchange::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * ... - * - * // Collectively exchange data into a warp-striped arrangement across threads - * BlockExchange(temp_storage).BlockedToWarpStriped(thread_data, thread_data); - * - * // Store data striped across warp threads into an ordered tile - * cub::StoreDirectStriped(threadIdx.x, d_data, thread_data); - * - * \endcode - * \par - * Suppose the set of blocked input \p thread_data across the block of threads is - * { [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }. - * The corresponding output \p thread_data in those threads will be - * { [0,32,64,96], [1,33,65,97], [2,34,66,98], ..., [415,447,479,511] } - * in preparation for storing to device-accessible memory. (The first 128 items are striped across - * the first warp of 32 threads, the second 128 items are striped across the second warp, etc.) - * - */ - template - __device__ __forceinline__ void BlockedToWarpStriped( - InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between striped and blocked arrangements. - OutputT output_items[ITEMS_PER_THREAD]) ///< [out] Items from exchange, converting between striped and blocked arrangements. - { - BlockedToWarpStriped(input_items, output_items, Int2Type()); - } - - - - //@} end member group - /******************************************************************//** - * \name Scatter exchanges - *********************************************************************/ - //@{ - - - /** - * \brief Exchanges data items annotated by rank into blocked arrangement. - * - * \par - * - \smemreuse - * - * \tparam OffsetT [inferred] Signed integer type for local offsets - */ - template - __device__ __forceinline__ void ScatterToBlocked( - InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between striped and blocked arrangements. - OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items from exchange, converting between striped and blocked arrangements. - OffsetT ranks[ITEMS_PER_THREAD]) ///< [in] Corresponding scatter ranks - { - ScatterToBlocked(input_items, output_items, ranks, Int2Type()); - } - - - - /** - * \brief Exchanges data items annotated by rank into striped arrangement. - * - * \par - * - \smemreuse - * - * \tparam OffsetT [inferred] Signed integer type for local offsets - */ - template - __device__ __forceinline__ void ScatterToStriped( - InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between striped and blocked arrangements. - OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items from exchange, converting between striped and blocked arrangements. - OffsetT ranks[ITEMS_PER_THREAD]) ///< [in] Corresponding scatter ranks - { - ScatterToStriped(input_items, output_items, ranks, Int2Type()); - } - - - - /** - * \brief Exchanges data items annotated by rank into striped arrangement. Items with rank -1 are not exchanged. - * - * \par - * - \smemreuse - * - * \tparam OffsetT [inferred] Signed integer type for local offsets - */ - template - __device__ __forceinline__ void ScatterToStripedGuarded( - InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between striped and blocked arrangements. 
- OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items from exchange, converting between striped and blocked arrangements. - OffsetT ranks[ITEMS_PER_THREAD]) ///< [in] Corresponding scatter ranks - { - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = ranks[ITEM]; - if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); - if (ranks[ITEM] >= 0) - temp_storage.buff[item_offset] = input_items[ITEM]; - } - - CTA_SYNC(); - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = int(ITEM * BLOCK_THREADS) + linear_tid; - if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); - output_items[ITEM] = temp_storage.buff[item_offset]; - } - } - - - - - /** - * \brief Exchanges valid data items annotated by rank into striped arrangement. - * - * \par - * - \smemreuse - * - * \tparam OffsetT [inferred] Signed integer type for local offsets - * \tparam ValidFlag [inferred] FlagT type denoting which items are valid - */ - template - __device__ __forceinline__ void ScatterToStripedFlagged( - InputT input_items[ITEMS_PER_THREAD], ///< [in] Items to exchange, converting between striped and blocked arrangements. - OutputT output_items[ITEMS_PER_THREAD], ///< [out] Items from exchange, converting between striped and blocked arrangements. - OffsetT ranks[ITEMS_PER_THREAD], ///< [in] Corresponding scatter ranks - ValidFlag is_valid[ITEMS_PER_THREAD]) ///< [in] Corresponding flag denoting item validity - { - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = ranks[ITEM]; - if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); - if (is_valid[ITEM]) - temp_storage.buff[item_offset] = input_items[ITEM]; - } - - CTA_SYNC(); - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = int(ITEM * BLOCK_THREADS) + linear_tid; - if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); - output_items[ITEM] = temp_storage.buff[item_offset]; - } - } - - - //@} end member group - - - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - - - __device__ __forceinline__ void StripedToBlocked( - InputT items[ITEMS_PER_THREAD]) ///< [in-out] Items to exchange, converting between striped and blocked arrangements. - { - StripedToBlocked(items, items); - } - - __device__ __forceinline__ void BlockedToStriped( - InputT items[ITEMS_PER_THREAD]) ///< [in-out] Items to exchange, converting between striped and blocked arrangements. - { - BlockedToStriped(items, items); - } - - __device__ __forceinline__ void WarpStripedToBlocked( - InputT items[ITEMS_PER_THREAD]) ///< [in-out] Items to exchange, converting between striped and blocked arrangements. - { - WarpStripedToBlocked(items, items); - } - - __device__ __forceinline__ void BlockedToWarpStriped( - InputT items[ITEMS_PER_THREAD]) ///< [in-out] Items to exchange, converting between striped and blocked arrangements. - { - BlockedToWarpStriped(items, items); - } - - template - __device__ __forceinline__ void ScatterToBlocked( - InputT items[ITEMS_PER_THREAD], ///< [in-out] Items to exchange, converting between striped and blocked arrangements. 
- OffsetT ranks[ITEMS_PER_THREAD]) ///< [in] Corresponding scatter ranks - { - ScatterToBlocked(items, items, ranks); - } - - template - __device__ __forceinline__ void ScatterToStriped( - InputT items[ITEMS_PER_THREAD], ///< [in-out] Items to exchange, converting between striped and blocked arrangements. - OffsetT ranks[ITEMS_PER_THREAD]) ///< [in] Corresponding scatter ranks - { - ScatterToStriped(items, items, ranks); - } - - template - __device__ __forceinline__ void ScatterToStripedGuarded( - InputT items[ITEMS_PER_THREAD], ///< [in-out] Items to exchange, converting between striped and blocked arrangements. - OffsetT ranks[ITEMS_PER_THREAD]) ///< [in] Corresponding scatter ranks - { - ScatterToStripedGuarded(items, items, ranks); - } - - template - __device__ __forceinline__ void ScatterToStripedFlagged( - InputT items[ITEMS_PER_THREAD], ///< [in-out] Items to exchange, converting between striped and blocked arrangements. - OffsetT ranks[ITEMS_PER_THREAD], ///< [in] Corresponding scatter ranks - ValidFlag is_valid[ITEMS_PER_THREAD]) ///< [in] Corresponding flag denoting item validity - { - ScatterToStriped(items, items, ranks, is_valid); - } - -#endif // DOXYGEN_SHOULD_SKIP_THIS - - -}; - - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - - -template < - typename T, - int ITEMS_PER_THREAD, - int LOGICAL_WARP_THREADS = CUB_PTX_WARP_THREADS, - int PTX_ARCH = CUB_PTX_ARCH> -class WarpExchange -{ -private: - - /****************************************************************************** - * Constants - ******************************************************************************/ - - /// Constants - enum - { - // Whether the logical warp size and the PTX warp size coincide - IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(PTX_ARCH)), - - WARP_ITEMS = (ITEMS_PER_THREAD * LOGICAL_WARP_THREADS) + 1, - - LOG_SMEM_BANKS = CUB_LOG_SMEM_BANKS(PTX_ARCH), - SMEM_BANKS = 1 << LOG_SMEM_BANKS, - - // Insert padding if the number of items per thread is a power of two and > 4 (otherwise we can typically use 128b loads) - INSERT_PADDING = (ITEMS_PER_THREAD > 4) && (PowerOfTwo::VALUE), - PADDING_ITEMS = (INSERT_PADDING) ? (WARP_ITEMS >> LOG_SMEM_BANKS) : 0, - }; - - /****************************************************************************** - * Type definitions - ******************************************************************************/ - - /// Shared memory storage layout type - struct _TempStorage - { - T buff[WARP_ITEMS + PADDING_ITEMS]; - }; - -public: - - /// \smemstorage{WarpExchange} - struct TempStorage : Uninitialized<_TempStorage> {}; - -private: - - - /****************************************************************************** - * Thread fields - ******************************************************************************/ - - _TempStorage &temp_storage; - int lane_id; - -public: - - /****************************************************************************** - * Construction - ******************************************************************************/ - - /// Constructor - __device__ __forceinline__ WarpExchange( - TempStorage &temp_storage) - : - temp_storage(temp_storage.Alias()), - lane_id(IS_ARCH_WARP ? - LaneId() : - LaneId() % LOGICAL_WARP_THREADS) - {} - - - /****************************************************************************** - * Interface - ******************************************************************************/ - - /** - * \brief Exchanges valid data items annotated by rank into striped arrangement. 
- * - * \par - * - \smemreuse - * - * \tparam OffsetT [inferred] Signed integer type for local offsets - */ - template - __device__ __forceinline__ void ScatterToStriped( - T items[ITEMS_PER_THREAD], ///< [in-out] Items to exchange - OffsetT ranks[ITEMS_PER_THREAD]) ///< [in] Corresponding scatter ranks - { - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - if (INSERT_PADDING) ranks[ITEM] = SHR_ADD(ranks[ITEM], LOG_SMEM_BANKS, ranks[ITEM]); - temp_storage.buff[ranks[ITEM]] = items[ITEM]; - } - - WARP_SYNC(0xffffffff); - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - int item_offset = (ITEM * LOGICAL_WARP_THREADS) + lane_id; - if (INSERT_PADDING) item_offset = SHR_ADD(item_offset, LOG_SMEM_BANKS, item_offset); - items[ITEM] = temp_storage.buff[item_offset]; - } - } - -}; - - - - -#endif // DOXYGEN_SHOULD_SKIP_THIS - - - - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/block/block_histogram.cuh b/ml-xgboost/cub/cub/block/block_histogram.cuh deleted file mode 100644 index b8bce77..0000000 --- a/ml-xgboost/cub/cub/block/block_histogram.cuh +++ /dev/null @@ -1,415 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * The cub::BlockHistogram class provides [collective](index.html#sec0) methods for constructing block-wide histograms from data samples partitioned across a CUDA thread block. 
- */ - -#pragma once - -#include "specializations/block_histogram_sort.cuh" -#include "specializations/block_histogram_atomic.cuh" -#include "../util_ptx.cuh" -#include "../util_arch.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/****************************************************************************** - * Algorithmic variants - ******************************************************************************/ - -/** - * \brief BlockHistogramAlgorithm enumerates alternative algorithms for the parallel construction of block-wide histograms. - */ -enum BlockHistogramAlgorithm -{ - - /** - * \par Overview - * Sorting followed by differentiation. Execution is comprised of two phases: - * -# Sort the data using efficient radix sort - * -# Look for "runs" of same-valued keys by detecting discontinuities; the run-lengths are histogram bin counts. - * - * \par Performance Considerations - * Delivers consistent throughput regardless of sample bin distribution. - */ - BLOCK_HISTO_SORT, - - - /** - * \par Overview - * Use atomic addition to update byte counts directly - * - * \par Performance Considerations - * Performance is strongly tied to the hardware implementation of atomic - * addition, and may be significantly degraded for non uniformly-random - * input distributions where many concurrent updates are likely to be - * made to the same bin counter. - */ - BLOCK_HISTO_ATOMIC, -}; - - - -/****************************************************************************** - * Block histogram - ******************************************************************************/ - - -/** - * \brief The BlockHistogram class provides [collective](index.html#sec0) methods for constructing block-wide histograms from data samples partitioned across a CUDA thread block. ![](histogram_logo.png) - * \ingroup BlockModule - * - * \tparam T The sample type being histogrammed (must be castable to an integer bin identifier) - * \tparam BLOCK_DIM_X The thread block length in threads along the X dimension - * \tparam ITEMS_PER_THREAD The number of items per thread - * \tparam BINS The number bins within the histogram - * \tparam ALGORITHM [optional] cub::BlockHistogramAlgorithm enumerator specifying the underlying algorithm to use (default: cub::BLOCK_HISTO_SORT) - * \tparam BLOCK_DIM_Y [optional] The thread block length in threads along the Y dimension (default: 1) - * \tparam BLOCK_DIM_Z [optional] The thread block length in threads along the Z dimension (default: 1) - * \tparam PTX_ARCH [optional] \ptxversion - * - * \par Overview - * - A histogram - * counts the number of observations that fall into each of the disjoint categories (known as bins). - * - BlockHistogram can be optionally specialized to use different algorithms: - * -# cub::BLOCK_HISTO_SORT. Sorting followed by differentiation. [More...](\ref cub::BlockHistogramAlgorithm) - * -# cub::BLOCK_HISTO_ATOMIC. Use atomic addition to update byte counts directly. [More...](\ref cub::BlockHistogramAlgorithm) - * - * \par Performance Considerations - * - \granularity - * - * \par A Simple Example - * \blockcollective{BlockHistogram} - * \par - * The code snippet below illustrates a 256-bin histogram of 512 integer samples that - * are partitioned across 128 threads where each thread owns 4 samples. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) 
- * {
- *     // Specialize a 256-bin BlockHistogram type for a 1D block of 128 threads having 4 character samples each
- *     typedef cub::BlockHistogram<unsigned char, 128, 4, 256> BlockHistogram;
- *
- *     // Allocate shared memory for BlockHistogram
- *     __shared__ typename BlockHistogram::TempStorage temp_storage;
- *
- *     // Allocate shared memory for block-wide histogram bin counts
- *     __shared__ unsigned int smem_histogram[256];
- *
- *     // Obtain input samples per thread
- *     unsigned char data[4];
- *     ...
- *
- *     // Compute the block-wide histogram
- *     BlockHistogram(temp_storage).Histogram(data, smem_histogram);
- *
- * \endcode
- *
- * \par Performance and Usage Considerations
- * - The histogram output can be constructed in shared or device-accessible memory
- * - See cub::BlockHistogramAlgorithm for performance details regarding algorithmic alternatives
- *
- */
-template <
-    typename                T,
-    int                     BLOCK_DIM_X,
-    int                     ITEMS_PER_THREAD,
-    int                     BINS,
-    BlockHistogramAlgorithm ALGORITHM   = BLOCK_HISTO_SORT,
-    int                     BLOCK_DIM_Y = 1,
-    int                     BLOCK_DIM_Z = 1,
-    int                     PTX_ARCH    = CUB_PTX_ARCH>
-class BlockHistogram
-{
-private:
-
-    /******************************************************************************
-     * Constants and type definitions
-     ******************************************************************************/
-
-    /// Constants
-    enum
-    {
-        /// The thread block size in threads
-        BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,
-    };
-
-    /**
-     * Ensure the template parameterization meets the requirements of the
-     * targeted device architecture.  BLOCK_HISTO_ATOMIC can only be used
-     * on version SM120 or later.  Otherwise BLOCK_HISTO_SORT is used
-     * regardless.
-     */
-    static const BlockHistogramAlgorithm SAFE_ALGORITHM =
-        ((ALGORITHM == BLOCK_HISTO_ATOMIC) && (PTX_ARCH < 120)) ?
-            BLOCK_HISTO_SORT :
-            ALGORITHM;
-
-    /// Internal specialization.
-    typedef typename If<(SAFE_ALGORITHM == BLOCK_HISTO_SORT),
-        BlockHistogramSort<T, BLOCK_DIM_X, ITEMS_PER_THREAD, BINS, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH>,
-        BlockHistogramAtomic<BINS> >::Type InternalBlockHistogram;
-
-    /// Shared memory storage layout type for BlockHistogram
-    typedef typename InternalBlockHistogram::TempStorage _TempStorage;
-
-
-    /******************************************************************************
-     * Thread fields
-     ******************************************************************************/
-
-    /// Shared storage reference
-    _TempStorage &temp_storage;
-
-    /// Linear thread-id
-    unsigned int linear_tid;
-
-
-    /******************************************************************************
-     * Utility methods
-     ******************************************************************************/
-
-    /// Internal storage allocator
-    __device__ __forceinline__ _TempStorage& PrivateStorage()
-    {
-        __shared__ _TempStorage private_storage;
-        return private_storage;
-    }
-
-
-public:
-
-    /// \smemstorage{BlockHistogram}
-    struct TempStorage : Uninitialized<_TempStorage> {};
-
-
-    /******************************************************************//**
-     * \name Collective constructors
-     *********************************************************************/
-    //@{
-
-    /**
-     * \brief Collective constructor using a private static allocation of shared memory as temporary storage.
-     */
-    __device__ __forceinline__ BlockHistogram()
-    :
-        temp_storage(PrivateStorage()),
-        linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
-    {}
-
-
-    /**
-     * \brief Collective constructor using the specified memory allocation as temporary storage.
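-     *
-     * \par
-     * The two constructors differ only in where the temporary storage lives;
-     * a sketch (\p BlockHistogram is the specialization from the example above):
-     * \code
-     * __shared__ typename BlockHistogram::TempStorage temp_storage;
-     * BlockHistogram hist1(temp_storage);   // caller-supplied storage
-     * BlockHistogram hist2;                 // private static shared-memory allocation
-     * \endcode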
- */ - __device__ __forceinline__ BlockHistogram( - TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage - : - temp_storage(temp_storage.Alias()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) - {} - - - //@} end member group - /******************************************************************//** - * \name Histogram operations - *********************************************************************/ - //@{ - - - /** - * \brief Initialize the shared histogram counters to zero. - * - * \par Snippet - * The code snippet below illustrates a the initialization and update of a - * histogram of 512 integer samples that are partitioned across 128 threads - * where each thread owns 4 samples. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize a 256-bin BlockHistogram type for a 1D block of 128 threads having 4 character samples each - * typedef cub::BlockHistogram BlockHistogram; - * - * // Allocate shared memory for BlockHistogram - * __shared__ typename BlockHistogram::TempStorage temp_storage; - * - * // Allocate shared memory for block-wide histogram bin counts - * __shared__ unsigned int smem_histogram[256]; - * - * // Obtain input samples per thread - * unsigned char thread_samples[4]; - * ... - * - * // Initialize the block-wide histogram - * BlockHistogram(temp_storage).InitHistogram(smem_histogram); - * - * // Update the block-wide histogram - * BlockHistogram(temp_storage).Composite(thread_samples, smem_histogram); - * - * \endcode - * - * \tparam CounterT [inferred] Histogram counter type - */ - template - __device__ __forceinline__ void InitHistogram(CounterT histogram[BINS]) - { - // Initialize histogram bin counts to zeros - int histo_offset = 0; - - #pragma unroll - for(; histo_offset + BLOCK_THREADS <= BINS; histo_offset += BLOCK_THREADS) - { - histogram[histo_offset + linear_tid] = 0; - } - // Finish up with guarded initialization if necessary - if ((BINS % BLOCK_THREADS != 0) && (histo_offset + linear_tid < BINS)) - { - histogram[histo_offset + linear_tid] = 0; - } - } - - - /** - * \brief Constructs a block-wide histogram in shared/device-accessible memory. Each thread contributes an array of input elements. - * - * \par - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a 256-bin histogram of 512 integer samples that - * are partitioned across 128 threads where each thread owns 4 samples. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize a 256-bin BlockHistogram type for a 1D block of 128 threads having 4 character samples each - * typedef cub::BlockHistogram BlockHistogram; - * - * // Allocate shared memory for BlockHistogram - * __shared__ typename BlockHistogram::TempStorage temp_storage; - * - * // Allocate shared memory for block-wide histogram bin counts - * __shared__ unsigned int smem_histogram[256]; - * - * // Obtain input samples per thread - * unsigned char thread_samples[4]; - * ... 
- * - * // Compute the block-wide histogram - * BlockHistogram(temp_storage).Histogram(thread_samples, smem_histogram); - * - * \endcode - * - * \tparam CounterT [inferred] Histogram counter type - */ - template < - typename CounterT > - __device__ __forceinline__ void Histogram( - T (&items)[ITEMS_PER_THREAD], ///< [in] Calling thread's input values to histogram - CounterT histogram[BINS]) ///< [out] Reference to shared/device-accessible memory histogram - { - // Initialize histogram bin counts to zeros - InitHistogram(histogram); - - CTA_SYNC(); - - // Composite the histogram - InternalBlockHistogram(temp_storage).Composite(items, histogram); - } - - - - /** - * \brief Updates an existing block-wide histogram in shared/device-accessible memory. Each thread composites an array of input elements. - * - * \par - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a the initialization and update of a - * histogram of 512 integer samples that are partitioned across 128 threads - * where each thread owns 4 samples. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize a 256-bin BlockHistogram type for a 1D block of 128 threads having 4 character samples each - * typedef cub::BlockHistogram BlockHistogram; - * - * // Allocate shared memory for BlockHistogram - * __shared__ typename BlockHistogram::TempStorage temp_storage; - * - * // Allocate shared memory for block-wide histogram bin counts - * __shared__ unsigned int smem_histogram[256]; - * - * // Obtain input samples per thread - * unsigned char thread_samples[4]; - * ... - * - * // Initialize the block-wide histogram - * BlockHistogram(temp_storage).InitHistogram(smem_histogram); - * - * // Update the block-wide histogram - * BlockHistogram(temp_storage).Composite(thread_samples, smem_histogram); - * - * \endcode - * - * \tparam CounterT [inferred] Histogram counter type - */ - template < - typename CounterT > - __device__ __forceinline__ void Composite( - T (&items)[ITEMS_PER_THREAD], ///< [in] Calling thread's input values to histogram - CounterT histogram[BINS]) ///< [out] Reference to shared/device-accessible memory histogram - { - InternalBlockHistogram(temp_storage).Composite(items, histogram); - } - -}; - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/block/block_load.cuh b/ml-xgboost/cub/cub/block/block_load.cuh deleted file mode 100644 index dac487f..0000000 --- a/ml-xgboost/cub/cub/block/block_load.cuh +++ /dev/null @@ -1,1268 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * Operations for reading linear tiles of data into the CUDA thread block. - */ - -#pragma once - -#include - -#include "block_exchange.cuh" -#include "../iterator/cache_modified_input_iterator.cuh" -#include "../util_ptx.cuh" -#include "../util_macro.cuh" -#include "../util_type.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/** - * \addtogroup UtilIo - * @{ - */ - - -/******************************************************************//** - * \name Blocked arrangement I/O (direct) - *********************************************************************/ -//@{ - - -/** - * \brief Load a linear segment of items into a blocked arrangement across the thread block. - * - * \blocked - * - * \tparam T [inferred] The data type to load. - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - * \tparam InputIteratorT [inferred] The random-access iterator type for input \iterator. - */ -template < - typename InputT, - int ITEMS_PER_THREAD, - typename InputIteratorT> -__device__ __forceinline__ void LoadDirectBlocked( - int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., (threadIdx.y * blockDim.x) + linear_tid for 2D thread blocks) - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load -{ - InputIteratorT thread_itr = block_itr + (linear_tid * ITEMS_PER_THREAD); - - // Load directly in thread-blocked order - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - items[ITEM] = thread_itr[ITEM]; - } -} - - -/** - * \brief Load a linear segment of items into a blocked arrangement across the thread block, guarded by range. - * - * \blocked - * - * \tparam T [inferred] The data type to load. - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - * \tparam InputIteratorT [inferred] The random-access iterator type for input \iterator. 
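- *
- * \par
- * For reference, the guard applied by this overload is a simple per-item
- * bounds test (a sketch mirroring the implementation below):
- * \code
- * // thread linear_tid owns items [linear_tid * ITEMS_PER_THREAD, ...)
- * if ((linear_tid * ITEMS_PER_THREAD) + ITEM < valid_items)
- *     items[ITEM] = thread_itr[ITEM];
- * \endcode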
- */ -template < - typename InputT, - int ITEMS_PER_THREAD, - typename InputIteratorT> -__device__ __forceinline__ void LoadDirectBlocked( - int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., (threadIdx.y * blockDim.x) + linear_tid for 2D thread blocks) - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load - int valid_items) ///< [in] Number of valid items to load -{ - InputIteratorT thread_itr = block_itr + (linear_tid * ITEMS_PER_THREAD); - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - if ((linear_tid * ITEMS_PER_THREAD) + ITEM < valid_items) - { - items[ITEM] = thread_itr[ITEM]; - } - } -} - - -/** - * \brief Load a linear segment of items into a blocked arrangement across the thread block, guarded by range, with a fall-back assignment of out-of-bound elements.. - * - * \blocked - * - * \tparam T [inferred] The data type to load. - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - * \tparam InputIteratorT [inferred] The random-access iterator type for input \iterator. - */ -template < - typename InputT, - typename DefaultT, - int ITEMS_PER_THREAD, - typename InputIteratorT> -__device__ __forceinline__ void LoadDirectBlocked( - int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., (threadIdx.y * blockDim.x) + linear_tid for 2D thread blocks) - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load - int valid_items, ///< [in] Number of valid items to load - DefaultT oob_default) ///< [in] Default value to assign out-of-bound items -{ - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - items[ITEM] = oob_default; - - LoadDirectBlocked(linear_tid, block_itr, items, valid_items); -} - - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - -/** - * Internal implementation for load vectorization - */ -template < - CacheLoadModifier MODIFIER, - typename T, - int ITEMS_PER_THREAD> -__device__ __forceinline__ void InternalLoadDirectBlockedVectorized( - int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., (threadIdx.y * blockDim.x) + linear_tid for 2D thread blocks) - T *block_ptr, ///< [in] Input pointer for loading from - T (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load -{ - // Biggest memory access word that T is a whole multiple of - typedef typename UnitWord::DeviceWord DeviceWord; - - enum - { - TOTAL_WORDS = sizeof(items) / sizeof(DeviceWord), - - VECTOR_SIZE = (TOTAL_WORDS % 4 == 0) ? - 4 : - (TOTAL_WORDS % 2 == 0) ? - 2 : - 1, - - VECTORS_PER_THREAD = TOTAL_WORDS / VECTOR_SIZE, - }; - - // Vector type - typedef typename CubVector::Type Vector; - - // Vector items - Vector vec_items[VECTORS_PER_THREAD]; - - // Aliased input ptr - Vector* vec_ptr = reinterpret_cast(block_ptr) + (linear_tid * VECTORS_PER_THREAD); - - // Load directly in thread-blocked order - #pragma unroll - for (int ITEM = 0; ITEM < VECTORS_PER_THREAD; ITEM++) - { - vec_items[ITEM] = ThreadLoad(vec_ptr + ITEM); - } - - // Copy - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - items[ITEM] = *(reinterpret_cast(vec_items) + ITEM); - } -} - -#endif // DOXYGEN_SHOULD_SKIP_THIS - - -/** - * \brief Load a linear segment of items into a blocked arrangement across the thread block. 
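- *
- * \par
- * For reference, the vector width used internally is derived from the number
- * of device words covered by each thread's items (a sketch of the selection
- * made by InternalLoadDirectBlockedVectorized above):
- * \code
- * TOTAL_WORDS        = sizeof(items) / sizeof(DeviceWord);
- * VECTOR_SIZE        = (TOTAL_WORDS % 4 == 0) ? 4 : (TOTAL_WORDS % 2 == 0) ? 2 : 1;
- * VECTORS_PER_THREAD = TOTAL_WORDS / VECTOR_SIZE;
- * \endcode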
- * - * \blocked - * - * The input offset (\p block_ptr + \p block_offset) must be quad-item aligned - * - * The following conditions will prevent vectorization and loading will fall back to cub::BLOCK_LOAD_DIRECT: - * - \p ITEMS_PER_THREAD is odd - * - The data type \p T is not a built-in primitive or CUDA vector type (e.g., \p short, \p int2, \p double, \p float2, etc.) - * - * \tparam T [inferred] The data type to load. - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - */ -template < - typename T, - int ITEMS_PER_THREAD> -__device__ __forceinline__ void LoadDirectBlockedVectorized( - int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., (threadIdx.y * blockDim.x) + linear_tid for 2D thread blocks) - T *block_ptr, ///< [in] Input pointer for loading from - T (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load -{ - InternalLoadDirectBlockedVectorized(linear_tid, block_ptr, items); -} - - -//@} end member group -/******************************************************************//** - * \name Striped arrangement I/O (direct) - *********************************************************************/ -//@{ - - -/** - * \brief Load a linear segment of items into a striped arrangement across the thread block. - * - * \striped - * - * \tparam BLOCK_THREADS The thread block size in threads - * \tparam T [inferred] The data type to load. - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - * \tparam InputIteratorT [inferred] The random-access iterator type for input \iterator. - */ -template < - int BLOCK_THREADS, - typename InputT, - int ITEMS_PER_THREAD, - typename InputIteratorT> -__device__ __forceinline__ void LoadDirectStriped( - int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., (threadIdx.y * blockDim.x) + linear_tid for 2D thread blocks) - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load -{ - InputIteratorT thread_itr = block_itr + linear_tid; - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - items[ITEM] = thread_itr[ITEM * BLOCK_THREADS]; - } -} - - -/** - * \brief Load a linear segment of items into a striped arrangement across the thread block, guarded by range - * - * \striped - * - * \tparam BLOCK_THREADS The thread block size in threads - * \tparam T [inferred] The data type to load. - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - * \tparam InputIteratorT [inferred] The random-access iterator type for input \iterator. 
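- *
- * \par
- * For reference, the striped mapping strides by \p BLOCK_THREADS, whereas
- * the blocked mapping strides by one (a sketch of the two offset schemes):
- * \code
- * // striped:  items[ITEM] = block_itr[linear_tid + (ITEM * BLOCK_THREADS)];
- * // blocked:  items[ITEM] = block_itr[(linear_tid * ITEMS_PER_THREAD) + ITEM];
- * \endcode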
- */ -template < - int BLOCK_THREADS, - typename InputT, - int ITEMS_PER_THREAD, - typename InputIteratorT> -__device__ __forceinline__ void LoadDirectStriped( - int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., (threadIdx.y * blockDim.x) + linear_tid for 2D thread blocks) - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load - int valid_items) ///< [in] Number of valid items to load -{ - InputIteratorT thread_itr = block_itr + linear_tid; - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - if (linear_tid + (ITEM * BLOCK_THREADS) < valid_items) - { - items[ITEM] = thread_itr[ITEM * BLOCK_THREADS]; - } - } -} - - -/** - * \brief Load a linear segment of items into a striped arrangement across the thread block, guarded by range, with a fall-back assignment of out-of-bound elements. - * - * \striped - * - * \tparam BLOCK_THREADS The thread block size in threads - * \tparam T [inferred] The data type to load. - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - * \tparam InputIteratorT [inferred] The random-access iterator type for input \iterator. - */ -template < - int BLOCK_THREADS, - typename InputT, - typename DefaultT, - int ITEMS_PER_THREAD, - typename InputIteratorT> -__device__ __forceinline__ void LoadDirectStriped( - int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., (threadIdx.y * blockDim.x) + linear_tid for 2D thread blocks) - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load - int valid_items, ///< [in] Number of valid items to load - DefaultT oob_default) ///< [in] Default value to assign out-of-bound items -{ - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - items[ITEM] = oob_default; - - LoadDirectStriped(linear_tid, block_itr, items, valid_items); -} - - - -//@} end member group -/******************************************************************//** - * \name Warp-striped arrangement I/O (direct) - *********************************************************************/ -//@{ - - -/** - * \brief Load a linear segment of items into a warp-striped arrangement across the thread block. - * - * \warpstriped - * - * \par Usage Considerations - * The number of threads in the thread block must be a multiple of the architecture's warp size. - * - * \tparam T [inferred] The data type to load. - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - * \tparam InputIteratorT [inferred] The random-access iterator type for input \iterator. 
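- *
- * \par
- * For reference, the warp-striped offsets decompose \p linear_tid into a lane
- * and a warp id (a sketch of the indexing used by the implementation below):
- * \code
- * int tid         = linear_tid & (CUB_PTX_WARP_THREADS - 1);    // lane within warp
- * int wid         = linear_tid >> CUB_PTX_LOG_WARP_THREADS;     // warp id
- * int warp_offset = wid * CUB_PTX_WARP_THREADS * ITEMS_PER_THREAD;
- * // items[ITEM]  = block_itr[warp_offset + tid + (ITEM * CUB_PTX_WARP_THREADS)];
- * \endcode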
- */ -template < - typename InputT, - int ITEMS_PER_THREAD, - typename InputIteratorT> -__device__ __forceinline__ void LoadDirectWarpStriped( - int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., (threadIdx.y * blockDim.x) + linear_tid for 2D thread blocks) - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load -{ - int tid = linear_tid & (CUB_PTX_WARP_THREADS - 1); - int wid = linear_tid >> CUB_PTX_LOG_WARP_THREADS; - int warp_offset = wid * CUB_PTX_WARP_THREADS * ITEMS_PER_THREAD; - - InputIteratorT thread_itr = block_itr + warp_offset + tid ; - - // Load directly in warp-striped order - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - items[ITEM] = thread_itr[(ITEM * CUB_PTX_WARP_THREADS)]; - } -} - - -/** - * \brief Load a linear segment of items into a warp-striped arrangement across the thread block, guarded by range - * - * \warpstriped - * - * \par Usage Considerations - * The number of threads in the thread block must be a multiple of the architecture's warp size. - * - * \tparam T [inferred] The data type to load. - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - * \tparam InputIteratorT [inferred] The random-access iterator type for input \iterator. - */ -template < - typename InputT, - int ITEMS_PER_THREAD, - typename InputIteratorT> -__device__ __forceinline__ void LoadDirectWarpStriped( - int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., (threadIdx.y * blockDim.x) + linear_tid for 2D thread blocks) - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load - int valid_items) ///< [in] Number of valid items to load -{ - int tid = linear_tid & (CUB_PTX_WARP_THREADS - 1); - int wid = linear_tid >> CUB_PTX_LOG_WARP_THREADS; - int warp_offset = wid * CUB_PTX_WARP_THREADS * ITEMS_PER_THREAD; - - InputIteratorT thread_itr = block_itr + warp_offset + tid ; - - // Load directly in warp-striped order - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - if (warp_offset + tid + (ITEM * CUB_PTX_WARP_THREADS) < valid_items) - { - items[ITEM] = thread_itr[(ITEM * CUB_PTX_WARP_THREADS)]; - } - } -} - - -/** - * \brief Load a linear segment of items into a warp-striped arrangement across the thread block, guarded by range, with a fall-back assignment of out-of-bound elements. - * - * \warpstriped - * - * \par Usage Considerations - * The number of threads in the thread block must be a multiple of the architecture's warp size. - * - * \tparam T [inferred] The data type to load. - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - * \tparam InputIteratorT [inferred] The random-access iterator type for input \iterator. 
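- *
- * \par
- * The guarded-with-default variant below is equivalent to a two-step sketch:
- * fill every slot with the default, then perform the range-guarded load:
- * \code
- * #pragma unroll
- * for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
- *     items[ITEM] = oob_default;
- * LoadDirectWarpStriped(linear_tid, block_itr, items, valid_items);
- * \endcode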
- */
-template <
-    typename        InputT,
-    typename        DefaultT,
-    int             ITEMS_PER_THREAD,
-    typename        InputIteratorT>
-__device__ __forceinline__ void LoadDirectWarpStriped(
-    int             linear_tid,                 ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., (threadIdx.y * blockDim.x) + linear_tid for 2D thread blocks)
-    InputIteratorT  block_itr,                  ///< [in] The thread block's base input iterator for loading from
-    InputT          (&items)[ITEMS_PER_THREAD], ///< [out] Data to load
-    int             valid_items,                ///< [in] Number of valid items to load
-    DefaultT        oob_default)                ///< [in] Default value to assign out-of-bound items
-{
-    // Fill with the out-of-bounds default, then load the valid range in warp-striped order
-    #pragma unroll
-    for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
-        items[ITEM] = oob_default;
-
-    LoadDirectWarpStriped(linear_tid, block_itr, items, valid_items);
-}
-
-
-
-//@} end member group
-
-/** @} */       // end group UtilIo
-
-
-
-//-----------------------------------------------------------------------------
-// Generic BlockLoad abstraction
-//-----------------------------------------------------------------------------
-
-/**
- * \brief cub::BlockLoadAlgorithm enumerates alternative algorithms for cub::BlockLoad to read a linear segment of data from memory into a blocked arrangement across a CUDA thread block.
- */
-enum BlockLoadAlgorithm
-{
-    /**
-     * \par Overview
-     *
-     * A [blocked arrangement](index.html#sec5sec3) of data is read
-     * directly from memory.
-     *
-     * \par Performance Considerations
-     * - The utilization of memory transactions (coalescing) decreases as the
-     *   access stride between threads increases (i.e., the number of items per thread).
-     */
-    BLOCK_LOAD_DIRECT,
-
-    /**
-     * \par Overview
-     *
-     * A [blocked arrangement](index.html#sec5sec3) of data is read
-     * from memory using CUDA's built-in vectorized loads as a coalescing optimization.
-     * For example, ld.global.v4.s32 instructions will be generated
-     * when \p T = \p int and \p ITEMS_PER_THREAD % 4 == 0.
-     *
-     * \par Performance Considerations
-     * - The utilization of memory transactions (coalescing) remains high until the
-     *   access stride between threads (i.e., the number of items per thread) exceeds the
-     *   maximum vector load width (typically 4 items or 64B, whichever is lower).
-     * - The following conditions will prevent vectorization and loading will fall back to cub::BLOCK_LOAD_DIRECT:
-     *   - \p ITEMS_PER_THREAD is odd
-     *   - The \p InputIteratorT is not a simple pointer type
-     *   - The block input offset is not quadword-aligned
-     *   - The data type \p T is not a built-in primitive or CUDA vector type (e.g., \p short, \p int2, \p double, \p float2, etc.)
-     */
-    BLOCK_LOAD_VECTORIZE,
-
-    /**
-     * \par Overview
-     *
-     * A [striped arrangement](index.html#sec5sec3) of data is read
-     * efficiently from memory and then locally transposed into a
-     * [blocked arrangement](index.html#sec5sec3).
-     *
-     * \par Performance Considerations
-     * - The utilization of memory transactions (coalescing) remains high regardless
-     *   of items loaded per thread.
-     * - The local reordering incurs slightly longer latencies and lower throughput than the
-     *   direct cub::BLOCK_LOAD_DIRECT and cub::BLOCK_LOAD_VECTORIZE alternatives.
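-     *
-     * \par
-     * For reference, this strategy composes two primitives defined elsewhere in
-     * this library (a sketch, with the BlockExchange specialization elided):
-     * \code
-     * LoadDirectStriped<BLOCK_THREADS>(linear_tid, block_itr, items);
-     * BlockExchange(temp_storage).StripedToBlocked(items, items);
-     * \endcode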
- */ - BLOCK_LOAD_TRANSPOSE, - - - /** - * \par Overview - * - * A [warp-striped arrangement](index.html#sec5sec3) of data is - * read efficiently from memory and then locally transposed into a - * [blocked arrangement](index.html#sec5sec3). - * - * \par Usage Considerations - * - BLOCK_THREADS must be a multiple of WARP_THREADS - * - * \par Performance Considerations - * - The utilization of memory transactions (coalescing) remains high regardless - * of items loaded per thread. - * - The local reordering incurs slightly larger latencies than the - * direct cub::BLOCK_LOAD_DIRECT and cub::BLOCK_LOAD_VECTORIZE alternatives. - * - Provisions more shared storage, but incurs smaller latencies than the - * BLOCK_LOAD_WARP_TRANSPOSE_TIMESLICED alternative. - */ - BLOCK_LOAD_WARP_TRANSPOSE, - - - /** - * \par Overview - * - * Like \p BLOCK_LOAD_WARP_TRANSPOSE, a [warp-striped arrangement](index.html#sec5sec3) - * of data is read directly from memory and then is locally transposed into a - * [blocked arrangement](index.html#sec5sec3). To reduce the shared memory - * requirement, only one warp's worth of shared memory is provisioned and is - * subsequently time-sliced among warps. - * - * \par Usage Considerations - * - BLOCK_THREADS must be a multiple of WARP_THREADS - * - * \par Performance Considerations - * - The utilization of memory transactions (coalescing) remains high regardless - * of items loaded per thread. - * - Provisions less shared memory temporary storage, but incurs larger - * latencies than the BLOCK_LOAD_WARP_TRANSPOSE alternative. - */ - BLOCK_LOAD_WARP_TRANSPOSE_TIMESLICED, -}; - - -/** - * \brief The BlockLoad class provides [collective](index.html#sec0) data movement methods for loading a linear segment of items from memory into a [blocked arrangement](index.html#sec5sec3) across a CUDA thread block. ![](block_load_logo.png) - * \ingroup BlockModule - * \ingroup UtilIo - * - * \tparam InputT The data type to read into (which must be convertible from the input iterator's value type). - * \tparam BLOCK_DIM_X The thread block length in threads along the X dimension - * \tparam ITEMS_PER_THREAD The number of consecutive items partitioned onto each thread. - * \tparam ALGORITHM [optional] cub::BlockLoadAlgorithm tuning policy. default: cub::BLOCK_LOAD_DIRECT. - * \tparam WARP_TIME_SLICING [optional] Whether or not only one warp's worth of shared memory should be allocated and time-sliced among block-warps during any load-related data transpositions (versus each warp having its own storage). (default: false) - * \tparam BLOCK_DIM_Y [optional] The thread block length in threads along the Y dimension (default: 1) - * \tparam BLOCK_DIM_Z [optional] The thread block length in threads along the Z dimension (default: 1) - * \tparam PTX_ARCH [optional] \ptxversion - * - * \par Overview - * - The BlockLoad class provides a single data movement abstraction that can be specialized - * to implement different cub::BlockLoadAlgorithm strategies. This facilitates different - * performance policies for different architectures, data types, granularity sizes, etc. - * - BlockLoad can be optionally specialized by different data movement strategies: - * -# cub::BLOCK_LOAD_DIRECT. A [blocked arrangement](index.html#sec5sec3) - * of data is read directly from memory. [More...](\ref cub::BlockLoadAlgorithm) - * -# cub::BLOCK_LOAD_VECTORIZE. A [blocked arrangement](index.html#sec5sec3) - * of data is read directly from memory using CUDA's built-in vectorized loads as a - * coalescing optimization. 
[More...](\ref cub::BlockLoadAlgorithm)
- *   -# cub::BLOCK_LOAD_TRANSPOSE.  A [striped arrangement](index.html#sec5sec3)
- *      of data is read directly from memory and is then locally transposed into a
- *      [blocked arrangement](index.html#sec5sec3). [More...](\ref cub::BlockLoadAlgorithm)
- *   -# cub::BLOCK_LOAD_WARP_TRANSPOSE.  A [warp-striped arrangement](index.html#sec5sec3)
- *      of data is read directly from memory and is then locally transposed into a
- *      [blocked arrangement](index.html#sec5sec3). [More...](\ref cub::BlockLoadAlgorithm)
- *   -# cub::BLOCK_LOAD_WARP_TRANSPOSE_TIMESLICED.  A [warp-striped arrangement](index.html#sec5sec3)
- *      of data is read directly from memory and is then locally transposed into a
- *      [blocked arrangement](index.html#sec5sec3) one warp at a time.  [More...](\ref cub::BlockLoadAlgorithm)
- * - \rowmajor
- *
- * \par A Simple Example
- * \blockcollective{BlockLoad}
- * \par
- * The code snippet below illustrates the loading of a linear
- * segment of 512 integers into a "blocked" arrangement across 128 threads where each
- * thread owns 4 consecutive items.  The load is specialized for \p BLOCK_LOAD_WARP_TRANSPOSE,
- * meaning memory references are efficiently coalesced using a warp-striped access
- * pattern (after which items are locally reordered among threads).
- * \par
- * \code
- * #include <cub/cub.cuh>   // or equivalently <cub/block/block_load.cuh>
- *
- * __global__ void ExampleKernel(int *d_data, ...)
- * {
- *     // Specialize BlockLoad for a 1D block of 128 threads owning 4 integer items each
- *     typedef cub::BlockLoad<int, 128, 4, BLOCK_LOAD_WARP_TRANSPOSE> BlockLoad;
- *
- *     // Allocate shared memory for BlockLoad
- *     __shared__ typename BlockLoad::TempStorage temp_storage;
- *
- *     // Load a segment of consecutive items that are blocked across threads
- *     int thread_data[4];
- *     BlockLoad(temp_storage).Load(d_data, thread_data);
- *
- * \endcode
- * \par
- * Suppose the input \p d_data is 0, 1, 2, 3, 4, 5, ....
- * The set of \p thread_data across the block of threads will be
- * { [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }.
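- *
- * \par
- * When a grid tiles a larger array, each block typically advances its base
- * iterator by one tile of BLOCK_THREADS * ITEMS_PER_THREAD items (a sketch
- * continuing the example above):
- * \code
- * int tile_offset = blockIdx.x * (128 * 4);
- * BlockLoad(temp_storage).Load(d_data + tile_offset, thread_data);
- * \endcode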
- * - */ -template < - typename InputT, - int BLOCK_DIM_X, - int ITEMS_PER_THREAD, - BlockLoadAlgorithm ALGORITHM = BLOCK_LOAD_DIRECT, - int BLOCK_DIM_Y = 1, - int BLOCK_DIM_Z = 1, - int PTX_ARCH = CUB_PTX_ARCH> -class BlockLoad -{ -private: - - /****************************************************************************** - * Constants and typed definitions - ******************************************************************************/ - - /// Constants - enum - { - /// The thread block size in threads - BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, - }; - - - /****************************************************************************** - * Algorithmic variants - ******************************************************************************/ - - /// Load helper - template - struct LoadInternal; - - - /** - * BLOCK_LOAD_DIRECT specialization of load helper - */ - template - struct LoadInternal - { - /// Shared memory storage layout type - typedef NullType TempStorage; - - /// Linear thread-id - int linear_tid; - - /// Constructor - __device__ __forceinline__ LoadInternal( - TempStorage &/*temp_storage*/, - int linear_tid) - : - linear_tid(linear_tid) - {} - - /// Load a linear segment of items from memory - template - __device__ __forceinline__ void Load( - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load - { - LoadDirectBlocked(linear_tid, block_itr, items); - } - - /// Load a linear segment of items from memory, guarded by range - template - __device__ __forceinline__ void Load( - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load - int valid_items) ///< [in] Number of valid items to load - { - LoadDirectBlocked(linear_tid, block_itr, items, valid_items); - } - - /// Load a linear segment of items from memory, guarded by range, with a fall-back assignment of out-of-bound elements - template - __device__ __forceinline__ void Load( - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load - int valid_items, ///< [in] Number of valid items to load - DefaultT oob_default) ///< [in] Default value to assign out-of-bound items - { - LoadDirectBlocked(linear_tid, block_itr, items, valid_items, oob_default); - } - - }; - - - /** - * BLOCK_LOAD_VECTORIZE specialization of load helper - */ - template - struct LoadInternal - { - /// Shared memory storage layout type - typedef NullType TempStorage; - - /// Linear thread-id - int linear_tid; - - /// Constructor - __device__ __forceinline__ LoadInternal( - TempStorage &/*temp_storage*/, - int linear_tid) - : - linear_tid(linear_tid) - {} - - /// Load a linear segment of items from memory, specialized for native pointer types (attempts vectorization) - template - __device__ __forceinline__ void Load( - InputT *block_ptr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load - { - InternalLoadDirectBlockedVectorized(linear_tid, block_ptr, items); - } - - /// Load a linear segment of items from memory, specialized for native pointer types (attempts vectorization) - template - __device__ __forceinline__ void Load( - const InputT *block_ptr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load - { - 
InternalLoadDirectBlockedVectorized(linear_tid, block_ptr, items); - } - - /// Load a linear segment of items from memory, specialized for native pointer types (attempts vectorization) - template < - CacheLoadModifier MODIFIER, - typename ValueType, - typename OffsetT> - __device__ __forceinline__ void Load( - CacheModifiedInputIterator block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load - { - InternalLoadDirectBlockedVectorized(linear_tid, block_itr.ptr, items); - } - - /// Load a linear segment of items from memory, specialized for opaque input iterators (skips vectorization) - template - __device__ __forceinline__ void Load( - _InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load - { - LoadDirectBlocked(linear_tid, block_itr, items); - } - - /// Load a linear segment of items from memory, guarded by range (skips vectorization) - template - __device__ __forceinline__ void Load( - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load - int valid_items) ///< [in] Number of valid items to load - { - LoadDirectBlocked(linear_tid, block_itr, items, valid_items); - } - - /// Load a linear segment of items from memory, guarded by range, with a fall-back assignment of out-of-bound elements (skips vectorization) - template - __device__ __forceinline__ void Load( - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load - int valid_items, ///< [in] Number of valid items to load - DefaultT oob_default) ///< [in] Default value to assign out-of-bound items - { - LoadDirectBlocked(linear_tid, block_itr, items, valid_items, oob_default); - } - - }; - - - /** - * BLOCK_LOAD_TRANSPOSE specialization of load helper - */ - template - struct LoadInternal - { - // BlockExchange utility type for keys - typedef BlockExchange BlockExchange; - - /// Shared memory storage layout type - struct _TempStorage : BlockExchange::TempStorage - { - /// Temporary storage for partially-full block guard - volatile int valid_items; - }; - - /// Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - /// Thread reference to shared storage - _TempStorage &temp_storage; - - /// Linear thread-id - int linear_tid; - - /// Constructor - __device__ __forceinline__ LoadInternal( - TempStorage &temp_storage, - int linear_tid) - : - temp_storage(temp_storage.Alias()), - linear_tid(linear_tid) - {} - - /// Load a linear segment of items from memory - template - __device__ __forceinline__ void Load( - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load{ - { - LoadDirectStriped(linear_tid, block_itr, items); - BlockExchange(temp_storage).StripedToBlocked(items, items); - } - - /// Load a linear segment of items from memory, guarded by range - template - __device__ __forceinline__ void Load( - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load - int valid_items) ///< [in] Number of valid items to load - { - if (linear_tid == 0) - temp_storage.valid_items = valid_items; // Move through volatile smem as a workaround to 
prevent RF spilling on subsequent loads - CTA_SYNC(); - LoadDirectStriped(linear_tid, block_itr, items, temp_storage.valid_items); - BlockExchange(temp_storage).StripedToBlocked(items, items); - } - - /// Load a linear segment of items from memory, guarded by range, with a fall-back assignment of out-of-bound elements - template - __device__ __forceinline__ void Load( - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load - int valid_items, ///< [in] Number of valid items to load - DefaultT oob_default) ///< [in] Default value to assign out-of-bound items - { - if (linear_tid == 0) - temp_storage.valid_items = valid_items; // Move through volatile smem as a workaround to prevent RF spilling on subsequent loads - CTA_SYNC(); - LoadDirectStriped(linear_tid, block_itr, items, temp_storage.valid_items, oob_default); - BlockExchange(temp_storage).StripedToBlocked(items, items); - } - - }; - - - /** - * BLOCK_LOAD_WARP_TRANSPOSE specialization of load helper - */ - template - struct LoadInternal - { - enum - { - WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH) - }; - - // Assert BLOCK_THREADS must be a multiple of WARP_THREADS - CUB_STATIC_ASSERT((BLOCK_THREADS % WARP_THREADS == 0), "BLOCK_THREADS must be a multiple of WARP_THREADS"); - - // BlockExchange utility type for keys - typedef BlockExchange BlockExchange; - - /// Shared memory storage layout type - struct _TempStorage : BlockExchange::TempStorage - { - /// Temporary storage for partially-full block guard - volatile int valid_items; - }; - - /// Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - /// Thread reference to shared storage - _TempStorage &temp_storage; - - /// Linear thread-id - int linear_tid; - - /// Constructor - __device__ __forceinline__ LoadInternal( - TempStorage &temp_storage, - int linear_tid) - : - temp_storage(temp_storage.Alias()), - linear_tid(linear_tid) - {} - - /// Load a linear segment of items from memory - template - __device__ __forceinline__ void Load( - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load{ - { - LoadDirectWarpStriped(linear_tid, block_itr, items); - BlockExchange(temp_storage).WarpStripedToBlocked(items, items); - } - - /// Load a linear segment of items from memory, guarded by range - template - __device__ __forceinline__ void Load( - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load - int valid_items) ///< [in] Number of valid items to load - { - if (linear_tid == 0) - temp_storage.valid_items = valid_items; // Move through volatile smem as a workaround to prevent RF spilling on subsequent loads - CTA_SYNC(); - LoadDirectWarpStriped(linear_tid, block_itr, items, temp_storage.valid_items); - BlockExchange(temp_storage).WarpStripedToBlocked(items, items); - } - - - /// Load a linear segment of items from memory, guarded by range, with a fall-back assignment of out-of-bound elements - template - __device__ __forceinline__ void Load( - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load - int valid_items, ///< [in] Number of valid items to load - DefaultT oob_default) ///< [in] Default value to assign out-of-bound items - { - if (linear_tid == 0) - 
temp_storage.valid_items = valid_items; // Move through volatile smem as a workaround to prevent RF spilling on subsequent loads - CTA_SYNC(); - LoadDirectWarpStriped(linear_tid, block_itr, items, temp_storage.valid_items, oob_default); - BlockExchange(temp_storage).WarpStripedToBlocked(items, items); - } - }; - - - /** - * BLOCK_LOAD_WARP_TRANSPOSE_TIMESLICED specialization of load helper - */ - template - struct LoadInternal - { - enum - { - WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH) - }; - - // Assert BLOCK_THREADS must be a multiple of WARP_THREADS - CUB_STATIC_ASSERT((BLOCK_THREADS % WARP_THREADS == 0), "BLOCK_THREADS must be a multiple of WARP_THREADS"); - - // BlockExchange utility type for keys - typedef BlockExchange BlockExchange; - - /// Shared memory storage layout type - struct _TempStorage : BlockExchange::TempStorage - { - /// Temporary storage for partially-full block guard - volatile int valid_items; - }; - - /// Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - /// Thread reference to shared storage - _TempStorage &temp_storage; - - /// Linear thread-id - int linear_tid; - - /// Constructor - __device__ __forceinline__ LoadInternal( - TempStorage &temp_storage, - int linear_tid) - : - temp_storage(temp_storage.Alias()), - linear_tid(linear_tid) - {} - - /// Load a linear segment of items from memory - template - __device__ __forceinline__ void Load( - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load{ - { - LoadDirectWarpStriped(linear_tid, block_itr, items); - BlockExchange(temp_storage).WarpStripedToBlocked(items, items); - } - - /// Load a linear segment of items from memory, guarded by range - template - __device__ __forceinline__ void Load( - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load - int valid_items) ///< [in] Number of valid items to load - { - if (linear_tid == 0) - temp_storage.valid_items = valid_items; // Move through volatile smem as a workaround to prevent RF spilling on subsequent loads - CTA_SYNC(); - LoadDirectWarpStriped(linear_tid, block_itr, items, temp_storage.valid_items); - BlockExchange(temp_storage).WarpStripedToBlocked(items, items); - } - - - /// Load a linear segment of items from memory, guarded by range, with a fall-back assignment of out-of-bound elements - template - __device__ __forceinline__ void Load( - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load - int valid_items, ///< [in] Number of valid items to load - DefaultT oob_default) ///< [in] Default value to assign out-of-bound items - { - if (linear_tid == 0) - temp_storage.valid_items = valid_items; // Move through volatile smem as a workaround to prevent RF spilling on subsequent loads - CTA_SYNC(); - LoadDirectWarpStriped(linear_tid, block_itr, items, temp_storage.valid_items, oob_default); - BlockExchange(temp_storage).WarpStripedToBlocked(items, items); - } - }; - - - /****************************************************************************** - * Type definitions - ******************************************************************************/ - - /// Internal load implementation to use - typedef LoadInternal InternalLoad; - - - /// Shared memory storage layout type - typedef typename InternalLoad::TempStorage 
_TempStorage; - - - /****************************************************************************** - * Utility methods - ******************************************************************************/ - - /// Internal storage allocator - __device__ __forceinline__ _TempStorage& PrivateStorage() - { - __shared__ _TempStorage private_storage; - return private_storage; - } - - - /****************************************************************************** - * Thread fields - ******************************************************************************/ - - /// Thread reference to shared storage - _TempStorage &temp_storage; - - /// Linear thread-id - int linear_tid; - -public: - - /// \smemstorage{BlockLoad} - struct TempStorage : Uninitialized<_TempStorage> {}; - - - /******************************************************************//** - * \name Collective constructors - *********************************************************************/ - //@{ - - /** - * \brief Collective constructor using a private static allocation of shared memory as temporary storage. - */ - __device__ __forceinline__ BlockLoad() - : - temp_storage(PrivateStorage()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) - {} - - - /** - * \brief Collective constructor using the specified memory allocation as temporary storage. - */ - __device__ __forceinline__ BlockLoad( - TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage - : - temp_storage(temp_storage.Alias()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) - {} - - - - - //@} end member group - /******************************************************************//** - * \name Data movement - *********************************************************************/ - //@{ - - - /** - * \brief Load a linear segment of items from memory. - * - * \par - * - \blocked - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates the loading of a linear - * segment of 512 integers into a "blocked" arrangement across 128 threads where each - * thread owns 4 consecutive items. The load is specialized for \p BLOCK_LOAD_WARP_TRANSPOSE, - * meaning memory references are efficiently coalesced using a warp-striped access - * pattern (after which items are locally reordered among threads). - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(int *d_data, ...) - * { - * // Specialize BlockLoad for a 1D block of 128 threads owning 4 integer items each - * typedef cub::BlockLoad BlockLoad; - * - * // Allocate shared memory for BlockLoad - * __shared__ typename BlockLoad::TempStorage temp_storage; - * - * // Load a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * BlockLoad(temp_storage).Load(d_data, thread_data); - * - * \endcode - * \par - * Suppose the input \p d_data is 0, 1, 2, 3, 4, 5, .... - * The set of \p thread_data across the block of threads in those threads will be - * { [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }. - * - */ - template - __device__ __forceinline__ void Load( - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load - { - InternalLoad(temp_storage, linear_tid).Load(block_itr, items); - } - - - /** - * \brief Load a linear segment of items from memory, guarded by range. 
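-     *
-     * \par
-     * A typical use is guarding the final, partially-full tile of a tiled sweep
-     * (a sketch; \p TILE_ITEMS and \p num_items are illustrative names):
-     * \code
-     * int tile_offset = blockIdx.x * TILE_ITEMS;
-     * int tile_valid  = num_items - tile_offset;
-     * BlockLoad(temp_storage).Load(d_data + tile_offset, thread_data, tile_valid);
-     * \endcode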
- * - * \par - * - \blocked - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates the guarded loading of a linear - * segment of 512 integers into a "blocked" arrangement across 128 threads where each - * thread owns 4 consecutive items. The load is specialized for \p BLOCK_LOAD_WARP_TRANSPOSE, - * meaning memory references are efficiently coalesced using a warp-striped access - * pattern (after which items are locally reordered among threads). - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(int *d_data, int valid_items, ...) - * { - * // Specialize BlockLoad for a 1D block of 128 threads owning 4 integer items each - * typedef cub::BlockLoad BlockLoad; - * - * // Allocate shared memory for BlockLoad - * __shared__ typename BlockLoad::TempStorage temp_storage; - * - * // Load a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * BlockLoad(temp_storage).Load(d_data, thread_data, valid_items); - * - * \endcode - * \par - * Suppose the input \p d_data is 0, 1, 2, 3, 4, 5, 6... and \p valid_items is \p 5. - * The set of \p thread_data across the block of threads in those threads will be - * { [0,1,2,3], [4,?,?,?], ..., [?,?,?,?] }, with only the first two threads - * being unmasked to load portions of valid data (and other items remaining unassigned). - * - */ - template - __device__ __forceinline__ void Load( - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load - int valid_items) ///< [in] Number of valid items to load - { - InternalLoad(temp_storage, linear_tid).Load(block_itr, items, valid_items); - } - - - /** - * \brief Load a linear segment of items from memory, guarded by range, with a fall-back assignment of out-of-bound elements - * - * \par - * - \blocked - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates the guarded loading of a linear - * segment of 512 integers into a "blocked" arrangement across 128 threads where each - * thread owns 4 consecutive items. The load is specialized for \p BLOCK_LOAD_WARP_TRANSPOSE, - * meaning memory references are efficiently coalesced using a warp-striped access - * pattern (after which items are locally reordered among threads). - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(int *d_data, int valid_items, ...) - * { - * // Specialize BlockLoad for a 1D block of 128 threads owning 4 integer items each - * typedef cub::BlockLoad BlockLoad; - * - * // Allocate shared memory for BlockLoad - * __shared__ typename BlockLoad::TempStorage temp_storage; - * - * // Load a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * BlockLoad(temp_storage).Load(d_data, thread_data, valid_items, -1); - * - * \endcode - * \par - * Suppose the input \p d_data is 0, 1, 2, 3, 4, 5, 6..., - * \p valid_items is \p 5, and the out-of-bounds default is \p -1. 
- * The set of \p thread_data across the block of threads in those threads will be - * { [0,1,2,3], [4,-1,-1,-1], ..., [-1,-1,-1,-1] }, with only the first two threads - * being unmasked to load portions of valid data (and other items are assigned \p -1) - * - */ - template - __device__ __forceinline__ void Load( - InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from - InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load - int valid_items, ///< [in] Number of valid items to load - DefaultT oob_default) ///< [in] Default value to assign out-of-bound items - { - InternalLoad(temp_storage, linear_tid).Load(block_itr, items, valid_items, oob_default); - } - - - //@} end member group - -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/block/block_radix_rank.cuh b/ml-xgboost/cub/cub/block/block_radix_rank.cuh deleted file mode 100644 index 8ca925f..0000000 --- a/ml-xgboost/cub/cub/block/block_radix_rank.cuh +++ /dev/null @@ -1,432 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::BlockRadixRank provides operations for ranking unsigned integer types within a CUDA threadblock - */ - -#pragma once - -#include "../thread/thread_reduce.cuh" -#include "../thread/thread_scan.cuh" -#include "../block/block_scan.cuh" -#include "../util_ptx.cuh" -#include "../util_arch.cuh" -#include "../util_type.cuh" -#include "../util_namespace.cuh" - - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/** - * \brief BlockRadixRank provides operations for ranking unsigned integer types within a CUDA threadblock. 
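A self-contained version of the guarded BlockLoad usage documented above — a minimal sketch, assuming CUB is on the include path; the kernel name, the 128-thread/4-item tile shape, and the d_in/d_out/num_items parameters are illustrative, not part of this file:

#include <cub/cub.cuh>

// Hypothetical kernel: each 128-thread block loads a 512-item tile using
// BLOCK_LOAD_WARP_TRANSPOSE. The last tile may be partial, so out-of-bounds
// slots are filled with -1, exactly as the oob_default overload describes.
__global__ void GuardedLoadKernel(const int *d_in, int *d_out, int num_items)
{
    typedef cub::BlockLoad<int, 128, 4, cub::BLOCK_LOAD_WARP_TRANSPOSE> BlockLoad;

    __shared__ typename BlockLoad::TempStorage temp_storage;

    int block_offset = blockIdx.x * 128 * 4;
    int valid_items  = num_items - block_offset;   // < 512 only for the last tile

    int thread_data[4];
    BlockLoad(temp_storage).Load(d_in + block_offset, thread_data, valid_items, -1);

    // Write back in blocked order, skipping the out-of-bounds tail
    #pragma unroll
    for (int i = 0; i < 4; ++i)
    {
        int idx = block_offset + threadIdx.x * 4 + i;
        if (idx < num_items)
            d_out[idx] = thread_data[i];
    }
}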
- * \ingroup BlockModule - * - * \tparam BLOCK_DIM_X The thread block length in threads along the X dimension - * \tparam RADIX_BITS The number of radix bits per digit place - * \tparam DESCENDING Whether or not the sorted-order is high-to-low - * \tparam MEMOIZE_OUTER_SCAN [optional] Whether or not to buffer outer raking scan partials to incur fewer shared memory reads at the expense of higher register pressure (default: true for architectures SM35 and newer, false otherwise). See BlockScanAlgorithm::BLOCK_SCAN_RAKING_MEMOIZE for more details. - * \tparam INNER_SCAN_ALGORITHM [optional] The cub::BlockScanAlgorithm algorithm to use (default: cub::BLOCK_SCAN_WARP_SCANS) - * \tparam SMEM_CONFIG [optional] Shared memory bank mode (default: \p cudaSharedMemBankSizeFourByte) - * \tparam BLOCK_DIM_Y [optional] The thread block length in threads along the Y dimension (default: 1) - * \tparam BLOCK_DIM_Z [optional] The thread block length in threads along the Z dimension (default: 1) - * \tparam PTX_ARCH [optional] \ptxversion - * - * \par Overview - * Blah... - * - Keys must be in a form suitable for radix ranking (i.e., unsigned bits). - * - \blocked - * - * \par Performance Considerations - * - \granularity - * - * \par Examples - * \par - * - Example 1: Simple radix rank of 32-bit integer keys - * \code - * #include - * - * template - * __global__ void ExampleKernel(...) - * { - * - * \endcode - */ -template < - int BLOCK_DIM_X, - int RADIX_BITS, - bool DESCENDING, - bool MEMOIZE_OUTER_SCAN = (CUB_PTX_ARCH >= 350) ? true : false, - BlockScanAlgorithm INNER_SCAN_ALGORITHM = BLOCK_SCAN_WARP_SCANS, - cudaSharedMemConfig SMEM_CONFIG = cudaSharedMemBankSizeFourByte, - int BLOCK_DIM_Y = 1, - int BLOCK_DIM_Z = 1, - int PTX_ARCH = CUB_PTX_ARCH> -class BlockRadixRank -{ -private: - - /****************************************************************************** - * Type definitions and constants - ******************************************************************************/ - - // Integer type for digit counters (to be packed into words of type PackedCounters) - typedef unsigned short DigitCounter; - - // Integer type for packing DigitCounters into columns of shared memory banks - typedef typename If<(SMEM_CONFIG == cudaSharedMemBankSizeEightByte), - unsigned long long, - unsigned int>::Type PackedCounter; - - enum - { - // The thread block size in threads - BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, - - RADIX_DIGITS = 1 << RADIX_BITS, - - LOG_WARP_THREADS = CUB_LOG_WARP_THREADS(PTX_ARCH), - WARP_THREADS = 1 << LOG_WARP_THREADS, - WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS, - - BYTES_PER_COUNTER = sizeof(DigitCounter), - LOG_BYTES_PER_COUNTER = Log2::VALUE, - - PACKING_RATIO = sizeof(PackedCounter) / sizeof(DigitCounter), - LOG_PACKING_RATIO = Log2::VALUE, - - LOG_COUNTER_LANES = CUB_MAX((RADIX_BITS - LOG_PACKING_RATIO), 0), // Always at least one lane - COUNTER_LANES = 1 << LOG_COUNTER_LANES, - - // The number of packed counters per thread (plus one for padding) - PADDED_COUNTER_LANES = COUNTER_LANES + 1, - RAKING_SEGMENT = PADDED_COUNTER_LANES, - - LOG_SMEM_BANKS = CUB_LOG_SMEM_BANKS(PTX_ARCH), - SMEM_BANKS = 1 << LOG_SMEM_BANKS, - }; - - - /// BlockScan type - typedef BlockScan< - PackedCounter, - BLOCK_DIM_X, - INNER_SCAN_ALGORITHM, - BLOCK_DIM_Y, - BLOCK_DIM_Z, - PTX_ARCH> - BlockScan; - - - /// Shared memory storage layout type for BlockRadixRank - struct __align__(16) _TempStorage - { - union - { - DigitCounter 
digit_counters[PADDED_COUNTER_LANES][BLOCK_THREADS][PACKING_RATIO]; - PackedCounter raking_grid[BLOCK_THREADS][RAKING_SEGMENT]; - }; - - // Storage for scanning local ranks - typename BlockScan::TempStorage block_scan; - }; - - - /****************************************************************************** - * Thread fields - ******************************************************************************/ - - /// Shared storage reference - _TempStorage &temp_storage; - - /// Linear thread-id - unsigned int linear_tid; - - /// Copy of raking segment, promoted to registers - PackedCounter cached_segment[RAKING_SEGMENT]; - - - /****************************************************************************** - * Utility methods - ******************************************************************************/ - - /** - * Internal storage allocator - */ - __device__ __forceinline__ _TempStorage& PrivateStorage() - { - __shared__ _TempStorage private_storage; - return private_storage; - } - - - /** - * Performs upsweep raking reduction, returning the aggregate - */ - __device__ __forceinline__ PackedCounter Upsweep() - { - PackedCounter *smem_raking_ptr = temp_storage.raking_grid[linear_tid]; - PackedCounter *raking_ptr; - - if (MEMOIZE_OUTER_SCAN) - { - // Copy data into registers - #pragma unroll - for (int i = 0; i < RAKING_SEGMENT; i++) - { - cached_segment[i] = smem_raking_ptr[i]; - } - raking_ptr = cached_segment; - } - else - { - raking_ptr = smem_raking_ptr; - } - - return ThreadReduce(raking_ptr, Sum()); - } - - - /// Performs exclusive downsweep raking scan - __device__ __forceinline__ void ExclusiveDownsweep( - PackedCounter raking_partial) - { - PackedCounter *smem_raking_ptr = temp_storage.raking_grid[linear_tid]; - - PackedCounter *raking_ptr = (MEMOIZE_OUTER_SCAN) ? - cached_segment : - smem_raking_ptr; - - // Exclusive raking downsweep scan - ThreadScanExclusive(raking_ptr, raking_ptr, Sum(), raking_partial); - - if (MEMOIZE_OUTER_SCAN) - { - // Copy data back to smem - #pragma unroll - for (int i = 0; i < RAKING_SEGMENT; i++) - { - smem_raking_ptr[i] = cached_segment[i]; - } - } - } - - - /** - * Reset shared memory digit counters - */ - __device__ __forceinline__ void ResetCounters() - { - // Reset shared memory digit counters - #pragma unroll - for (int LANE = 0; LANE < PADDED_COUNTER_LANES; LANE++) - { - *((PackedCounter*) temp_storage.digit_counters[LANE][linear_tid]) = 0; - } - } - - - /** - * Block-scan prefix callback - */ - struct PrefixCallBack - { - __device__ __forceinline__ PackedCounter operator()(PackedCounter block_aggregate) - { - PackedCounter block_prefix = 0; - - // Propagate totals in packed fields - #pragma unroll - for (int PACKED = 1; PACKED < PACKING_RATIO; PACKED++) - { - block_prefix += block_aggregate << (sizeof(DigitCounter) * 8 * PACKED); - } - - return block_prefix; - } - }; - - - /** - * Scan shared memory digit counters. 
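The counter-packing arithmetic above is easiest to verify with concrete numbers. A host-side sketch for an assumed instantiation — RADIX_BITS = 4 with four-byte banks, i.e. unsigned short digit counters packed into unsigned int words, mirroring the typedefs above:

// Worked instance of the enum arithmetic for RADIX_BITS = 4 (16 digit values)
constexpr int RADIX_BITS        = 4;
constexpr int PACKING_RATIO     = sizeof(unsigned int) / sizeof(unsigned short); // 2 counters per word
constexpr int LOG_PACKING_RATIO = 1;
constexpr int COUNTER_LANES     = 1 << (RADIX_BITS - LOG_PACKING_RATIO);         // 8 lanes
static_assert(COUNTER_LANES * PACKING_RATIO == (1 << RADIX_BITS),
              "8 lanes x 2 sub-counters cover all 16 digit values");
// PADDED_COUNTER_LANES = COUNTER_LANES + 1 = 9: the extra (padding) lane
// staggers the raking grid to reduce shared memory bank conflicts.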
- */ - __device__ __forceinline__ void ScanCounters() - { - // Upsweep scan - PackedCounter raking_partial = Upsweep(); - - // Compute exclusive sum - PackedCounter exclusive_partial; - PrefixCallBack prefix_call_back; - BlockScan(temp_storage.block_scan).ExclusiveSum(raking_partial, exclusive_partial, prefix_call_back); - - // Downsweep scan with exclusive partial - ExclusiveDownsweep(exclusive_partial); - } - -public: - - /// \smemstorage{BlockScan} - struct TempStorage : Uninitialized<_TempStorage> {}; - - - /******************************************************************//** - * \name Collective constructors - *********************************************************************/ - //@{ - - /** - * \brief Collective constructor using a private static allocation of shared memory as temporary storage. - */ - __device__ __forceinline__ BlockRadixRank() - : - temp_storage(PrivateStorage()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) - {} - - - /** - * \brief Collective constructor using the specified memory allocation as temporary storage. - */ - __device__ __forceinline__ BlockRadixRank( - TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage - : - temp_storage(temp_storage.Alias()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) - {} - - - //@} end member group - /******************************************************************//** - * \name Raking - *********************************************************************/ - //@{ - - /** - * \brief Rank keys. - */ - template < - typename UnsignedBits, - int KEYS_PER_THREAD> - __device__ __forceinline__ void RankKeys( - UnsignedBits (&keys)[KEYS_PER_THREAD], ///< [in] Keys for this tile - int (&ranks)[KEYS_PER_THREAD], ///< [out] For each key, the local rank within the tile - int current_bit, ///< [in] The least-significant bit position of the current digit to extract - int num_bits) ///< [in] The number of bits in the current digit - { - DigitCounter thread_prefixes[KEYS_PER_THREAD]; // For each key, the count of previous keys in this tile having the same digit - DigitCounter* digit_counters[KEYS_PER_THREAD]; // For each key, the byte-offset of its corresponding digit counter in smem - - // Reset shared memory digit counters - ResetCounters(); - - for (int ITEM = 0; ITEM < KEYS_PER_THREAD; ++ITEM) - { - // Get digit - unsigned int digit = BFE(keys[ITEM], current_bit, num_bits); - - // Get sub-counter - unsigned int sub_counter = digit >> LOG_COUNTER_LANES; - - // Get counter lane - unsigned int counter_lane = digit & (COUNTER_LANES - 1); - - if (DESCENDING) - { - sub_counter = PACKING_RATIO - 1 - sub_counter; - counter_lane = COUNTER_LANES - 1 - counter_lane; - } - - // Pointer to smem digit counter - digit_counters[ITEM] = &temp_storage.digit_counters[counter_lane][linear_tid][sub_counter]; - - // Load thread-exclusive prefix - thread_prefixes[ITEM] = *digit_counters[ITEM]; - - // Store inclusive prefix - *digit_counters[ITEM] = thread_prefixes[ITEM] + 1; - } - - CTA_SYNC(); - - // Scan shared memory counters - ScanCounters(); - - CTA_SYNC(); - - // Extract the local ranks of each key - for (int ITEM = 0; ITEM < KEYS_PER_THREAD; ++ITEM) - { - // Add in threadblock exclusive prefix - ranks[ITEM] = thread_prefixes[ITEM] + *digit_counters[ITEM]; - } - } - - - /** - * \brief Rank keys. For the lower \p RADIX_DIGITS threads, digit counts for each digit are provided for the corresponding thread. 
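A usage sketch for the RankKeys implementation above — a hypothetical kernel; the one-key-per-thread layout and the single 4-bit pass are simplifications for illustration:

#include <cub/cub.cuh>

// Hypothetical kernel: rank the low 4 bits of one key per thread, then use the
// returned rank as a scatter index. The tile ends up ordered by that digit.
__global__ void RankTileKernel(const unsigned int *d_keys_in, unsigned int *d_keys_out)
{
    typedef cub::BlockRadixRank<128, 4, false> BlockRadixRank;

    __shared__ typename BlockRadixRank::TempStorage temp_storage;
    __shared__ unsigned int tile[128];

    unsigned int keys[1] = { d_keys_in[blockIdx.x * 128 + threadIdx.x] };
    int ranks[1];

    // Rank on the 4 least-significant bits (current_bit = 0, num_bits = 4);
    // the ranks form a permutation of [0, 128)
    BlockRadixRank(temp_storage).RankKeys(keys, ranks, 0, 4);

    tile[ranks[0]] = keys[0];
    __syncthreads();

    d_keys_out[blockIdx.x * 128 + threadIdx.x] = tile[threadIdx.x];
}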
- */ - template < - typename UnsignedBits, - int KEYS_PER_THREAD> - __device__ __forceinline__ void RankKeys( - UnsignedBits (&keys)[KEYS_PER_THREAD], ///< [in] Keys for this tile - int (&ranks)[KEYS_PER_THREAD], ///< [out] For each key, the local rank within the tile (out parameter) - int current_bit, ///< [in] The least-significant bit position of the current digit to extract - int num_bits, ///< [in] The number of bits in the current digit - int &exclusive_digit_prefix) ///< [out] The exclusive prefix sum for the digit threadIdx.x - { - // Rank keys - RankKeys(keys, ranks, current_bit, num_bits); - - // Get the inclusive and exclusive digit totals corresponding to the calling thread. - if ((BLOCK_THREADS == RADIX_DIGITS) || (linear_tid < RADIX_DIGITS)) - { - unsigned int bin_idx = (DESCENDING) ? - RADIX_DIGITS - linear_tid - 1 : - linear_tid; - - // Obtain ex/inclusive digit counts. (Unfortunately these all reside in the - // first counter column, resulting in unavoidable bank conflicts.) - unsigned int counter_lane = (bin_idx & (COUNTER_LANES - 1)); - unsigned int sub_counter = bin_idx >> (LOG_COUNTER_LANES); - - exclusive_digit_prefix = temp_storage.digit_counters[counter_lane][0][sub_counter]; - } - } -}; - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - - diff --git a/ml-xgboost/cub/cub/block/block_radix_sort.cuh b/ml-xgboost/cub/cub/block/block_radix_sort.cuh deleted file mode 100644 index b82d956..0000000 --- a/ml-xgboost/cub/cub/block/block_radix_sort.cuh +++ /dev/null @@ -1,865 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * The cub::BlockRadixSort class provides [collective](index.html#sec0) methods for radix sorting of items partitioned across a CUDA thread block. 
- */ - - -#pragma once - -#include "block_exchange.cuh" -#include "block_radix_rank.cuh" -#include "../util_ptx.cuh" -#include "../util_arch.cuh" -#include "../util_type.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/** - * \brief The BlockRadixSort class provides [collective](index.html#sec0) methods for sorting items partitioned across a CUDA thread block using a radix sorting method. ![](sorting_logo.png) - * \ingroup BlockModule - * - * \tparam KeyT KeyT type - * \tparam BLOCK_DIM_X The thread block length in threads along the X dimension - * \tparam ITEMS_PER_THREAD The number of items per thread - * \tparam ValueT [optional] ValueT type (default: cub::NullType, which indicates a keys-only sort) - * \tparam RADIX_BITS [optional] The number of radix bits per digit place (default: 4 bits) - * \tparam MEMOIZE_OUTER_SCAN [optional] Whether or not to buffer outer raking scan partials to incur fewer shared memory reads at the expense of higher register pressure (default: true for architectures SM35 and newer, false otherwise). - * \tparam INNER_SCAN_ALGORITHM [optional] The cub::BlockScanAlgorithm algorithm to use (default: cub::BLOCK_SCAN_WARP_SCANS) - * \tparam SMEM_CONFIG [optional] Shared memory bank mode (default: \p cudaSharedMemBankSizeFourByte) - * \tparam BLOCK_DIM_Y [optional] The thread block length in threads along the Y dimension (default: 1) - * \tparam BLOCK_DIM_Z [optional] The thread block length in threads along the Z dimension (default: 1) - * \tparam PTX_ARCH [optional] \ptxversion - * - * \par Overview - * - The [radix sorting method](http://en.wikipedia.org/wiki/Radix_sort) arranges - * items into ascending order. It relies upon a positional representation for - * keys, i.e., each key is comprised of an ordered sequence of symbols (e.g., digits, - * characters, etc.) specified from least-significant to most-significant. For a - * given input sequence of keys and a set of rules specifying a total ordering - * of the symbolic alphabet, the radix sorting method produces a lexicographic - * ordering of those keys. - * - BlockRadixSort can sort all of the built-in C++ numeric primitive types, e.g.: - * unsigned char, \p int, \p double, etc. Within each key, the implementation treats fixed-length - * bit-sequences of \p RADIX_BITS as radix digit places. Although the direct radix sorting - * method can only be applied to unsigned integral types, BlockRadixSort - * is able to sort signed and floating-point types via simple bit-wise transformations - * that ensure lexicographic key ordering. - * - \rowmajor - * - * \par Performance Considerations - * - \granularity - * - * \par A Simple Example - * \blockcollective{BlockRadixSort} - * \par - * The code snippet below illustrates a sort of 512 integer keys that - * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads - * where each thread owns 4 consecutive items. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer items each - * typedef cub::BlockRadixSort BlockRadixSort; - * - * // Allocate shared memory for BlockRadixSort - * __shared__ typename BlockRadixSort::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_keys[4]; - * ... 
- * - * // Collectively sort the keys - * BlockRadixSort(temp_storage).Sort(thread_keys); - * - * ... - * \endcode - * \par - * Suppose the set of input \p thread_keys across the block of threads is - * { [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }. The - * corresponding output \p thread_keys in those threads will be - * { [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }. - * - */ -template < - typename KeyT, - int BLOCK_DIM_X, - int ITEMS_PER_THREAD, - typename ValueT = NullType, - int RADIX_BITS = 4, - bool MEMOIZE_OUTER_SCAN = (CUB_PTX_ARCH >= 350) ? true : false, - BlockScanAlgorithm INNER_SCAN_ALGORITHM = BLOCK_SCAN_WARP_SCANS, - cudaSharedMemConfig SMEM_CONFIG = cudaSharedMemBankSizeFourByte, - int BLOCK_DIM_Y = 1, - int BLOCK_DIM_Z = 1, - int PTX_ARCH = CUB_PTX_ARCH> -class BlockRadixSort -{ -private: - - /****************************************************************************** - * Constants and type definitions - ******************************************************************************/ - - enum - { - // The thread block size in threads - BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, - - // Whether or not there are values to be trucked along with keys - KEYS_ONLY = Equals::VALUE, - }; - - // KeyT traits and unsigned bits type - typedef Traits KeyTraits; - typedef typename KeyTraits::UnsignedBits UnsignedBits; - - /// Ascending BlockRadixRank utility type - typedef BlockRadixRank< - BLOCK_DIM_X, - RADIX_BITS, - false, - MEMOIZE_OUTER_SCAN, - INNER_SCAN_ALGORITHM, - SMEM_CONFIG, - BLOCK_DIM_Y, - BLOCK_DIM_Z, - PTX_ARCH> - AscendingBlockRadixRank; - - /// Descending BlockRadixRank utility type - typedef BlockRadixRank< - BLOCK_DIM_X, - RADIX_BITS, - true, - MEMOIZE_OUTER_SCAN, - INNER_SCAN_ALGORITHM, - SMEM_CONFIG, - BLOCK_DIM_Y, - BLOCK_DIM_Z, - PTX_ARCH> - DescendingBlockRadixRank; - - /// BlockExchange utility type for keys - typedef BlockExchange BlockExchangeKeys; - - /// BlockExchange utility type for values - typedef BlockExchange BlockExchangeValues; - - /// Shared memory storage layout type - struct _TempStorage - { - union - { - typename AscendingBlockRadixRank::TempStorage asending_ranking_storage; - typename DescendingBlockRadixRank::TempStorage descending_ranking_storage; - typename BlockExchangeKeys::TempStorage exchange_keys; - typename BlockExchangeValues::TempStorage exchange_values; - }; - }; - - - /****************************************************************************** - * Thread fields - ******************************************************************************/ - - /// Shared storage reference - _TempStorage &temp_storage; - - /// Linear thread-id - unsigned int linear_tid; - - /****************************************************************************** - * Utility methods - ******************************************************************************/ - - /// Internal storage allocator - __device__ __forceinline__ _TempStorage& PrivateStorage() - { - __shared__ _TempStorage private_storage; - return private_storage; - } - - /// Rank keys (specialized for ascending sort) - __device__ __forceinline__ void RankKeys( - UnsignedBits (&unsigned_keys)[ITEMS_PER_THREAD], - int (&ranks)[ITEMS_PER_THREAD], - int begin_bit, - int pass_bits, - Int2Type /*is_descending*/) - { - AscendingBlockRadixRank(temp_storage.asending_ranking_storage).RankKeys( - unsigned_keys, - ranks, - begin_bit, - pass_bits); - } - - /// Rank keys (specialized for descending sort) - __device__ __forceinline__ void RankKeys( - 
UnsignedBits (&unsigned_keys)[ITEMS_PER_THREAD], - int (&ranks)[ITEMS_PER_THREAD], - int begin_bit, - int pass_bits, - Int2Type /*is_descending*/) - { - DescendingBlockRadixRank(temp_storage.descending_ranking_storage).RankKeys( - unsigned_keys, - ranks, - begin_bit, - pass_bits); - } - - /// ExchangeValues (specialized for key-value sort, to-blocked arrangement) - __device__ __forceinline__ void ExchangeValues( - ValueT (&values)[ITEMS_PER_THREAD], - int (&ranks)[ITEMS_PER_THREAD], - Int2Type /*is_keys_only*/, - Int2Type /*is_blocked*/) - { - CTA_SYNC(); - - // Exchange values through shared memory in blocked arrangement - BlockExchangeValues(temp_storage.exchange_values).ScatterToBlocked(values, ranks); - } - - /// ExchangeValues (specialized for key-value sort, to-striped arrangement) - __device__ __forceinline__ void ExchangeValues( - ValueT (&values)[ITEMS_PER_THREAD], - int (&ranks)[ITEMS_PER_THREAD], - Int2Type /*is_keys_only*/, - Int2Type /*is_blocked*/) - { - CTA_SYNC(); - - // Exchange values through shared memory in blocked arrangement - BlockExchangeValues(temp_storage.exchange_values).ScatterToStriped(values, ranks); - } - - /// ExchangeValues (specialized for keys-only sort) - template - __device__ __forceinline__ void ExchangeValues( - ValueT (&/*values*/)[ITEMS_PER_THREAD], - int (&/*ranks*/)[ITEMS_PER_THREAD], - Int2Type /*is_keys_only*/, - Int2Type /*is_blocked*/) - {} - - /// Sort blocked arrangement - template - __device__ __forceinline__ void SortBlocked( - KeyT (&keys)[ITEMS_PER_THREAD], ///< Keys to sort - ValueT (&values)[ITEMS_PER_THREAD], ///< Values to sort - int begin_bit, ///< The beginning (least-significant) bit index needed for key comparison - int end_bit, ///< The past-the-end (most-significant) bit index needed for key comparison - Int2Type is_descending, ///< Tag whether is a descending-order sort - Int2Type is_keys_only) ///< Tag whether is keys-only sort - { - UnsignedBits (&unsigned_keys)[ITEMS_PER_THREAD] = - reinterpret_cast(keys); - - // Twiddle bits if necessary - #pragma unroll - for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++) - { - unsigned_keys[KEY] = KeyTraits::TwiddleIn(unsigned_keys[KEY]); - } - - // Radix sorting passes - while (true) - { - int pass_bits = CUB_MIN(RADIX_BITS, end_bit - begin_bit); - - // Rank the blocked keys - int ranks[ITEMS_PER_THREAD]; - RankKeys(unsigned_keys, ranks, begin_bit, pass_bits, is_descending); - begin_bit += RADIX_BITS; - - CTA_SYNC(); - - // Exchange keys through shared memory in blocked arrangement - BlockExchangeKeys(temp_storage.exchange_keys).ScatterToBlocked(keys, ranks); - - // Exchange values through shared memory in blocked arrangement - ExchangeValues(values, ranks, is_keys_only, Int2Type()); - - // Quit if done - if (begin_bit >= end_bit) break; - - CTA_SYNC(); - } - - // Untwiddle bits if necessary - #pragma unroll - for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++) - { - unsigned_keys[KEY] = KeyTraits::TwiddleOut(unsigned_keys[KEY]); - } - } - -public: - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - - /// Sort blocked -> striped arrangement - template - __device__ __forceinline__ void SortBlockedToStriped( - KeyT (&keys)[ITEMS_PER_THREAD], ///< Keys to sort - ValueT (&values)[ITEMS_PER_THREAD], ///< Values to sort - int begin_bit, ///< The beginning (least-significant) bit index needed for key comparison - int end_bit, ///< The past-the-end (most-significant) bit index needed for key comparison - Int2Type is_descending, ///< Tag whether is a descending-order sort - Int2Type 
is_keys_only) ///< Tag whether is keys-only sort - { - UnsignedBits (&unsigned_keys)[ITEMS_PER_THREAD] = - reinterpret_cast(keys); - - // Twiddle bits if necessary - #pragma unroll - for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++) - { - unsigned_keys[KEY] = KeyTraits::TwiddleIn(unsigned_keys[KEY]); - } - - // Radix sorting passes - while (true) - { - int pass_bits = CUB_MIN(RADIX_BITS, end_bit - begin_bit); - - // Rank the blocked keys - int ranks[ITEMS_PER_THREAD]; - RankKeys(unsigned_keys, ranks, begin_bit, pass_bits, is_descending); - begin_bit += RADIX_BITS; - - CTA_SYNC(); - - // Check if this is the last pass - if (begin_bit >= end_bit) - { - // Last pass exchanges keys through shared memory in striped arrangement - BlockExchangeKeys(temp_storage.exchange_keys).ScatterToStriped(keys, ranks); - - // Last pass exchanges through shared memory in striped arrangement - ExchangeValues(values, ranks, is_keys_only, Int2Type()); - - // Quit - break; - } - - // Exchange keys through shared memory in blocked arrangement - BlockExchangeKeys(temp_storage.exchange_keys).ScatterToBlocked(keys, ranks); - - // Exchange values through shared memory in blocked arrangement - ExchangeValues(values, ranks, is_keys_only, Int2Type()); - - CTA_SYNC(); - } - - // Untwiddle bits if necessary - #pragma unroll - for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++) - { - unsigned_keys[KEY] = KeyTraits::TwiddleOut(unsigned_keys[KEY]); - } - } - -#endif // DOXYGEN_SHOULD_SKIP_THIS - - /// \smemstorage{BlockRadixSort} - struct TempStorage : Uninitialized<_TempStorage> {}; - - - /******************************************************************//** - * \name Collective constructors - *********************************************************************/ - //@{ - - /** - * \brief Collective constructor using a private static allocation of shared memory as temporary storage. - */ - __device__ __forceinline__ BlockRadixSort() - : - temp_storage(PrivateStorage()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) - {} - - - /** - * \brief Collective constructor using the specified memory allocation as temporary storage. - */ - __device__ __forceinline__ BlockRadixSort( - TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage - : - temp_storage(temp_storage.Alias()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) - {} - - - //@} end member group - /******************************************************************//** - * \name Sorting (blocked arrangements) - *********************************************************************/ - //@{ - - /** - * \brief Performs an ascending block-wide radix sort over a [blocked arrangement](index.html#sec5sec3) of keys. - * - * \par - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a sort of 512 integer keys that - * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads - * where each thread owns 4 consecutive keys. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys each - * typedef cub::BlockRadixSort BlockRadixSort; - * - * // Allocate shared memory for BlockRadixSort - * __shared__ typename BlockRadixSort::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_keys[4]; - * ... 
- * - * // Collectively sort the keys - * BlockRadixSort(temp_storage).Sort(thread_keys); - * - * \endcode - * \par - * Suppose the set of input \p thread_keys across the block of threads is - * { [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }. - * The corresponding output \p thread_keys in those threads will be - * { [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }. - */ - __device__ __forceinline__ void Sort( - KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort - int begin_bit = 0, ///< [in] [optional] The beginning (least-significant) bit index needed for key comparison - int end_bit = sizeof(KeyT) * 8) ///< [in] [optional] The past-the-end (most-significant) bit index needed for key comparison - { - NullType values[ITEMS_PER_THREAD]; - - SortBlocked(keys, values, begin_bit, end_bit, Int2Type(), Int2Type()); - } - - - /** - * \brief Performs an ascending block-wide radix sort across a [blocked arrangement](index.html#sec5sec3) of keys and values. - * - * \par - * - BlockRadixSort can only accommodate one associated tile of values. To "truck along" - * more than one tile of values, simply perform a key-value sort of the keys paired - * with a temporary value array that enumerates the key indices. The reordered indices - * can then be used as a gather-vector for exchanging other associated tile data through - * shared memory. - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a sort of 512 integer keys and values that - * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads - * where each thread owns 4 consecutive pairs. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys and values each - * typedef cub::BlockRadixSort BlockRadixSort; - * - * // Allocate shared memory for BlockRadixSort - * __shared__ typename BlockRadixSort::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_keys[4]; - * int thread_values[4]; - * ... - * - * // Collectively sort the keys and values among block threads - * BlockRadixSort(temp_storage).Sort(thread_keys, thread_values); - * - * \endcode - * \par - * Suppose the set of input \p thread_keys across the block of threads is - * { [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }. The - * corresponding output \p thread_keys in those threads will be - * { [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }. - * - */ - __device__ __forceinline__ void Sort( - KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort - ValueT (&values)[ITEMS_PER_THREAD], ///< [in-out] Values to sort - int begin_bit = 0, ///< [in] [optional] The beginning (least-significant) bit index needed for key comparison - int end_bit = sizeof(KeyT) * 8) ///< [in] [optional] The past-the-end (most-significant) bit index needed for key comparison - { - SortBlocked(keys, values, begin_bit, end_bit, Int2Type(), Int2Type()); - } - - /** - * \brief Performs a descending block-wide radix sort over a [blocked arrangement](index.html#sec5sec3) of keys. - * - * \par - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a sort of 512 integer keys that - * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads - * where each thread owns 4 consecutive keys. 
- * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys each - * typedef cub::BlockRadixSort BlockRadixSort; - * - * // Allocate shared memory for BlockRadixSort - * __shared__ typename BlockRadixSort::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_keys[4]; - * ... - * - * // Collectively sort the keys - * BlockRadixSort(temp_storage).Sort(thread_keys); - * - * \endcode - * \par - * Suppose the set of input \p thread_keys across the block of threads is - * { [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }. - * The corresponding output \p thread_keys in those threads will be - * { [511,510,509,508], [11,10,9,8], [7,6,5,4], ..., [3,2,1,0] }. - */ - __device__ __forceinline__ void SortDescending( - KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort - int begin_bit = 0, ///< [in] [optional] The beginning (least-significant) bit index needed for key comparison - int end_bit = sizeof(KeyT) * 8) ///< [in] [optional] The past-the-end (most-significant) bit index needed for key comparison - { - NullType values[ITEMS_PER_THREAD]; - - SortBlocked(keys, values, begin_bit, end_bit, Int2Type(), Int2Type()); - } - - - /** - * \brief Performs a descending block-wide radix sort across a [blocked arrangement](index.html#sec5sec3) of keys and values. - * - * \par - * - BlockRadixSort can only accommodate one associated tile of values. To "truck along" - * more than one tile of values, simply perform a key-value sort of the keys paired - * with a temporary value array that enumerates the key indices. The reordered indices - * can then be used as a gather-vector for exchanging other associated tile data through - * shared memory. - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a sort of 512 integer keys and values that - * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads - * where each thread owns 4 consecutive pairs. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys and values each - * typedef cub::BlockRadixSort BlockRadixSort; - * - * // Allocate shared memory for BlockRadixSort - * __shared__ typename BlockRadixSort::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_keys[4]; - * int thread_values[4]; - * ... - * - * // Collectively sort the keys and values among block threads - * BlockRadixSort(temp_storage).Sort(thread_keys, thread_values); - * - * \endcode - * \par - * Suppose the set of input \p thread_keys across the block of threads is - * { [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }. The - * corresponding output \p thread_keys in those threads will be - * { [511,510,509,508], [11,10,9,8], [7,6,5,4], ..., [3,2,1,0] }. 
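Completing the SortDescending snippet above into a self-contained kernel — a sketch; the single-tile-per-block layout and the d_keys buffer are illustrative assumptions:

#include <cub/cub.cuh>

// Hypothetical kernel: descending keys-only sort of one 512-key tile per block.
__global__ void SortDescendingKernel(int *d_keys)
{
    typedef cub::BlockRadixSort<int, 128, 4> BlockRadixSort;

    __shared__ typename BlockRadixSort::TempStorage temp_storage;

    // Blocked arrangement: thread t owns keys [4t, 4t+3] of its block's tile
    int thread_keys[4];
    #pragma unroll
    for (int i = 0; i < 4; ++i)
        thread_keys[i] = d_keys[blockIdx.x * 512 + threadIdx.x * 4 + i];

    BlockRadixSort(temp_storage).SortDescending(thread_keys);

    #pragma unroll
    for (int i = 0; i < 4; ++i)
        d_keys[blockIdx.x * 512 + threadIdx.x * 4 + i] = thread_keys[i];
}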
- * - */ - __device__ __forceinline__ void SortDescending( - KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort - ValueT (&values)[ITEMS_PER_THREAD], ///< [in-out] Values to sort - int begin_bit = 0, ///< [in] [optional] The beginning (least-significant) bit index needed for key comparison - int end_bit = sizeof(KeyT) * 8) ///< [in] [optional] The past-the-end (most-significant) bit index needed for key comparison - { - SortBlocked(keys, values, begin_bit, end_bit, Int2Type(), Int2Type()); - } - - - //@} end member group - /******************************************************************//** - * \name Sorting (blocked arrangement -> striped arrangement) - *********************************************************************/ - //@{ - - - /** - * \brief Performs an ascending radix sort across a [blocked arrangement](index.html#sec5sec3) of keys, leaving them in a [striped arrangement](index.html#sec5sec3). - * - * \par - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a sort of 512 integer keys that - * are initially partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads - * where each thread owns 4 consecutive keys. The final partitioning is striped. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys each - * typedef cub::BlockRadixSort BlockRadixSort; - * - * // Allocate shared memory for BlockRadixSort - * __shared__ typename BlockRadixSort::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_keys[4]; - * ... - * - * // Collectively sort the keys - * BlockRadixSort(temp_storage).SortBlockedToStriped(thread_keys); - * - * \endcode - * \par - * Suppose the set of input \p thread_keys across the block of threads is - * { [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }. The - * corresponding output \p thread_keys in those threads will be - * { [0,128,256,384], [1,129,257,385], [2,130,258,386], ..., [127,255,383,511] }. - * - */ - __device__ __forceinline__ void SortBlockedToStriped( - KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort - int begin_bit = 0, ///< [in] [optional] The beginning (least-significant) bit index needed for key comparison - int end_bit = sizeof(KeyT) * 8) ///< [in] [optional] The past-the-end (most-significant) bit index needed for key comparison - { - NullType values[ITEMS_PER_THREAD]; - - SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type(), Int2Type()); - } - - - /** - * \brief Performs an ascending radix sort across a [blocked arrangement](index.html#sec5sec3) of keys and values, leaving them in a [striped arrangement](index.html#sec5sec3). - * - * \par - * - BlockRadixSort can only accommodate one associated tile of values. To "truck along" - * more than one tile of values, simply perform a key-value sort of the keys paired - * with a temporary value array that enumerates the key indices. The reordered indices - * can then be used as a gather-vector for exchanging other associated tile data through - * shared memory. - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a sort of 512 integer keys and values that - * are initially partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads - * where each thread owns 4 consecutive pairs. The final partitioning is striped. 
- * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys and values each - * typedef cub::BlockRadixSort BlockRadixSort; - * - * // Allocate shared memory for BlockRadixSort - * __shared__ typename BlockRadixSort::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_keys[4]; - * int thread_values[4]; - * ... - * - * // Collectively sort the keys and values among block threads - * BlockRadixSort(temp_storage).SortBlockedToStriped(thread_keys, thread_values); - * - * \endcode - * \par - * Suppose the set of input \p thread_keys across the block of threads is - * { [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }. The - * corresponding output \p thread_keys in those threads will be - * { [0,128,256,384], [1,129,257,385], [2,130,258,386], ..., [127,255,383,511] }. - * - */ - __device__ __forceinline__ void SortBlockedToStriped( - KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort - ValueT (&values)[ITEMS_PER_THREAD], ///< [in-out] Values to sort - int begin_bit = 0, ///< [in] [optional] The beginning (least-significant) bit index needed for key comparison - int end_bit = sizeof(KeyT) * 8) ///< [in] [optional] The past-the-end (most-significant) bit index needed for key comparison - { - SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type(), Int2Type()); - } - - - /** - * \brief Performs a descending radix sort across a [blocked arrangement](index.html#sec5sec3) of keys, leaving them in a [striped arrangement](index.html#sec5sec3). - * - * \par - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a sort of 512 integer keys that - * are initially partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads - * where each thread owns 4 consecutive keys. The final partitioning is striped. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys each - * typedef cub::BlockRadixSort BlockRadixSort; - * - * // Allocate shared memory for BlockRadixSort - * __shared__ typename BlockRadixSort::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_keys[4]; - * ... - * - * // Collectively sort the keys - * BlockRadixSort(temp_storage).SortBlockedToStriped(thread_keys); - * - * \endcode - * \par - * Suppose the set of input \p thread_keys across the block of threads is - * { [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }. The - * corresponding output \p thread_keys in those threads will be - * { [511,383,255,127], [386,258,130,2], [385,257,128,1], ..., [384,256,128,0] }. 
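The striped result exists so that the final write-back is coalesced. A sketch of the full load/sort/store pattern — LoadDirectStriped and StoreDirectStriped are assumed here as the companion direct-I/O utilities, and the 128x4 shape is illustrative:

#include <cub/cub.cuh>

// Hypothetical kernel: striped load, blocked-to-striped sort, striped store.
// Thread t reads and writes items t, t+128, t+256, t+384 (fully coalesced).
__global__ void SortToStripedKernel(const int *d_in, int *d_out)
{
    typedef cub::BlockRadixSort<int, 128, 4> BlockRadixSort;

    __shared__ typename BlockRadixSort::TempStorage temp_storage;

    int thread_keys[4];
    cub::LoadDirectStriped<128>(threadIdx.x, d_in + blockIdx.x * 512, thread_keys);

    // For a keys-only sort the initial cross-thread arrangement does not affect
    // the sorted result, so a striped load pairs naturally with a striped store.
    BlockRadixSort(temp_storage).SortBlockedToStriped(thread_keys);

    cub::StoreDirectStriped<128>(threadIdx.x, d_out + blockIdx.x * 512, thread_keys);
}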
- * - */ - __device__ __forceinline__ void SortDescendingBlockedToStriped( - KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort - int begin_bit = 0, ///< [in] [optional] The beginning (least-significant) bit index needed for key comparison - int end_bit = sizeof(KeyT) * 8) ///< [in] [optional] The past-the-end (most-significant) bit index needed for key comparison - { - NullType values[ITEMS_PER_THREAD]; - - SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type(), Int2Type()); - } - - - /** - * \brief Performs a descending radix sort across a [blocked arrangement](index.html#sec5sec3) of keys and values, leaving them in a [striped arrangement](index.html#sec5sec3). - * - * \par - * - BlockRadixSort can only accommodate one associated tile of values. To "truck along" - * more than one tile of values, simply perform a key-value sort of the keys paired - * with a temporary value array that enumerates the key indices. The reordered indices - * can then be used as a gather-vector for exchanging other associated tile data through - * shared memory. - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a sort of 512 integer keys and values that - * are initially partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads - * where each thread owns 4 consecutive pairs. The final partitioning is striped. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys and values each - * typedef cub::BlockRadixSort BlockRadixSort; - * - * // Allocate shared memory for BlockRadixSort - * __shared__ typename BlockRadixSort::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_keys[4]; - * int thread_values[4]; - * ... - * - * // Collectively sort the keys and values among block threads - * BlockRadixSort(temp_storage).SortBlockedToStriped(thread_keys, thread_values); - * - * \endcode - * \par - * Suppose the set of input \p thread_keys across the block of threads is - * { [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }. The - * corresponding output \p thread_keys in those threads will be - * { [511,383,255,127], [386,258,130,2], [385,257,128,1], ..., [384,256,128,0] }. - * - */ - __device__ __forceinline__ void SortDescendingBlockedToStriped( - KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort - ValueT (&values)[ITEMS_PER_THREAD], ///< [in-out] Values to sort - int begin_bit = 0, ///< [in] [optional] The beginning (least-significant) bit index needed for key comparison - int end_bit = sizeof(KeyT) * 8) ///< [in] [optional] The past-the-end (most-significant) bit index needed for key comparison - { - SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type(), Int2Type()); - } - - - //@} end member group - -}; - -/** - * \example example_block_radix_sort.cu - */ - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/block/block_raking_layout.cuh b/ml-xgboost/cub/cub/block/block_raking_layout.cuh deleted file mode 100644 index 9997eca..0000000 --- a/ml-xgboost/cub/cub/block/block_raking_layout.cuh +++ /dev/null @@ -1,153 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::BlockRakingLayout provides a conflict-free shared memory layout abstraction for warp-raking across thread block data. - */ - - -#pragma once - -#include "../util_macro.cuh" -#include "../util_arch.cuh" -#include "../util_type.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/** - * \brief BlockRakingLayout provides a conflict-free shared memory layout abstraction for 1D raking across thread block data. ![](raking.png) - * \ingroup BlockModule - * - * \par Overview - * This type facilitates a shared memory usage pattern where a block of CUDA - * threads places elements into shared memory and then reduces the active - * parallelism to one "raking" warp of threads for serially aggregating consecutive - * sequences of shared items. Padding is inserted to eliminate bank conflicts - * (for most data types). - * - * \tparam T The data type to be exchanged. - * \tparam BLOCK_THREADS The thread block size in threads. 
- * \tparam PTX_ARCH [optional] \ptxversion - */ -template < - typename T, - int BLOCK_THREADS, - int PTX_ARCH = CUB_PTX_ARCH> -struct BlockRakingLayout -{ - //--------------------------------------------------------------------- - // Constants and type definitions - //--------------------------------------------------------------------- - - enum - { - /// The total number of elements that need to be cooperatively reduced - SHARED_ELEMENTS = BLOCK_THREADS, - - /// Maximum number of warp-synchronous raking threads - MAX_RAKING_THREADS = CUB_MIN(BLOCK_THREADS, CUB_WARP_THREADS(PTX_ARCH)), - - /// Number of raking elements per warp-synchronous raking thread (rounded up) - SEGMENT_LENGTH = (SHARED_ELEMENTS + MAX_RAKING_THREADS - 1) / MAX_RAKING_THREADS, - - /// Never use a raking thread that will have no valid data (e.g., when BLOCK_THREADS is 62 and SEGMENT_LENGTH is 2, we should only use 31 raking threads) - RAKING_THREADS = (SHARED_ELEMENTS + SEGMENT_LENGTH - 1) / SEGMENT_LENGTH, - - /// Whether we will have bank conflicts (technically we should find out if the GCD is > 1) - HAS_CONFLICTS = (CUB_SMEM_BANKS(PTX_ARCH) % SEGMENT_LENGTH == 0), - - /// Degree of bank conflicts (e.g., 4-way) - CONFLICT_DEGREE = (HAS_CONFLICTS) ? - (MAX_RAKING_THREADS * SEGMENT_LENGTH) / CUB_SMEM_BANKS(PTX_ARCH) : - 1, - - /// Pad each segment length with one element if degree of bank conflicts is greater than 4-way (heuristic) - SEGMENT_PADDING = (CONFLICT_DEGREE > CUB_PREFER_CONFLICT_OVER_PADDING(PTX_ARCH)) ? 1 : 0, -// SEGMENT_PADDING = (HAS_CONFLICTS) ? 1 : 0, - - /// Total number of elements in the raking grid - GRID_ELEMENTS = RAKING_THREADS * (SEGMENT_LENGTH + SEGMENT_PADDING), - - /// Whether or not we need bounds checking during raking (the number of reduction elements is not a multiple of the number of raking threads) - UNGUARDED = (SHARED_ELEMENTS % RAKING_THREADS == 0), - }; - - - /** - * \brief Shared memory storage type - */ - struct __align__(16) _TempStorage - { - T buff[BlockRakingLayout::GRID_ELEMENTS]; - }; - - /// Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - - /** - * \brief Returns the location for the calling thread to place data into the grid - */ - static __device__ __forceinline__ T* PlacementPtr( - TempStorage &temp_storage, - unsigned int linear_tid) - { - // Offset for partial - unsigned int offset = linear_tid; - - // Add in one padding element for every segment - if (SEGMENT_PADDING > 0) - { - offset += offset / SEGMENT_LENGTH; - } - - // Incorporating a block of padding partials every shared memory segment - return temp_storage.Alias().buff + offset; - } - - - /** - * \brief Returns the location for the calling thread to begin sequential raking - */ - static __device__ __forceinline__ T* RakingPtr( - TempStorage &temp_storage, - unsigned int linear_tid) - { - return temp_storage.Alias().buff + (linear_tid * (SEGMENT_LENGTH + SEGMENT_PADDING)); - } -}; - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/block/block_reduce.cuh b/ml-xgboost/cub/cub/block/block_reduce.cuh deleted file mode 100644 index aa7921e..0000000 --- a/ml-xgboost/cub/cub/block/block_reduce.cuh +++ /dev/null @@ -1,607 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * The cub::BlockReduce class provides [collective](index.html#sec0) methods for computing a parallel reduction of items partitioned across a CUDA thread block. - */ - -#pragma once - -#include "specializations/block_reduce_raking.cuh" -#include "specializations/block_reduce_raking_commutative_only.cuh" -#include "specializations/block_reduce_warp_reductions.cuh" -#include "../util_ptx.cuh" -#include "../util_type.cuh" -#include "../thread/thread_operators.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - - -/****************************************************************************** - * Algorithmic variants - ******************************************************************************/ - -/** - * BlockReduceAlgorithm enumerates alternative algorithms for parallel - * reduction across a CUDA threadblock. - */ -enum BlockReduceAlgorithm -{ - - /** - * \par Overview - * An efficient "raking" reduction algorithm that only supports commutative - * reduction operators (true for most operations, e.g., addition). - * - * \par - * Execution is comprised of three phases: - * -# Upsweep sequential reduction in registers (if threads contribute more - * than one input each). Threads in warps other than the first warp place - * their partial reductions into shared memory. - * -# Upsweep sequential reduction in shared memory. Threads within the first - * warp continue to accumulate by raking across segments of shared partial reductions - * -# A warp-synchronous Kogge-Stone style reduction within the raking warp. - * - * \par - * \image html block_reduce.png - *
<div class="centercaption">\p BLOCK_REDUCE_RAKING data flow for a hypothetical 16-thread threadblock and 4-thread raking warp.</div>
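// NOTE: BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY is only safe when the reduction
// operator is commutative, i.e. op(a, b) == op(b, a). A hypothetical sketch
// (Concat is an illustrative, order-sensitive functor, not a CUB type):
//
//     typedef cub::BlockReduce<int, 128,
//         cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY> BlockReduceC;
//
//     BlockReduceC(temp_storage).Sum(x);               // OK: addition commutes
//     BlockReduceC(temp_storage).Reduce(x, Concat());  // NOT OK: order-sensitive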
- *
- * \par Performance Considerations
- * - This variant performs less communication than BLOCK_REDUCE_RAKING_NON_COMMUTATIVE
- * and is preferable when the reduction operator is commutative. This variant
- * applies fewer reduction operators than BLOCK_REDUCE_WARP_REDUCTIONS, and can provide higher overall
- * throughput across the GPU when suitably occupied. However, turn-around latency may be
- * higher than that of BLOCK_REDUCE_WARP_REDUCTIONS and thus less desirable
- * when the GPU is under-occupied.
- */
- BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY,
-
-
- /**
- * \par Overview
- * An efficient "raking" reduction algorithm that supports commutative
- * (e.g., addition) and non-commutative (e.g., string concatenation) reduction
- * operators. \blocked.
- *
- * \par
- * Execution is comprised of three phases:
- * -# Upsweep sequential reduction in registers (if threads contribute more
- * than one input each). Each thread then places the partial reduction
- * of its item(s) into shared memory.
- * -# Upsweep sequential reduction in shared memory. Threads within a
- * single warp rake across segments of shared partial reductions.
- * -# A warp-synchronous Kogge-Stone style reduction within the raking warp.
- *
- * \par
- * \image html block_reduce.png
- * <div class="centercaption">\p BLOCK_REDUCE_RAKING data flow for a hypothetical 16-thread threadblock and 4-thread raking warp.</div>
- *
- * \par Performance Considerations
- * - This variant performs more communication than BLOCK_REDUCE_RAKING
- * and is only preferable when the reduction operator is non-commutative. This variant
- * applies fewer reduction operators than BLOCK_REDUCE_WARP_REDUCTIONS, and can provide higher overall
- * throughput across the GPU when suitably occupied. However, turn-around latency may be
- * higher than that of BLOCK_REDUCE_WARP_REDUCTIONS and thus less desirable
- * when the GPU is under-occupied.
- */
- BLOCK_REDUCE_RAKING,
-
-
- /**
- * \par Overview
- * A quick "tiled warp-reductions" reduction algorithm that supports commutative
- * (e.g., addition) and non-commutative (e.g., string concatenation) reduction
- * operators.
- *
- * \par
- * Execution is comprised of three phases:
- * -# Upsweep sequential reduction in registers (if threads contribute more
- * than one input each). Each thread then places the partial reduction
- * of its item(s) into shared memory.
- * -# Compute a shallow, but inefficient warp-synchronous Kogge-Stone style
- * reduction within each warp.
- * -# A propagation phase where the warp reduction outputs in each warp are
- * updated with the aggregate from each preceding warp.
- *
- * \par
- * \image html block_scan_warpscans.png
- * <div class="centercaption">\p BLOCK_REDUCE_WARP_REDUCTIONS data flow for a hypothetical 16-thread threadblock and 4-thread raking warp.</div>
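// NOTE: a short sketch (assuming CUB's documented API; buffer names are
// illustrative) of selecting a variant explicitly via the ALGORITHM template
// parameter, per the latency/throughput trade-offs described in this enum:

#include <cub/cub.cuh>   // or equivalently <cub/block/block_reduce.cuh>

__global__ void LatencySensitiveSum(const int *d_in, int *d_out)
{
    // Favor turn-around latency on an under-occupied GPU
    typedef cub::BlockReduce<int, 128, cub::BLOCK_REDUCE_WARP_REDUCTIONS> BlockReduce;
    __shared__ typename BlockReduce::TempStorage temp_storage;

    int thread_data = d_in[threadIdx.x];
    int aggregate = BlockReduce(temp_storage).Sum(thread_data);   // valid in thread0 only
    if (threadIdx.x == 0)
        *d_out = aggregate;
}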
- *
- * \par Performance Considerations
- * - This variant applies more reduction operators than BLOCK_REDUCE_RAKING
- * or BLOCK_REDUCE_RAKING_NON_COMMUTATIVE, which may result in lower overall
- * throughput across the GPU. However, turn-around latency may be lower and
- * thus useful when the GPU is under-occupied.
- */
- BLOCK_REDUCE_WARP_REDUCTIONS,
-};
-
-
-/******************************************************************************
- * Block reduce
- ******************************************************************************/
-
-/**
- * \brief The BlockReduce class provides [collective](index.html#sec0) methods for computing a parallel reduction of items partitioned across a CUDA thread block. ![](reduce_logo.png)
- * \ingroup BlockModule
- *
- * \tparam T Data type being reduced
- * \tparam BLOCK_DIM_X The thread block length in threads along the X dimension
- * \tparam ALGORITHM [optional] cub::BlockReduceAlgorithm enumerator specifying the underlying algorithm to use (default: cub::BLOCK_REDUCE_WARP_REDUCTIONS)
- * \tparam BLOCK_DIM_Y [optional] The thread block length in threads along the Y dimension (default: 1)
- * \tparam BLOCK_DIM_Z [optional] The thread block length in threads along the Z dimension (default: 1)
- * \tparam PTX_ARCH [optional] \ptxversion
- *
- * \par Overview
- * - A reduction (or fold)
- * uses a binary combining operator to compute a single aggregate from a list of input elements.
- * - \rowmajor
- * - BlockReduce can be optionally specialized by algorithm to accommodate different latency/throughput workload profiles:
- * -# cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY. An efficient "raking" reduction algorithm that only supports commutative reduction operators. [More...](\ref cub::BlockReduceAlgorithm)
- * -# cub::BLOCK_REDUCE_RAKING. An efficient "raking" reduction algorithm that supports commutative and non-commutative reduction operators. [More...](\ref cub::BlockReduceAlgorithm)
- * -# cub::BLOCK_REDUCE_WARP_REDUCTIONS. A quick "tiled warp-reductions" reduction algorithm that supports commutative and non-commutative reduction operators. [More...](\ref cub::BlockReduceAlgorithm)
- *
- * \par Performance Considerations
- * - \granularity
- * - Very efficient (only one synchronization barrier).
- * - Incurs zero bank conflicts for most types
- * - Computation is slightly more efficient (i.e., having lower instruction overhead) for:
- * - Summation (vs. generic reduction)
- * - \p BLOCK_THREADS is a multiple of the architecture's warp size
- * - Every thread has a valid input (i.e., full vs. partial-tiles)
- * - See cub::BlockReduceAlgorithm for performance details regarding algorithmic alternatives
- *
- * \par A Simple Example
- * \blockcollective{BlockReduce}
- * \par
- * The code snippet below illustrates a sum reduction of 512 integer items that
- * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads
- * where each thread owns 4 consecutive items.
- * \par
- * \code
- * #include <cub/cub.cuh> // or equivalently <cub/block/block_reduce.cuh>
- *
- * __global__ void ExampleKernel(...)
- * {
- * // Specialize BlockReduce for a 1D block of 128 threads on type int
- * typedef cub::BlockReduce<int, 128> BlockReduce;
- *
- * // Allocate shared memory for BlockReduce
- * __shared__ typename BlockReduce::TempStorage temp_storage;
- *
- * // Obtain a segment of consecutive items that are blocked across threads
- * int thread_data[4];
- * ...
- * - * // Compute the block-wide sum for thread0 - * int aggregate = BlockReduce(temp_storage).Sum(thread_data); - * - * \endcode - * - */ -template < - typename T, - int BLOCK_DIM_X, - BlockReduceAlgorithm ALGORITHM = BLOCK_REDUCE_WARP_REDUCTIONS, - int BLOCK_DIM_Y = 1, - int BLOCK_DIM_Z = 1, - int PTX_ARCH = CUB_PTX_ARCH> -class BlockReduce -{ -private: - - /****************************************************************************** - * Constants and type definitions - ******************************************************************************/ - - /// Constants - enum - { - /// The thread block size in threads - BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, - }; - - typedef BlockReduceWarpReductions WarpReductions; - typedef BlockReduceRakingCommutativeOnly RakingCommutativeOnly; - typedef BlockReduceRaking Raking; - - /// Internal specialization type - typedef typename If<(ALGORITHM == BLOCK_REDUCE_WARP_REDUCTIONS), - WarpReductions, - typename If<(ALGORITHM == BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY), - RakingCommutativeOnly, - Raking>::Type>::Type InternalBlockReduce; // BlockReduceRaking - - /// Shared memory storage layout type for BlockReduce - typedef typename InternalBlockReduce::TempStorage _TempStorage; - - - /****************************************************************************** - * Utility methods - ******************************************************************************/ - - /// Internal storage allocator - __device__ __forceinline__ _TempStorage& PrivateStorage() - { - __shared__ _TempStorage private_storage; - return private_storage; - } - - - /****************************************************************************** - * Thread fields - ******************************************************************************/ - - /// Shared storage reference - _TempStorage &temp_storage; - - /// Linear thread-id - unsigned int linear_tid; - - -public: - - /// \smemstorage{BlockReduce} - struct TempStorage : Uninitialized<_TempStorage> {}; - - - /******************************************************************//** - * \name Collective constructors - *********************************************************************/ - //@{ - - /** - * \brief Collective constructor using a private static allocation of shared memory as temporary storage. - */ - __device__ __forceinline__ BlockReduce() - : - temp_storage(PrivateStorage()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) - {} - - - /** - * \brief Collective constructor using the specified memory allocation as temporary storage. - */ - __device__ __forceinline__ BlockReduce( - TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage - : - temp_storage(temp_storage.Alias()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) - {} - - - //@} end member group - /******************************************************************//** - * \name Generic reductions - *********************************************************************/ - //@{ - - - /** - * \brief Computes a block-wide reduction for thread0 using the specified binary reduction functor. Each thread contributes one input element. - * - * \par - * - The return value is undefined in threads other than thread0. - * - \rowmajor - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a max reduction of 128 integer items that - * are partitioned across 128 threads. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) 
- * { - * // Specialize BlockReduce for a 1D block of 128 threads on type int - * typedef cub::BlockReduce BlockReduce; - * - * // Allocate shared memory for BlockReduce - * __shared__ typename BlockReduce::TempStorage temp_storage; - * - * // Each thread obtains an input item - * int thread_data; - * ... - * - * // Compute the block-wide max for thread0 - * int aggregate = BlockReduce(temp_storage).Reduce(thread_data, cub::Max()); - * - * \endcode - * - * \tparam ReductionOp [inferred] Binary reduction functor type having member T operator()(const T &a, const T &b) - */ - template - __device__ __forceinline__ T Reduce( - T input, ///< [in] Calling thread's input - ReductionOp reduction_op) ///< [in] Binary reduction functor - { - return InternalBlockReduce(temp_storage).template Reduce(input, BLOCK_THREADS, reduction_op); - } - - - /** - * \brief Computes a block-wide reduction for thread0 using the specified binary reduction functor. Each thread contributes an array of consecutive input elements. - * - * \par - * - The return value is undefined in threads other than thread0. - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a max reduction of 512 integer items that - * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads - * where each thread owns 4 consecutive items. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockReduce for a 1D block of 128 threads on type int - * typedef cub::BlockReduce BlockReduce; - * - * // Allocate shared memory for BlockReduce - * __shared__ typename BlockReduce::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * ... - * - * // Compute the block-wide max for thread0 - * int aggregate = BlockReduce(temp_storage).Reduce(thread_data, cub::Max()); - * - * \endcode - * - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - * \tparam ReductionOp [inferred] Binary reduction functor type having member T operator()(const T &a, const T &b) - */ - template < - int ITEMS_PER_THREAD, - typename ReductionOp> - __device__ __forceinline__ T Reduce( - T (&inputs)[ITEMS_PER_THREAD], ///< [in] Calling thread's input segment - ReductionOp reduction_op) ///< [in] Binary reduction functor - { - // Reduce partials - T partial = ThreadReduce(inputs, reduction_op); - return Reduce(partial, reduction_op); - } - - - /** - * \brief Computes a block-wide reduction for thread0 using the specified binary reduction functor. The first \p num_valid threads each contribute one input element. - * - * \par - * - The return value is undefined in threads other than thread0. - * - \rowmajor - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a max reduction of a partially-full tile of integer items that - * are partitioned across 128 threads. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(int num_valid, ...) - * { - * // Specialize BlockReduce for a 1D block of 128 threads on type int - * typedef cub::BlockReduce BlockReduce; - * - * // Allocate shared memory for BlockReduce - * __shared__ typename BlockReduce::TempStorage temp_storage; - * - * // Each thread obtains an input item - * int thread_data; - * if (threadIdx.x < num_valid) thread_data = ... 
- * - * // Compute the block-wide max for thread0 - * int aggregate = BlockReduce(temp_storage).Reduce(thread_data, cub::Max(), num_valid); - * - * \endcode - * - * \tparam ReductionOp [inferred] Binary reduction functor type having member T operator()(const T &a, const T &b) - */ - template - __device__ __forceinline__ T Reduce( - T input, ///< [in] Calling thread's input - ReductionOp reduction_op, ///< [in] Binary reduction functor - int num_valid) ///< [in] Number of threads containing valid elements (may be less than BLOCK_THREADS) - { - // Determine if we scan skip bounds checking - if (num_valid >= BLOCK_THREADS) - { - return InternalBlockReduce(temp_storage).template Reduce(input, num_valid, reduction_op); - } - else - { - return InternalBlockReduce(temp_storage).template Reduce(input, num_valid, reduction_op); - } - } - - - //@} end member group - /******************************************************************//** - * \name Summation reductions - *********************************************************************/ - //@{ - - - /** - * \brief Computes a block-wide reduction for thread0 using addition (+) as the reduction operator. Each thread contributes one input element. - * - * \par - * - The return value is undefined in threads other than thread0. - * - \rowmajor - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a sum reduction of 128 integer items that - * are partitioned across 128 threads. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockReduce for a 1D block of 128 threads on type int - * typedef cub::BlockReduce BlockReduce; - * - * // Allocate shared memory for BlockReduce - * __shared__ typename BlockReduce::TempStorage temp_storage; - * - * // Each thread obtains an input item - * int thread_data; - * ... - * - * // Compute the block-wide sum for thread0 - * int aggregate = BlockReduce(temp_storage).Sum(thread_data); - * - * \endcode - * - */ - __device__ __forceinline__ T Sum( - T input) ///< [in] Calling thread's input - { - return InternalBlockReduce(temp_storage).template Sum(input, BLOCK_THREADS); - } - - /** - * \brief Computes a block-wide reduction for thread0 using addition (+) as the reduction operator. Each thread contributes an array of consecutive input elements. - * - * \par - * - The return value is undefined in threads other than thread0. - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a sum reduction of 512 integer items that - * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads - * where each thread owns 4 consecutive items. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockReduce for a 1D block of 128 threads on type int - * typedef cub::BlockReduce BlockReduce; - * - * // Allocate shared memory for BlockReduce - * __shared__ typename BlockReduce::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * ... - * - * // Compute the block-wide sum for thread0 - * int aggregate = BlockReduce(temp_storage).Sum(thread_data); - * - * \endcode - * - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. 
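// NOTE: in the num_valid overload above, the two branches read identically
// because the <true>/<false> template arguments appear to have been stripped
// in transit; upstream CUB dispatches on whether the tile is full, roughly:
//
//     if (num_valid >= BLOCK_THREADS)   // full tile: skip bounds checking
//         return InternalBlockReduce(temp_storage)
//             .template Reduce<true>(input, num_valid, reduction_op);
//     else                              // partial tile: guarded access
//         return InternalBlockReduce(temp_storage)
//             .template Reduce<false>(input, num_valid, reduction_op);
//
// The same presumably applies to the Sum(input, num_valid) overload below,
// and "Determine if we scan skip bounds checking" presumably reads
// "Determine if we can skip bounds checking".
//
// The \code snippets likewise lost their angle-bracket spans. A reconstructed,
// compilable version of the 512-item sum example (d_in/d_out hypothetical):

#include <cub/cub.cuh>   // or equivalently <cub/block/block_reduce.cuh>

__global__ void BlockSum512(const int *d_in, int *d_out)
{
    // Specialize BlockReduce for a 1D block of 128 threads on type int
    typedef cub::BlockReduce<int, 128> BlockReduce;

    // Allocate shared memory for BlockReduce
    __shared__ typename BlockReduce::TempStorage temp_storage;

    // Obtain a segment of 4 consecutive items per thread (512 items total)
    int thread_data[4];
    for (int i = 0; i < 4; ++i)
        thread_data[i] = d_in[threadIdx.x * 4 + i];

    // Compute the block-wide sum; the result is only defined in thread0
    int aggregate = BlockReduce(temp_storage).Sum(thread_data);
    if (threadIdx.x == 0)
        *d_out = aggregate;
}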
- */ - template - __device__ __forceinline__ T Sum( - T (&inputs)[ITEMS_PER_THREAD]) ///< [in] Calling thread's input segment - { - // Reduce partials - T partial = ThreadReduce(inputs, cub::Sum()); - return Sum(partial); - } - - - /** - * \brief Computes a block-wide reduction for thread0 using addition (+) as the reduction operator. The first \p num_valid threads each contribute one input element. - * - * \par - * - The return value is undefined in threads other than thread0. - * - \rowmajor - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a sum reduction of a partially-full tile of integer items that - * are partitioned across 128 threads. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(int num_valid, ...) - * { - * // Specialize BlockReduce for a 1D block of 128 threads on type int - * typedef cub::BlockReduce BlockReduce; - * - * // Allocate shared memory for BlockReduce - * __shared__ typename BlockReduce::TempStorage temp_storage; - * - * // Each thread obtains an input item (up to num_items) - * int thread_data; - * if (threadIdx.x < num_valid) - * thread_data = ... - * - * // Compute the block-wide sum for thread0 - * int aggregate = BlockReduce(temp_storage).Sum(thread_data, num_valid); - * - * \endcode - * - */ - __device__ __forceinline__ T Sum( - T input, ///< [in] Calling thread's input - int num_valid) ///< [in] Number of threads containing valid elements (may be less than BLOCK_THREADS) - { - // Determine if we scan skip bounds checking - if (num_valid >= BLOCK_THREADS) - { - return InternalBlockReduce(temp_storage).template Sum(input, num_valid); - } - else - { - return InternalBlockReduce(temp_storage).template Sum(input, num_valid); - } - } - - - //@} end member group -}; - -/** - * \example example_block_reduce.cu - */ - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/block/block_scan.cuh b/ml-xgboost/cub/cub/block/block_scan.cuh deleted file mode 100644 index 0f7aaf0..0000000 --- a/ml-xgboost/cub/cub/block/block_scan.cuh +++ /dev/null @@ -1,2126 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * The cub::BlockScan class provides [collective](index.html#sec0) methods for computing a parallel prefix sum/scan of items partitioned across a CUDA thread block. - */ - -#pragma once - -#include "specializations/block_scan_raking.cuh" -#include "specializations/block_scan_warp_scans.cuh" -#include "../util_arch.cuh" -#include "../util_type.cuh" -#include "../util_ptx.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/****************************************************************************** - * Algorithmic variants - ******************************************************************************/ - -/** - * \brief BlockScanAlgorithm enumerates alternative algorithms for cub::BlockScan to compute a parallel prefix scan across a CUDA thread block. - */ -enum BlockScanAlgorithm -{ - - /** - * \par Overview - * An efficient "raking reduce-then-scan" prefix scan algorithm. Execution is comprised of five phases: - * -# Upsweep sequential reduction in registers (if threads contribute more than one input each). Each thread then places the partial reduction of its item(s) into shared memory. - * -# Upsweep sequential reduction in shared memory. Threads within a single warp rake across segments of shared partial reductions. - * -# A warp-synchronous Kogge-Stone style exclusive scan within the raking warp. - * -# Downsweep sequential exclusive scan in shared memory. Threads within a single warp rake across segments of shared partial reductions, seeded with the warp-scan output. - * -# Downsweep sequential scan in registers (if threads contribute more than one input), seeded with the raking scan output. - * - * \par - * \image html block_scan_raking.png - *
<div class="centercaption">\p BLOCK_SCAN_RAKING data flow for a hypothetical 16-thread threadblock and 4-thread raking warp.</div>
- * - * \par Performance Considerations - * - Although this variant may suffer longer turnaround latencies when the - * GPU is under-occupied, it can often provide higher overall throughput - * across the GPU when suitably occupied. - */ - BLOCK_SCAN_RAKING, - - - /** - * \par Overview - * Similar to cub::BLOCK_SCAN_RAKING, but with fewer shared memory reads at - * the expense of higher register pressure. Raking threads preserve their - * "upsweep" segment of values in registers while performing warp-synchronous - * scan, allowing the "downsweep" not to re-read them from shared memory. - */ - BLOCK_SCAN_RAKING_MEMOIZE, - - - /** - * \par Overview - * A quick "tiled warpscans" prefix scan algorithm. Execution is comprised of four phases: - * -# Upsweep sequential reduction in registers (if threads contribute more than one input each). Each thread then places the partial reduction of its item(s) into shared memory. - * -# Compute a shallow, but inefficient warp-synchronous Kogge-Stone style scan within each warp. - * -# A propagation phase where the warp scan outputs in each warp are updated with the aggregate from each preceding warp. - * -# Downsweep sequential scan in registers (if threads contribute more than one input), seeded with the raking scan output. - * - * \par - * \image html block_scan_warpscans.png - *
<div class="centercaption">\p BLOCK_SCAN_WARP_SCANS data flow for a hypothetical 16-thread threadblock and 4-thread raking warp.</div>
- *
- * \par Performance Considerations
- * - Although this variant may suffer lower overall throughput across the
- * GPU due to a heavy reliance on inefficient warpscans, it can
- * often provide lower turnaround latencies when the GPU is under-occupied.
- */
- BLOCK_SCAN_WARP_SCANS,
-};
-
-
-/******************************************************************************
- * Block scan
- ******************************************************************************/
-
-/**
- * \brief The BlockScan class provides [collective](index.html#sec0) methods for computing a parallel prefix sum/scan of items partitioned across a CUDA thread block. ![](block_scan_logo.png)
- * \ingroup BlockModule
- *
- * \tparam T Data type being scanned
- * \tparam BLOCK_DIM_X The thread block length in threads along the X dimension
- * \tparam ALGORITHM [optional] cub::BlockScanAlgorithm enumerator specifying the underlying algorithm to use (default: cub::BLOCK_SCAN_RAKING)
- * \tparam BLOCK_DIM_Y [optional] The thread block length in threads along the Y dimension (default: 1)
- * \tparam BLOCK_DIM_Z [optional] The thread block length in threads along the Z dimension (default: 1)
- * \tparam PTX_ARCH [optional] \ptxversion
- *
- * \par Overview
- * - Given a list of input elements and a binary reduction operator, a [prefix scan](http://en.wikipedia.org/wiki/Prefix_sum)
- * produces an output list where each element is computed to be the reduction
- * of the elements occurring earlier in the input list. Prefix sum
- * connotes a prefix scan with the addition operator. The term \em inclusive indicates
- * that the i-th output reduction incorporates the i-th input.
- * The term \em exclusive indicates the i-th input is not incorporated into
- * the i-th output reduction.
- * - \rowmajor
- * - BlockScan can be optionally specialized by algorithm to accommodate different workload profiles:
- * -# cub::BLOCK_SCAN_RAKING. An efficient (high throughput) "raking reduce-then-scan" prefix scan algorithm. [More...](\ref cub::BlockScanAlgorithm)
- * -# cub::BLOCK_SCAN_RAKING_MEMOIZE. Similar to cub::BLOCK_SCAN_RAKING, but having higher throughput at the expense of additional register pressure for intermediate storage. [More...](\ref cub::BlockScanAlgorithm)
- * -# cub::BLOCK_SCAN_WARP_SCANS. A quick (low latency) "tiled warpscans" prefix scan algorithm. [More...](\ref cub::BlockScanAlgorithm)
- *
- * \par Performance Considerations
- * - \granularity
- * - Uses special instructions when applicable (e.g., warp \p SHFL)
- * - Uses synchronization-free communication between warp lanes when applicable
- * - Invokes a minimal number of block-wide synchronization barriers (only
- * one or two depending on algorithm selection)
- * - Incurs zero bank conflicts for most types
- * - Computation is slightly more efficient (i.e., having lower instruction overhead) for:
- * - Prefix sum variants (vs. generic scan)
- * - \blocksize
- * - See cub::BlockScanAlgorithm for performance details regarding algorithmic alternatives
- *
- * \par A Simple Example
- * \blockcollective{BlockScan}
- * \par
- * The code snippet below illustrates an exclusive prefix sum of 512 integer items that
- * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads
- * where each thread owns 4 consecutive items.
- * \par
- * \code
- * #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh>
- *
- * __global__ void ExampleKernel(...)
- * { - * // Specialize BlockScan for a 1D block of 128 threads on type int - * typedef cub::BlockScan BlockScan; - * - * // Allocate shared memory for BlockScan - * __shared__ typename BlockScan::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * ... - * - * // Collectively compute the block-wide exclusive prefix sum - * BlockScan(temp_storage).ExclusiveSum(thread_data, thread_data); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is - * {[1,1,1,1], [1,1,1,1], ..., [1,1,1,1]}. - * The corresponding output \p thread_data in those threads will be - * {[0,1,2,3], [4,5,6,7], ..., [508,509,510,511]}. - * - */ -template < - typename T, - int BLOCK_DIM_X, - BlockScanAlgorithm ALGORITHM = BLOCK_SCAN_RAKING, - int BLOCK_DIM_Y = 1, - int BLOCK_DIM_Z = 1, - int PTX_ARCH = CUB_PTX_ARCH> -class BlockScan -{ -private: - - /****************************************************************************** - * Constants and type definitions - ******************************************************************************/ - - /// Constants - enum - { - /// The thread block size in threads - BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, - }; - - /** - * Ensure the template parameterization meets the requirements of the - * specified algorithm. Currently, the BLOCK_SCAN_WARP_SCANS policy - * cannot be used with threadblock sizes not a multiple of the - * architectural warp size. - */ - static const BlockScanAlgorithm SAFE_ALGORITHM = - ((ALGORITHM == BLOCK_SCAN_WARP_SCANS) && (BLOCK_THREADS % CUB_WARP_THREADS(PTX_ARCH) != 0)) ? - BLOCK_SCAN_RAKING : - ALGORITHM; - - typedef BlockScanWarpScans WarpScans; - typedef BlockScanRaking Raking; - - /// Define the delegate type for the desired algorithm - typedef typename If<(SAFE_ALGORITHM == BLOCK_SCAN_WARP_SCANS), - WarpScans, - Raking>::Type InternalBlockScan; - - /// Shared memory storage layout type for BlockScan - typedef typename InternalBlockScan::TempStorage _TempStorage; - - - /****************************************************************************** - * Thread fields - ******************************************************************************/ - - /// Shared storage reference - _TempStorage &temp_storage; - - /// Linear thread-id - unsigned int linear_tid; - - - /****************************************************************************** - * Utility methods - ******************************************************************************/ - - /// Internal storage allocator - __device__ __forceinline__ _TempStorage& PrivateStorage() - { - __shared__ _TempStorage private_storage; - return private_storage; - } - - - /****************************************************************************** - * Public types - ******************************************************************************/ -public: - - /// \smemstorage{BlockScan} - struct TempStorage : Uninitialized<_TempStorage> {}; - - - /******************************************************************//** - * \name Collective constructors - *********************************************************************/ - //@{ - - /** - * \brief Collective constructor using a private static allocation of shared memory as temporary storage. 
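// NOTE: illustrating the SAFE_ALGORITHM fallback above with hypothetical
// specializations. BLOCK_SCAN_WARP_SCANS requires the block size to be a
// whole number of warps (assumed 32 threads each) and otherwise silently
// degrades to BLOCK_SCAN_RAKING:
//
//     typedef cub::BlockScan<int, 128, cub::BLOCK_SCAN_WARP_SCANS> Scan128;
//     //  128 % 32 == 0  ->  SAFE_ALGORITHM == BLOCK_SCAN_WARP_SCANS
//
//     typedef cub::BlockScan<int, 100, cub::BLOCK_SCAN_WARP_SCANS> Scan100;
//     //  100 % 32 != 0  ->  SAFE_ALGORITHM == BLOCK_SCAN_RAKING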
- */ - __device__ __forceinline__ BlockScan() - : - temp_storage(PrivateStorage()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) - {} - - - /** - * \brief Collective constructor using the specified memory allocation as temporary storage. - */ - __device__ __forceinline__ BlockScan( - TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage - : - temp_storage(temp_storage.Alias()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) - {} - - - - //@} end member group - /******************************************************************//** - * \name Exclusive prefix sum operations - *********************************************************************/ - //@{ - - - /** - * \brief Computes an exclusive block-wide prefix scan using addition (+) as the scan operator. Each thread contributes one input element. The value of 0 is applied as the initial value, and is assigned to \p output in thread0. - * - * \par - * - \identityzero - * - \rowmajor - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates an exclusive prefix sum of 128 integer items that - * are partitioned across 128 threads. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockScan for a 1D block of 128 threads on type int - * typedef cub::BlockScan BlockScan; - * - * // Allocate shared memory for BlockScan - * __shared__ typename BlockScan::TempStorage temp_storage; - * - * // Obtain input item for each thread - * int thread_data; - * ... - * - * // Collectively compute the block-wide exclusive prefix sum - * BlockScan(temp_storage).ExclusiveSum(thread_data, thread_data); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is 1, 1, ..., 1. The - * corresponding output \p thread_data in those threads will be 0, 1, ..., 127. - * - */ - __device__ __forceinline__ void ExclusiveSum( - T input, ///< [in] Calling thread's input item - T &output) ///< [out] Calling thread's output item (may be aliased to \p input) - { - T initial_value = 0; - ExclusiveScan(input, output, initial_value, cub::Sum()); - } - - - /** - * \brief Computes an exclusive block-wide prefix scan using addition (+) as the scan operator. Each thread contributes one input element. The value of 0 is applied as the initial value, and is assigned to \p output in thread0. Also provides every thread with the block-wide \p block_aggregate of all inputs. - * - * \par - * - \identityzero - * - \rowmajor - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates an exclusive prefix sum of 128 integer items that - * are partitioned across 128 threads. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockScan for a 1D block of 128 threads on type int - * typedef cub::BlockScan BlockScan; - * - * // Allocate shared memory for BlockScan - * __shared__ typename BlockScan::TempStorage temp_storage; - * - * // Obtain input item for each thread - * int thread_data; - * ... - * - * // Collectively compute the block-wide exclusive prefix sum - * int block_aggregate; - * BlockScan(temp_storage).ExclusiveSum(thread_data, thread_data, block_aggregate); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is 1, 1, ..., 1. The - * corresponding output \p thread_data in those threads will be 0, 1, ..., 127. 
- * Furthermore the value \p 128 will be stored in \p block_aggregate for all threads. - * - */ - __device__ __forceinline__ void ExclusiveSum( - T input, ///< [in] Calling thread's input item - T &output, ///< [out] Calling thread's output item (may be aliased to \p input) - T &block_aggregate) ///< [out] block-wide aggregate reduction of input items - { - T initial_value = 0; - ExclusiveScan(input, output, initial_value, cub::Sum(), block_aggregate); - } - - - /** - * \brief Computes an exclusive block-wide prefix scan using addition (+) as the scan operator. Each thread contributes one input element. Instead of using 0 as the block-wide prefix, the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by lane0 in that warp is used as the "seed" value that logically prefixes the threadblock's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. - * - * \par - * - \identityzero - * - The \p block_prefix_callback_op functor must implement a member function T operator()(T block_aggregate). - * The functor's input parameter \p block_aggregate is the same value also returned by the scan operation. - * The functor will be invoked by the first warp of threads in the block, however only the return value from - * lane0 is applied as the block-wide prefix. Can be stateful. - * - \rowmajor - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a single thread block that progressively - * computes an exclusive prefix sum over multiple "tiles" of input using a - * prefix functor to maintain a running total between block-wide scans. Each tile consists - * of 128 integer items that are partitioned across 128 threads. - * \par - * \code - * #include // or equivalently - * - * // A stateful callback functor that maintains a running prefix to be applied - * // during consecutive scan operations. - * struct BlockPrefixCallbackOp - * { - * // Running prefix - * int running_total; - * - * // Constructor - * __device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {} - * - * // Callback operator to be entered by the first warp of threads in the block. - * // Thread-0 is responsible for returning a value for seeding the block-wide scan. - * __device__ int operator()(int block_aggregate) - * { - * int old_prefix = running_total; - * running_total += block_aggregate; - * return old_prefix; - * } - * }; - * - * __global__ void ExampleKernel(int *d_data, int num_items, ...) - * { - * // Specialize BlockScan for a 1D block of 128 threads - * typedef cub::BlockScan BlockScan; - * - * // Allocate shared memory for BlockScan - * __shared__ typename BlockScan::TempStorage temp_storage; - * - * // Initialize running total - * BlockPrefixCallbackOp prefix_op(0); - * - * // Have the block iterate over segments of items - * for (int block_offset = 0; block_offset < num_items; block_offset += 128) - * { - * // Load a segment of consecutive items that are blocked across threads - * int thread_data = d_data[block_offset]; - * - * // Collectively compute the block-wide exclusive prefix sum - * BlockScan(temp_storage).ExclusiveSum( - * thread_data, thread_data, prefix_op); - * CTA_SYNC(); - * - * // Store scanned items to output segment - * d_data[block_offset] = thread_data; - * } - * \endcode - * \par - * Suppose the input \p d_data is 1, 1, 1, 1, 1, 1, 1, 1, .... - * The corresponding output for the first segment will be 0, 1, ..., 127. 
- * The output for the second segment will be 128, 129, ..., 255. - * - * \tparam BlockPrefixCallbackOp [inferred] Call-back functor type having member T operator()(T block_aggregate) - */ - template - __device__ __forceinline__ void ExclusiveSum( - T input, ///< [in] Calling thread's input item - T &output, ///< [out] Calling thread's output item (may be aliased to \p input) - BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] [warp0 only] Call-back functor for specifying a block-wide prefix to be applied to the logical input sequence. - { - ExclusiveScan(input, output, cub::Sum(), block_prefix_callback_op); - } - - - //@} end member group - /******************************************************************//** - * \name Exclusive prefix sum operations (multiple data per thread) - *********************************************************************/ - //@{ - - - /** - * \brief Computes an exclusive block-wide prefix scan using addition (+) as the scan operator. Each thread contributes an array of consecutive input elements. The value of 0 is applied as the initial value, and is assigned to \p output[0] in thread0. - * - * \par - * - \identityzero - * - \blocked - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates an exclusive prefix sum of 512 integer items that - * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads - * where each thread owns 4 consecutive items. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockScan for a 1D block of 128 threads on type int - * typedef cub::BlockScan BlockScan; - * - * // Allocate shared memory for BlockScan - * __shared__ typename BlockScan::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * ... - * - * // Collectively compute the block-wide exclusive prefix sum - * BlockScan(temp_storage).ExclusiveSum(thread_data, thread_data); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is { [1,1,1,1], [1,1,1,1], ..., [1,1,1,1] }. The - * corresponding output \p thread_data in those threads will be { [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }. - * - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - */ - template - __device__ __forceinline__ void ExclusiveSum( - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - T (&output)[ITEMS_PER_THREAD]) ///< [out] Calling thread's output items (may be aliased to \p input) - { - T initial_value = 0; - ExclusiveScan(input, output, initial_value, cub::Sum()); - } - - - /** - * \brief Computes an exclusive block-wide prefix scan using addition (+) as the scan operator. Each thread contributes an array of consecutive input elements. The value of 0 is applied as the initial value, and is assigned to \p output[0] in thread0. Also provides every thread with the block-wide \p block_aggregate of all inputs. - * - * \par - * - \identityzero - * - \blocked - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates an exclusive prefix sum of 512 integer items that - * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads - * where each thread owns 4 consecutive items. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) 
- * { - * // Specialize BlockScan for a 1D block of 128 threads on type int - * typedef cub::BlockScan BlockScan; - * - * // Allocate shared memory for BlockScan - * __shared__ typename BlockScan::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * ... - * - * // Collectively compute the block-wide exclusive prefix sum - * int block_aggregate; - * BlockScan(temp_storage).ExclusiveSum(thread_data, thread_data, block_aggregate); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is { [1,1,1,1], [1,1,1,1], ..., [1,1,1,1] }. The - * corresponding output \p thread_data in those threads will be { [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }. - * Furthermore the value \p 512 will be stored in \p block_aggregate for all threads. - * - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - */ - template - __device__ __forceinline__ void ExclusiveSum( - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - T (&output)[ITEMS_PER_THREAD], ///< [out] Calling thread's output items (may be aliased to \p input) - T &block_aggregate) ///< [out] block-wide aggregate reduction of input items - { - // Reduce consecutive thread items in registers - T initial_value = 0; - ExclusiveScan(input, output, initial_value, cub::Sum(), block_aggregate); - } - - - /** - * \brief Computes an exclusive block-wide prefix scan using addition (+) as the scan operator. Each thread contributes an array of consecutive input elements. Instead of using 0 as the block-wide prefix, the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by lane0 in that warp is used as the "seed" value that logically prefixes the threadblock's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. - * - * \par - * - \identityzero - * - The \p block_prefix_callback_op functor must implement a member function T operator()(T block_aggregate). - * The functor's input parameter \p block_aggregate is the same value also returned by the scan operation. - * The functor will be invoked by the first warp of threads in the block, however only the return value from - * lane0 is applied as the block-wide prefix. Can be stateful. - * - \blocked - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a single thread block that progressively - * computes an exclusive prefix sum over multiple "tiles" of input using a - * prefix functor to maintain a running total between block-wide scans. Each tile consists - * of 512 integer items that are partitioned in a [blocked arrangement](index.html#sec5sec3) - * across 128 threads where each thread owns 4 consecutive items. - * \par - * \code - * #include // or equivalently - * - * // A stateful callback functor that maintains a running prefix to be applied - * // during consecutive scan operations. - * struct BlockPrefixCallbackOp - * { - * // Running prefix - * int running_total; - * - * // Constructor - * __device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {} - * - * // Callback operator to be entered by the first warp of threads in the block. - * // Thread-0 is responsible for returning a value for seeding the block-wide scan. 
- * __device__ int operator()(int block_aggregate) - * { - * int old_prefix = running_total; - * running_total += block_aggregate; - * return old_prefix; - * } - * }; - * - * __global__ void ExampleKernel(int *d_data, int num_items, ...) - * { - * // Specialize BlockLoad, BlockStore, and BlockScan for a 1D block of 128 threads, 4 ints per thread - * typedef cub::BlockLoad BlockLoad; - * typedef cub::BlockStore BlockStore; - * typedef cub::BlockScan BlockScan; - * - * // Allocate aliased shared memory for BlockLoad, BlockStore, and BlockScan - * __shared__ union { - * typename BlockLoad::TempStorage load; - * typename BlockScan::TempStorage scan; - * typename BlockStore::TempStorage store; - * } temp_storage; - * - * // Initialize running total - * BlockPrefixCallbackOp prefix_op(0); - * - * // Have the block iterate over segments of items - * for (int block_offset = 0; block_offset < num_items; block_offset += 128 * 4) - * { - * // Load a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * BlockLoad(temp_storage.load).Load(d_data + block_offset, thread_data); - * CTA_SYNC(); - * - * // Collectively compute the block-wide exclusive prefix sum - * int block_aggregate; - * BlockScan(temp_storage.scan).ExclusiveSum( - * thread_data, thread_data, prefix_op); - * CTA_SYNC(); - * - * // Store scanned items to output segment - * BlockStore(temp_storage.store).Store(d_data + block_offset, thread_data); - * CTA_SYNC(); - * } - * \endcode - * \par - * Suppose the input \p d_data is 1, 1, 1, 1, 1, 1, 1, 1, .... - * The corresponding output for the first segment will be 0, 1, 2, 3, ..., 510, 511. - * The output for the second segment will be 512, 513, 514, 515, ..., 1022, 1023. - * - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - * \tparam BlockPrefixCallbackOp [inferred] Call-back functor type having member T operator()(T block_aggregate) - */ - template < - int ITEMS_PER_THREAD, - typename BlockPrefixCallbackOp> - __device__ __forceinline__ void ExclusiveSum( - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - T (&output)[ITEMS_PER_THREAD], ///< [out] Calling thread's output items (may be aliased to \p input) - BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] [warp0 only] Call-back functor for specifying a block-wide prefix to be applied to the logical input sequence. - { - ExclusiveScan(input, output, cub::Sum(), block_prefix_callback_op); - } - - - - //@} end member group // Exclusive prefix sums - /******************************************************************//** - * \name Exclusive prefix scan operations - *********************************************************************/ - //@{ - - - /** - * \brief Computes an exclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. - * - * \par - * - Supports non-commutative scan operators. - * - \rowmajor - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates an exclusive prefix max scan of 128 integer items that - * are partitioned across 128 threads. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockScan for a 1D block of 128 threads on type int - * typedef cub::BlockScan BlockScan; - * - * // Allocate shared memory for BlockScan - * __shared__ typename BlockScan::TempStorage temp_storage; - * - * // Obtain input item for each thread - * int thread_data; - * ... 
- * - * // Collectively compute the block-wide exclusive prefix max scan - * BlockScan(temp_storage).ExclusiveScan(thread_data, thread_data, INT_MIN, cub::Max()); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is 0, -1, 2, -3, ..., 126, -127. The - * corresponding output \p thread_data in those threads will be INT_MIN, 0, 0, 2, ..., 124, 126. - * - * \tparam ScanOp [inferred] Binary scan functor type having member T operator()(const T &a, const T &b) - */ - template - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input item - T &output, ///< [out] Calling thread's output item (may be aliased to \p input) - T initial_value, ///< [in] Initial value to seed the exclusive scan (and is assigned to \p output[0] in thread0) - ScanOp scan_op) ///< [in] Binary scan functor - { - InternalBlockScan(temp_storage).ExclusiveScan(input, output, initial_value, scan_op); - } - - - /** - * \brief Computes an exclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. - * - * \par - * - Supports non-commutative scan operators. - * - \rowmajor - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates an exclusive prefix max scan of 128 integer items that - * are partitioned across 128 threads. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockScan for a 1D block of 128 threads on type int - * typedef cub::BlockScan BlockScan; - * - * // Allocate shared memory for BlockScan - * __shared__ typename BlockScan::TempStorage temp_storage; - * - * // Obtain input item for each thread - * int thread_data; - * ... - * - * // Collectively compute the block-wide exclusive prefix max scan - * int block_aggregate; - * BlockScan(temp_storage).ExclusiveScan(thread_data, thread_data, INT_MIN, cub::Max(), block_aggregate); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is 0, -1, 2, -3, ..., 126, -127. The - * corresponding output \p thread_data in those threads will be INT_MIN, 0, 0, 2, ..., 124, 126. - * Furthermore the value \p 126 will be stored in \p block_aggregate for all threads. - * - * \tparam ScanOp [inferred] Binary scan functor type having member T operator()(const T &a, const T &b) - */ - template - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input items - T &output, ///< [out] Calling thread's output items (may be aliased to \p input) - T initial_value, ///< [in] Initial value to seed the exclusive scan (and is assigned to \p output[0] in thread0) - ScanOp scan_op, ///< [in] Binary scan functor - T &block_aggregate) ///< [out] block-wide aggregate reduction of input items - { - InternalBlockScan(temp_storage).ExclusiveScan(input, output, initial_value, scan_op, block_aggregate); - } - - - /** - * \brief Computes an exclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by lane0 in that warp is used as the "seed" value that logically prefixes the threadblock's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. 
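// NOTE: a reconstructed, compilable sketch of the running-prefix pattern
// these call-back overloads document (the prefix-sum flavor, with the
// stripped includes and template arguments restored; per-thread indexing
// added so each of the 128 threads loads its own element):

#include <cub/cub.cuh>   // or equivalently <cub/block/block_scan.cuh>

// A stateful callback functor that maintains a running prefix to be applied
// during consecutive scan operations.
struct BlockPrefixCallbackOp
{
    int running_total;   // running prefix

    __device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {}

    // Entered by the first warp; lane0's return value seeds the block-wide scan.
    __device__ int operator()(int block_aggregate)
    {
        int old_prefix = running_total;
        running_total += block_aggregate;
        return old_prefix;
    }
};

// Assumes num_items is a multiple of 128 (one item per thread per tile).
__global__ void RunningPrefixSum(int *d_data, int num_items)
{
    typedef cub::BlockScan<int, 128> BlockScan;
    __shared__ typename BlockScan::TempStorage temp_storage;

    BlockPrefixCallbackOp prefix_op(0);

    for (int block_offset = 0; block_offset < num_items; block_offset += 128)
    {
        int thread_data = d_data[block_offset + threadIdx.x];

        // Seed each tile's scan with the running total via the callback
        BlockScan(temp_storage).ExclusiveSum(thread_data, thread_data, prefix_op);
        __syncthreads();   // temp_storage is reused across tiles

        d_data[block_offset + threadIdx.x] = thread_data;
    }
}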
- * - * \par - * - The \p block_prefix_callback_op functor must implement a member function T operator()(T block_aggregate). - * The functor's input parameter \p block_aggregate is the same value also returned by the scan operation. - * The functor will be invoked by the first warp of threads in the block, however only the return value from - * lane0 is applied as the block-wide prefix. Can be stateful. - * - Supports non-commutative scan operators. - * - \rowmajor - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a single thread block that progressively - * computes an exclusive prefix max scan over multiple "tiles" of input using a - * prefix functor to maintain a running total between block-wide scans. Each tile consists - * of 128 integer items that are partitioned across 128 threads. - * \par - * \code - * #include // or equivalently - * - * // A stateful callback functor that maintains a running prefix to be applied - * // during consecutive scan operations. - * struct BlockPrefixCallbackOp - * { - * // Running prefix - * int running_total; - * - * // Constructor - * __device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {} - * - * // Callback operator to be entered by the first warp of threads in the block. - * // Thread-0 is responsible for returning a value for seeding the block-wide scan. - * __device__ int operator()(int block_aggregate) - * { - * int old_prefix = running_total; - * running_total = (block_aggregate > old_prefix) ? block_aggregate : old_prefix; - * return old_prefix; - * } - * }; - * - * __global__ void ExampleKernel(int *d_data, int num_items, ...) - * { - * // Specialize BlockScan for a 1D block of 128 threads - * typedef cub::BlockScan BlockScan; - * - * // Allocate shared memory for BlockScan - * __shared__ typename BlockScan::TempStorage temp_storage; - * - * // Initialize running total - * BlockPrefixCallbackOp prefix_op(INT_MIN); - * - * // Have the block iterate over segments of items - * for (int block_offset = 0; block_offset < num_items; block_offset += 128) - * { - * // Load a segment of consecutive items that are blocked across threads - * int thread_data = d_data[block_offset]; - * - * // Collectively compute the block-wide exclusive prefix max scan - * BlockScan(temp_storage).ExclusiveScan( - * thread_data, thread_data, INT_MIN, cub::Max(), prefix_op); - * CTA_SYNC(); - * - * // Store scanned items to output segment - * d_data[block_offset] = thread_data; - * } - * \endcode - * \par - * Suppose the input \p d_data is 0, -1, 2, -3, 4, -5, .... - * The corresponding output for the first segment will be INT_MIN, 0, 0, 2, ..., 124, 126. - * The output for the second segment will be 126, 128, 128, 130, ..., 252, 254. - * - * \tparam ScanOp [inferred] Binary scan functor type having member T operator()(const T &a, const T &b) - * \tparam BlockPrefixCallbackOp [inferred] Call-back functor type having member T operator()(T block_aggregate) - */ - template < - typename ScanOp, - typename BlockPrefixCallbackOp> - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input item - T &output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan functor - BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] [warp0 only] Call-back functor for specifying a block-wide prefix to be applied to the logical input sequence. 
- { - InternalBlockScan(temp_storage).ExclusiveScan(input, output, scan_op, block_prefix_callback_op); - } - - - //@} end member group // Inclusive prefix sums - /******************************************************************//** - * \name Exclusive prefix scan operations (multiple data per thread) - *********************************************************************/ - //@{ - - - /** - * \brief Computes an exclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes an array of consecutive input elements. - * - * \par - * - Supports non-commutative scan operators. - * - \blocked - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates an exclusive prefix max scan of 512 integer items that - * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads - * where each thread owns 4 consecutive items. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockScan for a 1D block of 128 threads on type int - * typedef cub::BlockScan BlockScan; - * - * // Allocate shared memory for BlockScan - * __shared__ typename BlockScan::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * ... - * - * // Collectively compute the block-wide exclusive prefix max scan - * BlockScan(temp_storage).ExclusiveScan(thread_data, thread_data, INT_MIN, cub::Max()); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is - * { [0,-1,2,-3], [4,-5,6,-7], ..., [508,-509,510,-511] }. - * The corresponding output \p thread_data in those threads will be - * { [INT_MIN,0,0,2], [2,4,4,6], ..., [506,508,508,510] }. - * - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - * \tparam ScanOp [inferred] Binary scan functor type having member T operator()(const T &a, const T &b) - */ - template < - int ITEMS_PER_THREAD, - typename ScanOp> - __device__ __forceinline__ void ExclusiveScan( - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - T (&output)[ITEMS_PER_THREAD], ///< [out] Calling thread's output items (may be aliased to \p input) - T initial_value, ///< [in] Initial value to seed the exclusive scan (and is assigned to \p output[0] in thread0) - ScanOp scan_op) ///< [in] Binary scan functor - { - // Reduce consecutive thread items in registers - T thread_prefix = ThreadReduce(input, scan_op); - - // Exclusive threadblock-scan - ExclusiveScan(thread_prefix, thread_prefix, initial_value, scan_op); - - // Exclusive scan in registers with prefix as seed - ThreadScanExclusive(input, output, scan_op, thread_prefix); - } - - - /** - * \brief Computes an exclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes an array of consecutive input elements. Also provides every thread with the block-wide \p block_aggregate of all inputs. - * - * \par - * - Supports non-commutative scan operators. - * - \blocked - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates an exclusive prefix max scan of 512 integer items that - * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads - * where each thread owns 4 consecutive items. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) 
- * { - * // Specialize BlockScan for a 1D block of 128 threads on type int - * typedef cub::BlockScan BlockScan; - * - * // Allocate shared memory for BlockScan - * __shared__ typename BlockScan::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * ... - * - * // Collectively compute the block-wide exclusive prefix max scan - * int block_aggregate; - * BlockScan(temp_storage).ExclusiveScan(thread_data, thread_data, INT_MIN, cub::Max(), block_aggregate); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is { [0,-1,2,-3], [4,-5,6,-7], ..., [508,-509,510,-511] }. The - * corresponding output \p thread_data in those threads will be { [INT_MIN,0,0,2], [2,4,4,6], ..., [506,508,508,510] }. - * Furthermore the value \p 510 will be stored in \p block_aggregate for all threads. - * - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - * \tparam ScanOp [inferred] Binary scan functor type having member T operator()(const T &a, const T &b) - */ - template < - int ITEMS_PER_THREAD, - typename ScanOp> - __device__ __forceinline__ void ExclusiveScan( - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - T (&output)[ITEMS_PER_THREAD], ///< [out] Calling thread's output items (may be aliased to \p input) - T initial_value, ///< [in] Initial value to seed the exclusive scan (and is assigned to \p output[0] in thread0) - ScanOp scan_op, ///< [in] Binary scan functor - T &block_aggregate) ///< [out] block-wide aggregate reduction of input items - { - // Reduce consecutive thread items in registers - T thread_prefix = ThreadReduce(input, scan_op); - - // Exclusive threadblock-scan - ExclusiveScan(thread_prefix, thread_prefix, initial_value, scan_op, block_aggregate); - - // Exclusive scan in registers with prefix as seed - ThreadScanExclusive(input, output, scan_op, thread_prefix); - } - - - /** - * \brief Computes an exclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes an array of consecutive input elements. the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by lane0 in that warp is used as the "seed" value that logically prefixes the threadblock's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. - * - * \par - * - The \p block_prefix_callback_op functor must implement a member function T operator()(T block_aggregate). - * The functor's input parameter \p block_aggregate is the same value also returned by the scan operation. - * The functor will be invoked by the first warp of threads in the block, however only the return value from - * lane0 is applied as the block-wide prefix. Can be stateful. - * - Supports non-commutative scan operators. - * - \blocked - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a single thread block that progressively - * computes an exclusive prefix max scan over multiple "tiles" of input using a - * prefix functor to maintain a running total between block-wide scans. Each tile consists - * of 128 integer items that are partitioned across 128 threads. - * \par - * \code - * #include // or equivalently - * - * // A stateful callback functor that maintains a running prefix to be applied - * // during consecutive scan operations. 
- * struct BlockPrefixCallbackOp
- * {
- *     // Running prefix
- *     int running_total;
- *
- *     // Constructor
- *     __device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {}
- *
- *     // Callback operator to be entered by the first warp of threads in the block.
- *     // Thread-0 is responsible for returning a value for seeding the block-wide scan.
- *     __device__ int operator()(int block_aggregate)
- *     {
- *         int old_prefix = running_total;
- *         running_total = (block_aggregate > old_prefix) ? block_aggregate : old_prefix;
- *         return old_prefix;
- *     }
- * };
- *
- * __global__ void ExampleKernel(int *d_data, int num_items, ...)
- * {
- *     // Specialize BlockLoad, BlockStore, and BlockScan for a 1D block of 128 threads, 4 ints per thread
- *     typedef cub::BlockLoad<int, 128, 4, BLOCK_LOAD_TRANSPOSE>   BlockLoad;
- *     typedef cub::BlockStore<int, 128, 4, BLOCK_STORE_TRANSPOSE> BlockStore;
- *     typedef cub::BlockScan<int, 128>                            BlockScan;
- *
- *     // Allocate aliased shared memory for BlockLoad, BlockStore, and BlockScan
- *     __shared__ union {
- *         typename BlockLoad::TempStorage  load;
- *         typename BlockScan::TempStorage  scan;
- *         typename BlockStore::TempStorage store;
- *     } temp_storage;
- *
- *     // Initialize running total
- *     BlockPrefixCallbackOp prefix_op(INT_MIN);
- *
- *     // Have the block iterate over segments of items
- *     for (int block_offset = 0; block_offset < num_items; block_offset += 128 * 4)
- *     {
- *         // Load a segment of consecutive items that are blocked across threads
- *         int thread_data[4];
- *         BlockLoad(temp_storage.load).Load(d_data + block_offset, thread_data);
- *         CTA_SYNC();
- *
- *         // Collectively compute the block-wide exclusive prefix max scan
- *         BlockScan(temp_storage.scan).ExclusiveScan(
- *             thread_data, thread_data, cub::Max(), prefix_op);
- *         CTA_SYNC();
- *
- *         // Store scanned items to output segment
- *         BlockStore(temp_storage.store).Store(d_data + block_offset, thread_data);
- *         CTA_SYNC();
- *     }
- * \endcode
- * \par
- * Suppose the input \p d_data is 0, -1, 2, -3, 4, -5, ....
- * The corresponding output for the first segment will be INT_MIN, 0, 0, 2, 2, 4, ..., 508, 510.
- * The output for the second segment will be 510, 512, 512, 514, 514, 516, ..., 1020, 1022.
- *
- * \tparam ITEMS_PER_THREAD      [inferred] The number of consecutive items partitioned onto each thread.
- * \tparam ScanOp                [inferred] Binary scan functor type having member T operator()(const T &a, const T &b)
- * \tparam BlockPrefixCallbackOp [inferred] Call-back functor type having member T operator()(T block_aggregate)
- */
- template <
-     int ITEMS_PER_THREAD,
-     typename ScanOp,
-     typename BlockPrefixCallbackOp>
- __device__ __forceinline__ void ExclusiveScan(
-     T (&input)[ITEMS_PER_THREAD],                     ///< [in] Calling thread's input items
-     T (&output)[ITEMS_PER_THREAD],                    ///< [out] Calling thread's output items (may be aliased to \p input)
-     ScanOp scan_op,                                   ///< [in] Binary scan functor
-     BlockPrefixCallbackOp &block_prefix_callback_op)  ///< [in-out] [warp0 only] Call-back functor for specifying a block-wide prefix to be applied to the logical input sequence.
- { - // Reduce consecutive thread items in registers - T thread_prefix = ThreadReduce(input, scan_op); - - // Exclusive threadblock-scan - ExclusiveScan(thread_prefix, thread_prefix, scan_op, block_prefix_callback_op); - - // Exclusive scan in registers with prefix as seed - ThreadScanExclusive(input, output, scan_op, thread_prefix); - } - - - //@} end member group -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document no-initial-value scans - - /******************************************************************//** - * \name Exclusive prefix scan operations (no initial value, single datum per thread) - *********************************************************************/ - //@{ - - - /** - * \brief Computes an exclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. With no initial value, the output computed for thread0 is undefined. - * - * \par - * - Supports non-commutative scan operators. - * - \rowmajor - * - \smemreuse - * - * \tparam ScanOp [inferred] Binary scan functor type having member T operator()(const T &a, const T &b) - */ - template - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input item - T &output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op) ///< [in] Binary scan functor - { - InternalBlockScan(temp_storage).ExclusiveScan(input, output, scan_op); - } - - - /** - * \brief Computes an exclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. With no initial value, the output computed for thread0 is undefined. - * - * \par - * - Supports non-commutative scan operators. - * - \rowmajor - * - \smemreuse - * - * \tparam ScanOp [inferred] Binary scan functor type having member T operator()(const T &a, const T &b) - */ - template - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input item - T &output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan functor - T &block_aggregate) ///< [out] block-wide aggregate reduction of input items - { - InternalBlockScan(temp_storage).ExclusiveScan(input, output, scan_op, block_aggregate); - } - - //@} end member group - /******************************************************************//** - * \name Exclusive prefix scan operations (no initial value, multiple data per thread) - *********************************************************************/ - //@{ - - - /** - * \brief Computes an exclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes an array of consecutive input elements. With no initial value, the output computed for thread0 is undefined. - * - * \par - * - Supports non-commutative scan operators. - * - \blocked - * - \granularity - * - \smemreuse - * - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. 
- * \tparam ScanOp [inferred] Binary scan functor type having member T operator()(const T &a, const T &b) - */ - template < - int ITEMS_PER_THREAD, - typename ScanOp> - __device__ __forceinline__ void ExclusiveScan( - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - T (&output)[ITEMS_PER_THREAD], ///< [out] Calling thread's output items (may be aliased to \p input) - ScanOp scan_op) ///< [in] Binary scan functor - { - // Reduce consecutive thread items in registers - T thread_partial = ThreadReduce(input, scan_op); - - // Exclusive threadblock-scan - ExclusiveScan(thread_partial, thread_partial, scan_op); - - // Exclusive scan in registers with prefix - ThreadScanExclusive(input, output, scan_op, thread_partial, (linear_tid != 0)); - } - - - /** - * \brief Computes an exclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes an array of consecutive input elements. Also provides every thread with the block-wide \p block_aggregate of all inputs. With no initial value, the output computed for thread0 is undefined. - * - * \par - * - Supports non-commutative scan operators. - * - \blocked - * - \granularity - * - \smemreuse - * - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - * \tparam ScanOp [inferred] Binary scan functor type having member T operator()(const T &a, const T &b) - */ - template < - int ITEMS_PER_THREAD, - typename ScanOp> - __device__ __forceinline__ void ExclusiveScan( - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - T (&output)[ITEMS_PER_THREAD], ///< [out] Calling thread's output items (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan functor - T &block_aggregate) ///< [out] block-wide aggregate reduction of input items - { - // Reduce consecutive thread items in registers - T thread_partial = ThreadReduce(input, scan_op); - - // Exclusive threadblock-scan - ExclusiveScan(thread_partial, thread_partial, scan_op, block_aggregate); - - // Exclusive scan in registers with prefix - ThreadScanExclusive(input, output, scan_op, thread_partial, (linear_tid != 0)); - } - - - //@} end member group -#endif // DOXYGEN_SHOULD_SKIP_THIS // Do not document no-initial-value scans - - /******************************************************************//** - * \name Inclusive prefix sum operations - *********************************************************************/ - //@{ - - - /** - * \brief Computes an inclusive block-wide prefix scan using addition (+) as the scan operator. Each thread contributes one input element. - * - * \par - * - \rowmajor - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates an inclusive prefix sum of 128 integer items that - * are partitioned across 128 threads. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockScan for a 1D block of 128 threads on type int - * typedef cub::BlockScan BlockScan; - * - * // Allocate shared memory for BlockScan - * __shared__ typename BlockScan::TempStorage temp_storage; - * - * // Obtain input item for each thread - * int thread_data; - * ... - * - * // Collectively compute the block-wide inclusive prefix sum - * BlockScan(temp_storage).InclusiveSum(thread_data, thread_data); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is 1, 1, ..., 1. The - * corresponding output \p thread_data in those threads will be 1, 2, ..., 128. 
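- *
- * \par
- * For contrast with the exclusive variant, a minimal sketch (assuming the same
- * 128-thread configuration as the snippet above; the CTA_SYNC() is required
- * before \p temp_storage is reused):
- * \code
- * int inclusive, exclusive;
- * BlockScan(temp_storage).InclusiveSum(thread_data, inclusive);   // 1, 2, ..., 128
- * CTA_SYNC();
- * BlockScan(temp_storage).ExclusiveSum(thread_data, exclusive);   // 0, 1, ..., 127
- * \endcode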
- * - */ - __device__ __forceinline__ void InclusiveSum( - T input, ///< [in] Calling thread's input item - T &output) ///< [out] Calling thread's output item (may be aliased to \p input) - { - InclusiveScan(input, output, cub::Sum()); - } - - - /** - * \brief Computes an inclusive block-wide prefix scan using addition (+) as the scan operator. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. - * - * \par - * - \rowmajor - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates an inclusive prefix sum of 128 integer items that - * are partitioned across 128 threads. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockScan for a 1D block of 128 threads on type int - * typedef cub::BlockScan BlockScan; - * - * // Allocate shared memory for BlockScan - * __shared__ typename BlockScan::TempStorage temp_storage; - * - * // Obtain input item for each thread - * int thread_data; - * ... - * - * // Collectively compute the block-wide inclusive prefix sum - * int block_aggregate; - * BlockScan(temp_storage).InclusiveSum(thread_data, thread_data, block_aggregate); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is 1, 1, ..., 1. The - * corresponding output \p thread_data in those threads will be 1, 2, ..., 128. - * Furthermore the value \p 128 will be stored in \p block_aggregate for all threads. - * - */ - __device__ __forceinline__ void InclusiveSum( - T input, ///< [in] Calling thread's input item - T &output, ///< [out] Calling thread's output item (may be aliased to \p input) - T &block_aggregate) ///< [out] block-wide aggregate reduction of input items - { - InclusiveScan(input, output, cub::Sum(), block_aggregate); - } - - - - /** - * \brief Computes an inclusive block-wide prefix scan using addition (+) as the scan operator. Each thread contributes one input element. Instead of using 0 as the block-wide prefix, the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by lane0 in that warp is used as the "seed" value that logically prefixes the threadblock's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. - * - * \par - * - The \p block_prefix_callback_op functor must implement a member function T operator()(T block_aggregate). - * The functor's input parameter \p block_aggregate is the same value also returned by the scan operation. - * The functor will be invoked by the first warp of threads in the block, however only the return value from - * lane0 is applied as the block-wide prefix. Can be stateful. - * - \rowmajor - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a single thread block that progressively - * computes an inclusive prefix sum over multiple "tiles" of input using a - * prefix functor to maintain a running total between block-wide scans. Each tile consists - * of 128 integer items that are partitioned across 128 threads. - * \par - * \code - * #include // or equivalently - * - * // A stateful callback functor that maintains a running prefix to be applied - * // during consecutive scan operations. 
- * struct BlockPrefixCallbackOp - * { - * // Running prefix - * int running_total; - * - * // Constructor - * __device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {} - * - * // Callback operator to be entered by the first warp of threads in the block. - * // Thread-0 is responsible for returning a value for seeding the block-wide scan. - * __device__ int operator()(int block_aggregate) - * { - * int old_prefix = running_total; - * running_total += block_aggregate; - * return old_prefix; - * } - * }; - * - * __global__ void ExampleKernel(int *d_data, int num_items, ...) - * { - * // Specialize BlockScan for a 1D block of 128 threads - * typedef cub::BlockScan BlockScan; - * - * // Allocate shared memory for BlockScan - * __shared__ typename BlockScan::TempStorage temp_storage; - * - * // Initialize running total - * BlockPrefixCallbackOp prefix_op(0); - * - * // Have the block iterate over segments of items - * for (int block_offset = 0; block_offset < num_items; block_offset += 128) - * { - * // Load a segment of consecutive items that are blocked across threads - * int thread_data = d_data[block_offset]; - * - * // Collectively compute the block-wide inclusive prefix sum - * BlockScan(temp_storage).InclusiveSum( - * thread_data, thread_data, prefix_op); - * CTA_SYNC(); - * - * // Store scanned items to output segment - * d_data[block_offset] = thread_data; - * } - * \endcode - * \par - * Suppose the input \p d_data is 1, 1, 1, 1, 1, 1, 1, 1, .... - * The corresponding output for the first segment will be 1, 2, ..., 128. - * The output for the second segment will be 129, 130, ..., 256. - * - * \tparam BlockPrefixCallbackOp [inferred] Call-back functor type having member T operator()(T block_aggregate) - */ - template - __device__ __forceinline__ void InclusiveSum( - T input, ///< [in] Calling thread's input item - T &output, ///< [out] Calling thread's output item (may be aliased to \p input) - BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] [warp0 only] Call-back functor for specifying a block-wide prefix to be applied to the logical input sequence. - { - InclusiveScan(input, output, cub::Sum(), block_prefix_callback_op); - } - - - //@} end member group - /******************************************************************//** - * \name Inclusive prefix sum operations (multiple data per thread) - *********************************************************************/ - //@{ - - - /** - * \brief Computes an inclusive block-wide prefix scan using addition (+) as the scan operator. Each thread contributes an array of consecutive input elements. - * - * \par - * - \blocked - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates an inclusive prefix sum of 512 integer items that - * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads - * where each thread owns 4 consecutive items. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockScan for a 1D block of 128 threads on type int - * typedef cub::BlockScan BlockScan; - * - * // Allocate shared memory for BlockScan - * __shared__ typename BlockScan::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * ... 
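- *     // (The "..." above stands for the application-specific load; e.g., a
- *     // cub::BlockLoad or a per-thread strided copy would fill thread_data here.)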
- * - * // Collectively compute the block-wide inclusive prefix sum - * BlockScan(temp_storage).InclusiveSum(thread_data, thread_data); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is { [1,1,1,1], [1,1,1,1], ..., [1,1,1,1] }. The - * corresponding output \p thread_data in those threads will be { [1,2,3,4], [5,6,7,8], ..., [509,510,511,512] }. - * - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - */ - template - __device__ __forceinline__ void InclusiveSum( - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - T (&output)[ITEMS_PER_THREAD]) ///< [out] Calling thread's output items (may be aliased to \p input) - { - if (ITEMS_PER_THREAD == 1) - { - InclusiveSum(input[0], output[0]); - } - else - { - // Reduce consecutive thread items in registers - Sum scan_op; - T thread_prefix = ThreadReduce(input, scan_op); - - // Exclusive threadblock-scan - ExclusiveSum(thread_prefix, thread_prefix); - - // Inclusive scan in registers with prefix as seed - ThreadScanInclusive(input, output, scan_op, thread_prefix, (linear_tid != 0)); - } - } - - - /** - * \brief Computes an inclusive block-wide prefix scan using addition (+) as the scan operator. Each thread contributes an array of consecutive input elements. Also provides every thread with the block-wide \p block_aggregate of all inputs. - * - * \par - * - \blocked - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates an inclusive prefix sum of 512 integer items that - * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads - * where each thread owns 4 consecutive items. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockScan for a 1D block of 128 threads on type int - * typedef cub::BlockScan BlockScan; - * - * // Allocate shared memory for BlockScan - * __shared__ typename BlockScan::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * ... - * - * // Collectively compute the block-wide inclusive prefix sum - * int block_aggregate; - * BlockScan(temp_storage).InclusiveSum(thread_data, thread_data, block_aggregate); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is - * { [1,1,1,1], [1,1,1,1], ..., [1,1,1,1] }. The - * corresponding output \p thread_data in those threads will be - * { [1,2,3,4], [5,6,7,8], ..., [509,510,511,512] }. - * Furthermore the value \p 512 will be stored in \p block_aggregate for all threads. - * - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. 
- * \tparam ScanOp [inferred] Binary scan functor type having member T operator()(const T &a, const T &b) - */ - template - __device__ __forceinline__ void InclusiveSum( - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - T (&output)[ITEMS_PER_THREAD], ///< [out] Calling thread's output items (may be aliased to \p input) - T &block_aggregate) ///< [out] block-wide aggregate reduction of input items - { - if (ITEMS_PER_THREAD == 1) - { - InclusiveSum(input[0], output[0], block_aggregate); - } - else - { - // Reduce consecutive thread items in registers - Sum scan_op; - T thread_prefix = ThreadReduce(input, scan_op); - - // Exclusive threadblock-scan - ExclusiveSum(thread_prefix, thread_prefix, block_aggregate); - - // Inclusive scan in registers with prefix as seed - ThreadScanInclusive(input, output, scan_op, thread_prefix, (linear_tid != 0)); - } - } - - - /** - * \brief Computes an inclusive block-wide prefix scan using addition (+) as the scan operator. Each thread contributes an array of consecutive input elements. Instead of using 0 as the block-wide prefix, the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by lane0 in that warp is used as the "seed" value that logically prefixes the threadblock's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. - * - * \par - * - The \p block_prefix_callback_op functor must implement a member function T operator()(T block_aggregate). - * The functor's input parameter \p block_aggregate is the same value also returned by the scan operation. - * The functor will be invoked by the first warp of threads in the block, however only the return value from - * lane0 is applied as the block-wide prefix. Can be stateful. - * - \blocked - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a single thread block that progressively - * computes an inclusive prefix sum over multiple "tiles" of input using a - * prefix functor to maintain a running total between block-wide scans. Each tile consists - * of 512 integer items that are partitioned in a [blocked arrangement](index.html#sec5sec3) - * across 128 threads where each thread owns 4 consecutive items. - * \par - * \code - * #include // or equivalently - * - * // A stateful callback functor that maintains a running prefix to be applied - * // during consecutive scan operations. - * struct BlockPrefixCallbackOp - * { - * // Running prefix - * int running_total; - * - * // Constructor - * __device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {} - * - * // Callback operator to be entered by the first warp of threads in the block. - * // Thread-0 is responsible for returning a value for seeding the block-wide scan. - * __device__ int operator()(int block_aggregate) - * { - * int old_prefix = running_total; - * running_total += block_aggregate; - * return old_prefix; - * } - * }; - * - * __global__ void ExampleKernel(int *d_data, int num_items, ...) 
- * {
- *     // Specialize BlockLoad, BlockStore, and BlockScan for a 1D block of 128 threads, 4 ints per thread
- *     typedef cub::BlockLoad<int, 128, 4, BLOCK_LOAD_TRANSPOSE>   BlockLoad;
- *     typedef cub::BlockStore<int, 128, 4, BLOCK_STORE_TRANSPOSE> BlockStore;
- *     typedef cub::BlockScan<int, 128>                            BlockScan;
- *
- *     // Allocate aliased shared memory for BlockLoad, BlockStore, and BlockScan
- *     __shared__ union {
- *         typename BlockLoad::TempStorage  load;
- *         typename BlockScan::TempStorage  scan;
- *         typename BlockStore::TempStorage store;
- *     } temp_storage;
- *
- *     // Initialize running total
- *     BlockPrefixCallbackOp prefix_op(0);
- *
- *     // Have the block iterate over segments of items
- *     for (int block_offset = 0; block_offset < num_items; block_offset += 128 * 4)
- *     {
- *         // Load a segment of consecutive items that are blocked across threads
- *         int thread_data[4];
- *         BlockLoad(temp_storage.load).Load(d_data + block_offset, thread_data);
- *         CTA_SYNC();
- *
- *         // Collectively compute the block-wide inclusive prefix sum
- *         BlockScan(temp_storage.scan).InclusiveSum(
- *             thread_data, thread_data, prefix_op);
- *         CTA_SYNC();
- *
- *         // Store scanned items to output segment
- *         BlockStore(temp_storage.store).Store(d_data + block_offset, thread_data);
- *         CTA_SYNC();
- *     }
- * \endcode
- * \par
- * Suppose the input \p d_data is 1, 1, 1, 1, 1, 1, 1, 1, ....
- * The corresponding output for the first segment will be 1, 2, 3, 4, ..., 511, 512.
- * The output for the second segment will be 513, 514, 515, 516, ..., 1023, 1024.
- *
- * \tparam ITEMS_PER_THREAD      [inferred] The number of consecutive items partitioned onto each thread.
- * \tparam BlockPrefixCallbackOp [inferred] Call-back functor type having member T operator()(T block_aggregate)
- */
- template <
-     int ITEMS_PER_THREAD,
-     typename BlockPrefixCallbackOp>
- __device__ __forceinline__ void InclusiveSum(
-     T (&input)[ITEMS_PER_THREAD],                     ///< [in] Calling thread's input items
-     T (&output)[ITEMS_PER_THREAD],                    ///< [out] Calling thread's output items (may be aliased to \p input)
-     BlockPrefixCallbackOp &block_prefix_callback_op)  ///< [in-out] [warp0 only] Call-back functor for specifying a block-wide prefix to be applied to the logical input sequence.
- {
-     if (ITEMS_PER_THREAD == 1)
-     {
-         InclusiveSum(input[0], output[0], block_prefix_callback_op);
-     }
-     else
-     {
-         // Reduce consecutive thread items in registers
-         Sum scan_op;
-         T thread_prefix = ThreadReduce(input, scan_op);
-
-         // Exclusive threadblock-scan
-         ExclusiveSum(thread_prefix, thread_prefix, block_prefix_callback_op);
-
-         // Inclusive scan in registers with prefix as seed
-         ThreadScanInclusive(input, output, scan_op, thread_prefix);
-     }
- }
-
-
- //@}  end member group
- /******************************************************************//**
-  * \name Inclusive prefix scan operations
-  *********************************************************************/
- //@{
-
-
- /**
-  * \brief Computes an inclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element.
-  *
-  * \par
-  * - Supports non-commutative scan operators.
-  * - \rowmajor
-  * - \smemreuse
-  *
-  * \par Snippet
-  * The code snippet below illustrates an inclusive prefix max scan of 128 integer items that
-  * are partitioned across 128 threads.
-  * \par
-  * \code
-  * #include <cub/cub.cuh>   // or equivalently <cub/block/block_scan.cuh>
-  *
-  * __global__ void ExampleKernel(...)
- * { - * // Specialize BlockScan for a 1D block of 128 threads on type int - * typedef cub::BlockScan BlockScan; - * - * // Allocate shared memory for BlockScan - * __shared__ typename BlockScan::TempStorage temp_storage; - * - * // Obtain input item for each thread - * int thread_data; - * ... - * - * // Collectively compute the block-wide inclusive prefix max scan - * BlockScan(temp_storage).InclusiveScan(thread_data, thread_data, cub::Max()); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is 0, -1, 2, -3, ..., 126, -127. The - * corresponding output \p thread_data in those threads will be 0, 0, 2, 2, ..., 126, 126. - * - * \tparam ScanOp [inferred] Binary scan functor type having member T operator()(const T &a, const T &b) - */ - template - __device__ __forceinline__ void InclusiveScan( - T input, ///< [in] Calling thread's input item - T &output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op) ///< [in] Binary scan functor - { - InternalBlockScan(temp_storage).InclusiveScan(input, output, scan_op); - } - - - /** - * \brief Computes an inclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. - * - * \par - * - Supports non-commutative scan operators. - * - \rowmajor - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates an inclusive prefix max scan of 128 integer items that - * are partitioned across 128 threads. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockScan for a 1D block of 128 threads on type int - * typedef cub::BlockScan BlockScan; - * - * // Allocate shared memory for BlockScan - * __shared__ typename BlockScan::TempStorage temp_storage; - * - * // Obtain input item for each thread - * int thread_data; - * ... - * - * // Collectively compute the block-wide inclusive prefix max scan - * int block_aggregate; - * BlockScan(temp_storage).InclusiveScan(thread_data, thread_data, cub::Max(), block_aggregate); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is 0, -1, 2, -3, ..., 126, -127. The - * corresponding output \p thread_data in those threads will be 0, 0, 2, 2, ..., 126, 126. - * Furthermore the value \p 126 will be stored in \p block_aggregate for all threads. - * - * \tparam ScanOp [inferred] Binary scan functor type having member T operator()(const T &a, const T &b) - */ - template - __device__ __forceinline__ void InclusiveScan( - T input, ///< [in] Calling thread's input item - T &output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan functor - T &block_aggregate) ///< [out] block-wide aggregate reduction of input items - { - InternalBlockScan(temp_storage).InclusiveScan(input, output, scan_op, block_aggregate); - } - - - /** - * \brief Computes an inclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by lane0 in that warp is used as the "seed" value that logically prefixes the threadblock's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. 
- * - * \par - * - The \p block_prefix_callback_op functor must implement a member function T operator()(T block_aggregate). - * The functor's input parameter \p block_aggregate is the same value also returned by the scan operation. - * The functor will be invoked by the first warp of threads in the block, however only the return value from - * lane0 is applied as the block-wide prefix. Can be stateful. - * - Supports non-commutative scan operators. - * - \rowmajor - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a single thread block that progressively - * computes an inclusive prefix max scan over multiple "tiles" of input using a - * prefix functor to maintain a running total between block-wide scans. Each tile consists - * of 128 integer items that are partitioned across 128 threads. - * \par - * \code - * #include // or equivalently - * - * // A stateful callback functor that maintains a running prefix to be applied - * // during consecutive scan operations. - * struct BlockPrefixCallbackOp - * { - * // Running prefix - * int running_total; - * - * // Constructor - * __device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {} - * - * // Callback operator to be entered by the first warp of threads in the block. - * // Thread-0 is responsible for returning a value for seeding the block-wide scan. - * __device__ int operator()(int block_aggregate) - * { - * int old_prefix = running_total; - * running_total = (block_aggregate > old_prefix) ? block_aggregate : old_prefix; - * return old_prefix; - * } - * }; - * - * __global__ void ExampleKernel(int *d_data, int num_items, ...) - * { - * // Specialize BlockScan for a 1D block of 128 threads - * typedef cub::BlockScan BlockScan; - * - * // Allocate shared memory for BlockScan - * __shared__ typename BlockScan::TempStorage temp_storage; - * - * // Initialize running total - * BlockPrefixCallbackOp prefix_op(INT_MIN); - * - * // Have the block iterate over segments of items - * for (int block_offset = 0; block_offset < num_items; block_offset += 128) - * { - * // Load a segment of consecutive items that are blocked across threads - * int thread_data = d_data[block_offset]; - * - * // Collectively compute the block-wide inclusive prefix max scan - * BlockScan(temp_storage).InclusiveScan( - * thread_data, thread_data, cub::Max(), prefix_op); - * CTA_SYNC(); - * - * // Store scanned items to output segment - * d_data[block_offset] = thread_data; - * } - * \endcode - * \par - * Suppose the input \p d_data is 0, -1, 2, -3, 4, -5, .... - * The corresponding output for the first segment will be 0, 0, 2, 2, ..., 126, 126. - * The output for the second segment will be 128, 128, 130, 130, ..., 254, 254. - * - * \tparam ScanOp [inferred] Binary scan functor type having member T operator()(const T &a, const T &b) - * \tparam BlockPrefixCallbackOp [inferred] Call-back functor type having member T operator()(T block_aggregate) - */ - template < - typename ScanOp, - typename BlockPrefixCallbackOp> - __device__ __forceinline__ void InclusiveScan( - T input, ///< [in] Calling thread's input item - T &output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan functor - BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] [warp0 only] Call-back functor for specifying a block-wide prefix to be applied to the logical input sequence. 
- { - InternalBlockScan(temp_storage).InclusiveScan(input, output, scan_op, block_prefix_callback_op); - } - - - //@} end member group - /******************************************************************//** - * \name Inclusive prefix scan operations (multiple data per thread) - *********************************************************************/ - //@{ - - - /** - * \brief Computes an inclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes an array of consecutive input elements. - * - * \par - * - Supports non-commutative scan operators. - * - \blocked - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates an inclusive prefix max scan of 512 integer items that - * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads - * where each thread owns 4 consecutive items. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Specialize BlockScan for a 1D block of 128 threads on type int - * typedef cub::BlockScan BlockScan; - * - * // Allocate shared memory for BlockScan - * __shared__ typename BlockScan::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * ... - * - * // Collectively compute the block-wide inclusive prefix max scan - * BlockScan(temp_storage).InclusiveScan(thread_data, thread_data, cub::Max()); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is { [0,-1,2,-3], [4,-5,6,-7], ..., [508,-509,510,-511] }. The - * corresponding output \p thread_data in those threads will be { [0,0,2,2], [4,4,6,6], ..., [508,508,510,510] }. - * - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - * \tparam ScanOp [inferred] Binary scan functor type having member T operator()(const T &a, const T &b) - */ - template < - int ITEMS_PER_THREAD, - typename ScanOp> - __device__ __forceinline__ void InclusiveScan( - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - T (&output)[ITEMS_PER_THREAD], ///< [out] Calling thread's output items (may be aliased to \p input) - ScanOp scan_op) ///< [in] Binary scan functor - { - if (ITEMS_PER_THREAD == 1) - { - InclusiveScan(input[0], output[0], scan_op); - } - else - { - // Reduce consecutive thread items in registers - T thread_prefix = ThreadReduce(input, scan_op); - - // Exclusive threadblock-scan - ExclusiveScan(thread_prefix, thread_prefix, scan_op); - - // Inclusive scan in registers with prefix as seed (first thread does not seed) - ThreadScanInclusive(input, output, scan_op, thread_prefix, (linear_tid != 0)); - } - } - - - /** - * \brief Computes an inclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes an array of consecutive input elements. Also provides every thread with the block-wide \p block_aggregate of all inputs. - * - * \par - * - Supports non-commutative scan operators. - * - \blocked - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates an inclusive prefix max scan of 512 integer items that - * are partitioned in a [blocked arrangement](index.html#sec5sec3) across 128 threads - * where each thread owns 4 consecutive items. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) 
- * { - * // Specialize BlockScan for a 1D block of 128 threads on type int - * typedef cub::BlockScan BlockScan; - * - * // Allocate shared memory for BlockScan - * __shared__ typename BlockScan::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * ... - * - * // Collectively compute the block-wide inclusive prefix max scan - * int block_aggregate; - * BlockScan(temp_storage).InclusiveScan(thread_data, thread_data, cub::Max(), block_aggregate); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is - * { [0,-1,2,-3], [4,-5,6,-7], ..., [508,-509,510,-511] }. - * The corresponding output \p thread_data in those threads will be - * { [0,0,2,2], [4,4,6,6], ..., [508,508,510,510] }. - * Furthermore the value \p 510 will be stored in \p block_aggregate for all threads. - * - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - * \tparam ScanOp [inferred] Binary scan functor type having member T operator()(const T &a, const T &b) - */ - template < - int ITEMS_PER_THREAD, - typename ScanOp> - __device__ __forceinline__ void InclusiveScan( - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - T (&output)[ITEMS_PER_THREAD], ///< [out] Calling thread's output items (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan functor - T &block_aggregate) ///< [out] block-wide aggregate reduction of input items - { - if (ITEMS_PER_THREAD == 1) - { - InclusiveScan(input[0], output[0], scan_op, block_aggregate); - } - else - { - // Reduce consecutive thread items in registers - T thread_prefix = ThreadReduce(input, scan_op); - - // Exclusive threadblock-scan (with no initial value) - ExclusiveScan(thread_prefix, thread_prefix, scan_op, block_aggregate); - - // Inclusive scan in registers with prefix as seed (first thread does not seed) - ThreadScanInclusive(input, output, scan_op, thread_prefix, (linear_tid != 0)); - } - } - - - /** - * \brief Computes an inclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes an array of consecutive input elements. the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by lane0 in that warp is used as the "seed" value that logically prefixes the threadblock's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. - * - * \par - * - The \p block_prefix_callback_op functor must implement a member function T operator()(T block_aggregate). - * The functor's input parameter \p block_aggregate is the same value also returned by the scan operation. - * The functor will be invoked by the first warp of threads in the block, however only the return value from - * lane0 is applied as the block-wide prefix. Can be stateful. - * - Supports non-commutative scan operators. - * - \blocked - * - \granularity - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates a single thread block that progressively - * computes an inclusive prefix max scan over multiple "tiles" of input using a - * prefix functor to maintain a running total between block-wide scans. Each tile consists - * of 128 integer items that are partitioned across 128 threads. - * \par - * \code - * #include // or equivalently - * - * // A stateful callback functor that maintains a running prefix to be applied - * // during consecutive scan operations. 
- * struct BlockPrefixCallbackOp - * { - * // Running prefix - * int running_total; - * - * // Constructor - * __device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {} - * - * // Callback operator to be entered by the first warp of threads in the block. - * // Thread-0 is responsible for returning a value for seeding the block-wide scan. - * __device__ int operator()(int block_aggregate) - * { - * int old_prefix = running_total; - * running_total = (block_aggregate > old_prefix) ? block_aggregate : old_prefix; - * return old_prefix; - * } - * }; - * - * __global__ void ExampleKernel(int *d_data, int num_items, ...) - * { - * // Specialize BlockLoad, BlockStore, and BlockScan for a 1D block of 128 threads, 4 ints per thread - * typedef cub::BlockLoad BlockLoad; - * typedef cub::BlockStore BlockStore; - * typedef cub::BlockScan BlockScan; - * - * // Allocate aliased shared memory for BlockLoad, BlockStore, and BlockScan - * __shared__ union { - * typename BlockLoad::TempStorage load; - * typename BlockScan::TempStorage scan; - * typename BlockStore::TempStorage store; - * } temp_storage; - * - * // Initialize running total - * BlockPrefixCallbackOp prefix_op(0); - * - * // Have the block iterate over segments of items - * for (int block_offset = 0; block_offset < num_items; block_offset += 128 * 4) - * { - * // Load a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * BlockLoad(temp_storage.load).Load(d_data + block_offset, thread_data); - * CTA_SYNC(); - * - * // Collectively compute the block-wide inclusive prefix max scan - * BlockScan(temp_storage.scan).InclusiveScan( - * thread_data, thread_data, cub::Max(), prefix_op); - * CTA_SYNC(); - * - * // Store scanned items to output segment - * BlockStore(temp_storage.store).Store(d_data + block_offset, thread_data); - * CTA_SYNC(); - * } - * \endcode - * \par - * Suppose the input \p d_data is 0, -1, 2, -3, 4, -5, .... - * The corresponding output for the first segment will be 0, 0, 2, 2, 4, 4, ..., 510, 510. - * The output for the second segment will be 512, 512, 514, 514, 516, 516, ..., 1022, 1022. - * - * \tparam ITEMS_PER_THREAD [inferred] The number of consecutive items partitioned onto each thread. - * \tparam ScanOp [inferred] Binary scan functor type having member T operator()(const T &a, const T &b) - * \tparam BlockPrefixCallbackOp [inferred] Call-back functor type having member T operator()(T block_aggregate) - */ - template < - int ITEMS_PER_THREAD, - typename ScanOp, - typename BlockPrefixCallbackOp> - __device__ __forceinline__ void InclusiveScan( - T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items - T (&output)[ITEMS_PER_THREAD], ///< [out] Calling thread's output items (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan functor - BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] [warp0 only] Call-back functor for specifying a block-wide prefix to be applied to the logical input sequence. 
- { - if (ITEMS_PER_THREAD == 1) - { - InclusiveScan(input[0], output[0], scan_op, block_prefix_callback_op); - } - else - { - // Reduce consecutive thread items in registers - T thread_prefix = ThreadReduce(input, scan_op); - - // Exclusive threadblock-scan - ExclusiveScan(thread_prefix, thread_prefix, scan_op, block_prefix_callback_op); - - // Inclusive scan in registers with prefix as seed - ThreadScanInclusive(input, output, scan_op, thread_prefix); - } - } - - //@} end member group - - -}; - -/** - * \example example_block_scan.cu - */ - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/block/block_shuffle.cuh b/ml-xgboost/cub/cub/block/block_shuffle.cuh deleted file mode 100644 index d605191..0000000 --- a/ml-xgboost/cub/cub/block/block_shuffle.cuh +++ /dev/null @@ -1,305 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * The cub::BlockShuffle class provides [collective](index.html#sec0) methods for shuffling data partitioned across a CUDA thread block. - */ - -#pragma once - -#include "../util_arch.cuh" -#include "../util_ptx.cuh" -#include "../util_macro.cuh" -#include "../util_type.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/** - * \brief The BlockShuffle class provides [collective](index.html#sec0) methods for shuffling data partitioned across a CUDA thread block. - * \ingroup BlockModule - * - * \tparam T The data type to be exchanged. 
- * \tparam BLOCK_DIM_X The thread block length in threads along the X dimension - * \tparam BLOCK_DIM_Y [optional] The thread block length in threads along the Y dimension (default: 1) - * \tparam BLOCK_DIM_Z [optional] The thread block length in threads along the Z dimension (default: 1) - * \tparam PTX_ARCH [optional] \ptxversion - * - * \par Overview - * It is commonplace for blocks of threads to rearrange data items between - * threads. The BlockShuffle abstraction allows threads to efficiently shift items - * either (a) up to their successor or (b) down to their predecessor. - * - */ -template < - typename T, - int BLOCK_DIM_X, - int BLOCK_DIM_Y = 1, - int BLOCK_DIM_Z = 1, - int PTX_ARCH = CUB_PTX_ARCH> -class BlockShuffle -{ -private: - - /****************************************************************************** - * Constants - ******************************************************************************/ - - enum - { - BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, - - LOG_WARP_THREADS = CUB_LOG_WARP_THREADS(PTX_ARCH), - WARP_THREADS = 1 << LOG_WARP_THREADS, - WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS, - }; - - /****************************************************************************** - * Type definitions - ******************************************************************************/ - - /// Shared memory storage layout type (last element from each thread's input) - struct _TempStorage - { - T prev[BLOCK_THREADS]; - T next[BLOCK_THREADS]; - }; - - -public: - - /// \smemstorage{BlockShuffle} - struct TempStorage : Uninitialized<_TempStorage> {}; - -private: - - - /****************************************************************************** - * Thread fields - ******************************************************************************/ - - /// Shared storage reference - _TempStorage &temp_storage; - - /// Linear thread-id - unsigned int linear_tid; - - - /****************************************************************************** - * Utility methods - ******************************************************************************/ - - /// Internal storage allocator - __device__ __forceinline__ _TempStorage& PrivateStorage() - { - __shared__ _TempStorage private_storage; - return private_storage; - } - - -public: - - /******************************************************************//** - * \name Collective constructors - *********************************************************************/ - //@{ - - /** - * \brief Collective constructor using a private static allocation of shared memory as temporary storage. - */ - __device__ __forceinline__ BlockShuffle() - : - temp_storage(PrivateStorage()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) - {} - - - /** - * \brief Collective constructor using the specified memory allocation as temporary storage. - */ - __device__ __forceinline__ BlockShuffle( - TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage - : - temp_storage(temp_storage.Alias()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) - {} - - - //@} end member group - /******************************************************************//** - * \name Shuffle movement - *********************************************************************/ - //@{ - - - /** - * \brief Each threadi obtains the \p input provided by threadi+distance. The offset \p distance may be negative. 
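- *
- * \par
- * A usage sketch (illustrative, not from the original docs): a negative
- * offset distance shifts each thread's item down to its predecessor:
- * \code
- * // Specialize BlockShuffle for a 1D block of 128 threads on type int
- * typedef cub::BlockShuffle<int, 128> BlockShuffle;
- * __shared__ typename BlockShuffle::TempStorage temp_storage;
- *
- * int item = ...;    // each thread's input
- * int shifted;
- * BlockShuffle(temp_storage).Offset(item, shifted, -1);
- * // shifted now holds thread (i-1)'s item for every thread i >= 1;
- * // thread0's output is left unmodified
- * \endcode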
- *
- * \par
- * - \smemreuse
- */
- __device__ __forceinline__ void Offset(
-     T   input,                  ///< [in] The input item from the calling thread (thread_i)
-     T&  output,                 ///< [out] The \p input item from the successor (or predecessor) thread thread_{i+distance} (may be aliased to \p input). This value is only updated for thread_i when 0 <= (i + \p distance) < BLOCK_THREADS
-     int distance = 1)           ///< [in] Offset distance (may be negative)
- {
-     temp_storage.prev[linear_tid] = input;
-
-     CTA_SYNC();
-
-     if ((linear_tid + distance >= 0) && (linear_tid + distance < BLOCK_THREADS))
-         output = temp_storage.prev[linear_tid + distance];
- }
-
-
- /**
-  * \brief Each thread_i obtains the \p input provided by thread_{(i+distance)%BLOCK_THREADS}.
-  *
-  * \par
-  * - \smemreuse
-  */
- __device__ __forceinline__ void Rotate(
-     T   input,                  ///< [in] The calling thread's input item
-     T&  output,                 ///< [out] The \p input item from thread thread_{(i+distance)%BLOCK_THREADS} (may be aliased to \p input)
-     unsigned int distance = 1)  ///< [in] Offset distance (0 < \p distance < BLOCK_THREADS)
- {
-     temp_storage.prev[linear_tid] = input;
-
-     CTA_SYNC();
-
-     unsigned int offset = linear_tid + distance;
-     if (offset >= BLOCK_THREADS)
-         offset -= BLOCK_THREADS;
-
-     output = temp_storage.prev[offset];
- }
-
-
- /**
-  * \brief The thread block rotates its [blocked arrangement](index.html#sec5sec3) of \p input items, shifting it up by one item
-  *
-  * \par
-  * - \blocked
-  * - \granularity
-  * - \smemreuse
-  */
- template <int ITEMS_PER_THREAD>
- __device__ __forceinline__ void Up(
-     T (&input)[ITEMS_PER_THREAD],   ///< [in] The calling thread's input items
-     T (&prev)[ITEMS_PER_THREAD])    ///< [out] The corresponding predecessor items (may be aliased to \p input). The item \p prev[0] is not updated for thread0.
- {
-     temp_storage.prev[linear_tid] = input[ITEMS_PER_THREAD - 1];
-
-     CTA_SYNC();
-
-     #pragma unroll
-     for (int ITEM = ITEMS_PER_THREAD - 1; ITEM > 0; --ITEM)
-         prev[ITEM] = input[ITEM - 1];
-
-     if (linear_tid > 0)
-         prev[0] = temp_storage.prev[linear_tid - 1];
- }
-
-
- /**
-  * \brief The thread block rotates its [blocked arrangement](index.html#sec5sec3) of \p input items, shifting it up by one item. All threads receive the \p input provided by thread_{BLOCK_THREADS-1}.
-  *
-  * \par
-  * - \blocked
-  * - \granularity
-  * - \smemreuse
-  */
- template <int ITEMS_PER_THREAD>
- __device__ __forceinline__ void Up(
-     T (&input)[ITEMS_PER_THREAD],   ///< [in] The calling thread's input items
-     T (&prev)[ITEMS_PER_THREAD],    ///< [out] The corresponding predecessor items (may be aliased to \p input). The item \p prev[0] is not updated for thread0.
-     T &block_suffix)                ///< [out] The item \p input[ITEMS_PER_THREAD-1] from thread_{BLOCK_THREADS-1}, provided to all threads
- {
-     Up(input, prev);
-     block_suffix = temp_storage.prev[BLOCK_THREADS - 1];
- }
-
-
- /**
-  * \brief The thread block rotates its [blocked arrangement](index.html#sec5sec3) of \p input items, shifting it down by one item
-  *
-  * \par
-  * - \blocked
-  * - \granularity
-  * - \smemreuse
-  */
- template <int ITEMS_PER_THREAD>
- __device__ __forceinline__ void Down(
-     T (&input)[ITEMS_PER_THREAD],   ///< [in] The calling thread's input items
-     T (&prev)[ITEMS_PER_THREAD])    ///< [out] The corresponding successor items (may be aliased to \p input). The item \p prev[ITEMS_PER_THREAD-1] is not updated for thread_{BLOCK_THREADS-1}.
- {
-     // Share each thread's first item with its predecessor thread
-     temp_storage.next[linear_tid] = input[0];
-
-     CTA_SYNC();
-
-     #pragma unroll
-     for (int ITEM = 0; ITEM < ITEMS_PER_THREAD - 1; ++ITEM)
-         prev[ITEM] = input[ITEM + 1];
-
-     if (linear_tid < BLOCK_THREADS - 1)
-         prev[ITEMS_PER_THREAD - 1] = temp_storage.next[linear_tid + 1];
- }
-
-
- /**
-  * \brief The thread block rotates its [blocked arrangement](index.html#sec5sec3) of input items, shifting it down by one item. All threads receive \p input[0] provided by thread0.
-  *
-  * \par
-  * - \blocked
-  * - \granularity
-  * - \smemreuse
-  */
- template <int ITEMS_PER_THREAD>
- __device__ __forceinline__ void Down(
-     T (&input)[ITEMS_PER_THREAD],   ///< [in] The calling thread's input items
-     T (&prev)[ITEMS_PER_THREAD],    ///< [out] The corresponding successor items (may be aliased to \p input). The item \p prev[ITEMS_PER_THREAD-1] is not updated for thread_{BLOCK_THREADS-1}.
-     T &block_prefix)                ///< [out] The item \p input[0] from thread0, provided to all threads
- {
-     Down(input, prev);
-     block_prefix = temp_storage.next[0];   // thread0's input[0], shared above
- }
-
- //@}  end member group
-
-
- };
-
- }               // CUB namespace
- CUB_NS_POSTFIX  // Optional outer namespace(s)
-
diff --git a/ml-xgboost/cub/cub/block/block_store.cuh b/ml-xgboost/cub/cub/block/block_store.cuh
deleted file mode 100644
index e821f8e..0000000
--- a/ml-xgboost/cub/cub/block/block_store.cuh
+++ /dev/null
@@ -1,1000 +0,0 @@
-/******************************************************************************
- * Copyright (c) 2011, Duane Merrill. All rights reserved.
- * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of the NVIDIA CORPORATION nor the
- *       names of its contributors may be used to endorse or promote products
- *       derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- ******************************************************************************/
-
-/**
- * \file
- * Operations for writing linear segments of data from the CUDA thread block
- */
-
-#pragma once
-
-#include <iterator>
-
-#include "block_exchange.cuh"
-#include "../util_ptx.cuh"
-#include "../util_macro.cuh"
-#include "../util_type.cuh"
-#include "../util_namespace.cuh"
-
-/// Optional outer namespace(s)
-CUB_NS_PREFIX
-
-/// CUB namespace
-namespace cub {
-
-/**
- * \addtogroup UtilIo
- * @{
- */
-
-
-/******************************************************************//**
- * \name Blocked arrangement I/O (direct)
- *********************************************************************/
-//@{
-
-/**
- * \brief Store a blocked arrangement of items across a thread block into a linear segment of items.
- *
- * \blocked
- *
- * \tparam T                    [inferred] The data type to store.
- * \tparam ITEMS_PER_THREAD     [inferred] The number of consecutive items partitioned onto each thread.
- * \tparam OutputIteratorT      [inferred] The random-access iterator type for output \iterator.
- */
-template <
-    typename            T,
-    int                 ITEMS_PER_THREAD,
-    typename            OutputIteratorT>
-__device__ __forceinline__ void StoreDirectBlocked(
-    int                 linear_tid,                 ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., (threadIdx.y * blockDim.x) + linear_tid for 2D thread blocks)
-    OutputIteratorT     block_itr,                  ///< [in] The thread block's base output iterator for storing to
-    T                   (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store
-{
-    OutputIteratorT thread_itr = block_itr + (linear_tid * ITEMS_PER_THREAD);
-
-    // Store directly in thread-blocked order
-    #pragma unroll
-    for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
-    {
-        thread_itr[ITEM] = items[ITEM];
-    }
-}
-
-
-/**
- * \brief Store a blocked arrangement of items across a thread block into a linear segment of items, guarded by range
- *
- * \blocked
- *
- * \tparam T                    [inferred] The data type to store.
- * \tparam ITEMS_PER_THREAD     [inferred] The number of consecutive items partitioned onto each thread.
- * \tparam OutputIteratorT      [inferred] The random-access iterator type for output \iterator.
- */
-template <
-    typename            T,
-    int                 ITEMS_PER_THREAD,
-    typename            OutputIteratorT>
-__device__ __forceinline__ void StoreDirectBlocked(
-    int                 linear_tid,                 ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., (threadIdx.y * blockDim.x) + linear_tid for 2D thread blocks)
-    OutputIteratorT     block_itr,                  ///< [in] The thread block's base output iterator for storing to
-    T                   (&items)[ITEMS_PER_THREAD], ///< [in] Data to store
-    int                 valid_items)                ///< [in] Number of valid items to write
-{
-    OutputIteratorT thread_itr = block_itr + (linear_tid * ITEMS_PER_THREAD);
-
-    // Store directly in thread-blocked order
-    #pragma unroll
-    for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
-    {
-        if (ITEM + (linear_tid * ITEMS_PER_THREAD) < valid_items)
-        {
-            thread_itr[ITEM] = items[ITEM];
-        }
-    }
-}
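-
-/*
- * Illustrative sketch of a guarded blocked store (the kernel and buffer names
- * are hypothetical, not part of the original header): only the first
- * valid_items outputs of the block are written.
- *
- * \code
- * __global__ void ExampleGuardedStoreKernel(int *d_out, int valid_items)
- * {
- *     int thread_data[4];
- *     ...   // compute thread_data
- *
- *     // Thread i writes offsets i*4 through i*4+3, skipping any >= valid_items
- *     cub::StoreDirectBlocked(threadIdx.x, d_out, thread_data, valid_items);
- * }
- * \endcode
- */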
-
-
-/**
- * \brief Store a blocked arrangement of items across a thread block into a linear segment of items.
- *
- * \blocked
- *
- * The output offset (\p block_ptr + \p block_offset) must be quad-item aligned,
- * which is the default starting offset returned by \p cudaMalloc()
- *
- * \par
- * The following conditions will prevent vectorization and storing will fall back to cub::BLOCK_STORE_DIRECT:
- *   - \p ITEMS_PER_THREAD is odd
- *   - The data type \p T is not a built-in primitive or CUDA vector type (e.g., \p short, \p int2, \p double, \p float2, etc.)
- *
- * \tparam T                    [inferred] The data type to store.
- * \tparam ITEMS_PER_THREAD     [inferred] The number of consecutive items partitioned onto each thread.
- */
-template <
-    typename    T,
-    int         ITEMS_PER_THREAD>
-__device__ __forceinline__ void StoreDirectBlockedVectorized(
-    int linear_tid,                 ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., (threadIdx.y * blockDim.x) + linear_tid for 2D thread blocks)
-    T   *block_ptr,                 ///< [in] Input pointer for storing from
-    T   (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store
-{
-    enum
-    {
-        // Maximum CUDA vector size is 4 elements
-        MAX_VEC_SIZE = CUB_MIN(4, ITEMS_PER_THREAD),
-
-        // Vector size must be a power of two and an even divisor of the items per thread
-        VEC_SIZE = ((((MAX_VEC_SIZE - 1) & MAX_VEC_SIZE) == 0) && ((ITEMS_PER_THREAD % MAX_VEC_SIZE) == 0)) ?
-            MAX_VEC_SIZE :
-            1,
-
-        VECTORS_PER_THREAD = ITEMS_PER_THREAD / VEC_SIZE,
-    };
-
-    // Vector type
-    typedef typename CubVector<T, VEC_SIZE>::Type Vector;
-
-    // Alias global pointer
-    Vector *block_ptr_vectors = reinterpret_cast<Vector*>(const_cast<T*>(block_ptr));
-
-    // Alias pointers (use "raw" array here which should get optimized away to prevent conservative PTXAS lmem spilling)
-    Vector raw_vector[VECTORS_PER_THREAD];
-    T *raw_items = reinterpret_cast<T*>(raw_vector);
-
-    // Copy
-    #pragma unroll
-    for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
-    {
-        raw_items[ITEM] = items[ITEM];
-    }
-
-    // Direct-store using vector types
-    StoreDirectBlocked(linear_tid, block_ptr_vectors, raw_vector);
-}
-
-
-
-//@}  end member group
-/******************************************************************//**
- * \name Striped arrangement I/O (direct)
- *********************************************************************/
-//@{
-
-
-/**
- * \brief Store a striped arrangement of data across the thread block into a linear segment of items.
- *
- * \striped
- *
- * \tparam BLOCK_THREADS        The thread block size in threads
- * \tparam T                    [inferred] The data type to store.
- * \tparam ITEMS_PER_THREAD     [inferred] The number of consecutive items partitioned onto each thread.
- * \tparam OutputIteratorT      [inferred] The random-access iterator type for output \iterator.
- */
-template <
-    int                 BLOCK_THREADS,
-    typename            T,
-    int                 ITEMS_PER_THREAD,
-    typename            OutputIteratorT>
-__device__ __forceinline__ void StoreDirectStriped(
-    int                 linear_tid,                 ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., (threadIdx.y * blockDim.x) + linear_tid for 2D thread blocks)
-    OutputIteratorT     block_itr,                  ///< [in] The thread block's base output iterator for storing to
-    T                   (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store
-{
-    OutputIteratorT thread_itr = block_itr + linear_tid;
-
-    // Store directly in striped order
-    #pragma unroll
-    for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
-    {
-        thread_itr[(ITEM * BLOCK_THREADS)] = items[ITEM];
-    }
-}
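-
-/*
- * Illustrative sketch (kernel and buffer names are hypothetical, not part of
- * the original header): a striped store from a block of 128 threads, in which
- * thread i writes its item j to d_out[j * 128 + i], in contrast to the
- * blocked mapping d_out[i * 4 + j] used by StoreDirectBlocked.
- *
- * \code
- * __global__ void ExampleStripedStoreKernel(int *d_out)
- * {
- *     int thread_data[4];
- *     ...   // compute thread_data
- *
- *     cub::StoreDirectStriped<128>(threadIdx.x, d_out, thread_data);
- * }
- * \endcode
- */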
-
-
-/**
- * \brief Store a striped arrangement of data across the thread block into a linear segment of items, guarded by range
- *
- * \striped
- *
- * \tparam BLOCK_THREADS        The thread block size in threads
- * \tparam T                    [inferred] The data type to store.
- * \tparam ITEMS_PER_THREAD     [inferred] The number of consecutive items partitioned onto each thread.
- * \tparam OutputIteratorT      [inferred] The random-access iterator type for output \iterator.
- */
-template <
-    int                 BLOCK_THREADS,
-    typename            T,
-    int                 ITEMS_PER_THREAD,
-    typename            OutputIteratorT>
-__device__ __forceinline__ void StoreDirectStriped(
-    int                 linear_tid,                 ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., (threadIdx.y * blockDim.x) + linear_tid for 2D thread blocks)
-    OutputIteratorT     block_itr,                  ///< [in] The thread block's base output iterator for storing to
-    T                   (&items)[ITEMS_PER_THREAD], ///< [in] Data to store
-    int                 valid_items)                ///< [in] Number of valid items to write
-{
-    OutputIteratorT thread_itr = block_itr + linear_tid;
-
-    // Store directly in striped order
-    #pragma unroll
-    for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
-    {
-        if ((ITEM * BLOCK_THREADS) + linear_tid < valid_items)
-        {
-            thread_itr[(ITEM * BLOCK_THREADS)] = items[ITEM];
-        }
-    }
-}
-
-
-
-//@}  end member group
-/******************************************************************//**
- * \name Warp-striped arrangement I/O (direct)
- *********************************************************************/
-//@{
-
-
-/**
- * \brief Store a warp-striped arrangement of data across the thread block into a linear segment of items.
- *
- * \warpstriped
- *
- * \par Usage Considerations
- * The number of threads in the thread block must be a multiple of the architecture's warp size.
- *
- * \tparam T                    [inferred] The data type to store.
- * \tparam ITEMS_PER_THREAD     [inferred] The number of consecutive items partitioned onto each thread.
- * \tparam OutputIteratorT      [inferred] The random-access iterator type for output \iterator.
- */
-template <
-    typename            T,
-    int                 ITEMS_PER_THREAD,
-    typename            OutputIteratorT>
-__device__ __forceinline__ void StoreDirectWarpStriped(
-    int                 linear_tid,                 ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., (threadIdx.y * blockDim.x) + linear_tid for 2D thread blocks)
-    OutputIteratorT     block_itr,                  ///< [in] The thread block's base output iterator for storing to
-    T                   (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store
-{
-    int tid         = linear_tid & (CUB_PTX_WARP_THREADS - 1);
-    int wid         = linear_tid >> CUB_PTX_LOG_WARP_THREADS;
-    int warp_offset = wid * CUB_PTX_WARP_THREADS * ITEMS_PER_THREAD;
-
-    OutputIteratorT thread_itr = block_itr + warp_offset + tid;
-
-    // Store directly in warp-striped order
-    #pragma unroll
-    for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
-    {
-        thread_itr[(ITEM * CUB_PTX_WARP_THREADS)] = items[ITEM];
-    }
-}
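-
-/*
- * The mapping above: lane l of warp w writes its item j to offset
- * (w * WARP_THREADS * ITEMS_PER_THREAD) + (j * WARP_THREADS) + l, so each
- * warp's stores fall on consecutive addresses and stay coalesced without a
- * shared-memory exchange.
- */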
-
-
-/**
- * \brief Store a warp-striped arrangement of data across the thread block into a linear segment of items, guarded by range
- *
- * \warpstriped
- *
- * \par Usage Considerations
- * The number of threads in the thread block must be a multiple of the architecture's warp size.
- *
- * \tparam T                    [inferred] The data type to store.
- * \tparam ITEMS_PER_THREAD     [inferred] The number of consecutive items partitioned onto each thread.
- * \tparam OutputIteratorT      [inferred] The random-access iterator type for output \iterator.
- */
-template <
-    typename            T,
-    int                 ITEMS_PER_THREAD,
-    typename            OutputIteratorT>
-__device__ __forceinline__ void StoreDirectWarpStriped(
-    int                 linear_tid,                 ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., (threadIdx.y * blockDim.x) + linear_tid for 2D thread blocks)
-    OutputIteratorT     block_itr,                  ///< [in] The thread block's base output iterator for storing to
-    T                   (&items)[ITEMS_PER_THREAD], ///< [in] Data to store
-    int                 valid_items)                ///< [in] Number of valid items to write
-{
-    int tid         = linear_tid & (CUB_PTX_WARP_THREADS - 1);
-    int wid         = linear_tid >> CUB_PTX_LOG_WARP_THREADS;
-    int warp_offset = wid * CUB_PTX_WARP_THREADS * ITEMS_PER_THREAD;
-
-    OutputIteratorT thread_itr = block_itr + warp_offset + tid;
-
-    // Store directly in warp-striped order
-    #pragma unroll
-    for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
-    {
-        if (warp_offset + tid + (ITEM * CUB_PTX_WARP_THREADS) < valid_items)
-        {
-            thread_itr[(ITEM * CUB_PTX_WARP_THREADS)] = items[ITEM];
-        }
-    }
-}
-
-
-//@}  end member group
-
-
-/** @} */       // end group UtilIo
-
-
-//-----------------------------------------------------------------------------
-// Generic BlockStore abstraction
-//-----------------------------------------------------------------------------
-
-/**
- * \brief cub::BlockStoreAlgorithm enumerates alternative algorithms for cub::BlockStore to write a blocked arrangement of items across a CUDA thread block to a linear segment of memory.
- */
-enum BlockStoreAlgorithm
-{
-    /**
-     * \par Overview
-     *
-     * A [blocked arrangement](index.html#sec5sec3) of data is written
-     * directly to memory.
-     *
-     * \par Performance Considerations
-     * - The utilization of memory transactions (coalescing) decreases as the
-     *   access stride between threads increases (i.e., the number of items per thread).
-     */
-    BLOCK_STORE_DIRECT,
-
-    /**
-     * \par Overview
-     *
-     * A [blocked arrangement](index.html#sec5sec3) of data is written directly
-     * to memory using CUDA's built-in vectorized stores as a coalescing optimization.
-     * For example, st.global.v4.s32 instructions will be generated
-     * when \p T = \p int and \p ITEMS_PER_THREAD % 4 == 0.
-     *
-     * \par Performance Considerations
-     * - The utilization of memory transactions (coalescing) remains high until the
-     *   access stride between threads (i.e., the number of items per thread) exceeds the
-     *   maximum vector store width (typically 4 items or 64B, whichever is lower).
-     * - The following conditions will prevent vectorization and writing will fall back to cub::BLOCK_STORE_DIRECT:
-     *   - \p ITEMS_PER_THREAD is odd
-     *   - The \p OutputIteratorT is not a simple pointer type
-     *   - The block output offset is not quadword-aligned
-     *   - The data type \p T is not a built-in primitive or CUDA vector type (e.g., \p short, \p int2, \p double, \p float2, etc.)
-     */
-    BLOCK_STORE_VECTORIZE,
-
-    /**
-     * \par Overview
-     * A [blocked arrangement](index.html#sec5sec3) is locally
-     * transposed and then efficiently written to memory as a [striped arrangement](index.html#sec5sec3).
-     *
-     * \par Performance Considerations
-     * - The utilization of memory transactions (coalescing) remains high regardless
-     *   of items written per thread.
-     * - The local reordering incurs slightly longer latencies and lower throughput than the
-     *   direct cub::BLOCK_STORE_DIRECT and cub::BLOCK_STORE_VECTORIZE alternatives.
- */ - BLOCK_STORE_TRANSPOSE, - - /** - * \par Overview - * A [blocked arrangement](index.html#sec5sec3) is locally - * transposed and then efficiently written to memory as a - * [warp-striped arrangement](index.html#sec5sec3) - * - * \par Usage Considerations - * - BLOCK_THREADS must be a multiple of WARP_THREADS - * - * \par Performance Considerations - * - The utilization of memory transactions (coalescing) remains high regardless - * of items written per thread. - * - The local reordering incurs slightly longer latencies and throughput than the - * direct cub::BLOCK_STORE_DIRECT and cub::BLOCK_STORE_VECTORIZE alternatives. - */ - BLOCK_STORE_WARP_TRANSPOSE, - - /** - * \par Overview - * A [blocked arrangement](index.html#sec5sec3) is locally - * transposed and then efficiently written to memory as a - * [warp-striped arrangement](index.html#sec5sec3) - * To reduce the shared memory requirement, only one warp's worth of shared - * memory is provisioned and is subsequently time-sliced among warps. - * - * \par Usage Considerations - * - BLOCK_THREADS must be a multiple of WARP_THREADS - * - * \par Performance Considerations - * - The utilization of memory transactions (coalescing) remains high regardless - * of items written per thread. - * - Provisions less shared memory temporary storage, but incurs larger - * latencies than the BLOCK_STORE_WARP_TRANSPOSE alternative. - */ - BLOCK_STORE_WARP_TRANSPOSE_TIMESLICED, - -}; - - -/** - * \brief The BlockStore class provides [collective](index.html#sec0) data movement methods for writing a [blocked arrangement](index.html#sec5sec3) of items partitioned across a CUDA thread block to a linear segment of memory. ![](block_store_logo.png) - * \ingroup BlockModule - * \ingroup UtilIo - * - * \tparam T The type of data to be written. - * \tparam BLOCK_DIM_X The thread block length in threads along the X dimension - * \tparam ITEMS_PER_THREAD The number of consecutive items partitioned onto each thread. - * \tparam ALGORITHM [optional] cub::BlockStoreAlgorithm tuning policy enumeration. default: cub::BLOCK_STORE_DIRECT. - * \tparam WARP_TIME_SLICING [optional] Whether or not only one warp's worth of shared memory should be allocated and time-sliced among block-warps during any load-related data transpositions (versus each warp having its own storage). (default: false) - * \tparam BLOCK_DIM_Y [optional] The thread block length in threads along the Y dimension (default: 1) - * \tparam BLOCK_DIM_Z [optional] The thread block length in threads along the Z dimension (default: 1) - * \tparam PTX_ARCH [optional] \ptxversion - * - * \par Overview - * - The BlockStore class provides a single data movement abstraction that can be specialized - * to implement different cub::BlockStoreAlgorithm strategies. This facilitates different - * performance policies for different architectures, data types, granularity sizes, etc. - * - BlockStore can be optionally specialized by different data movement strategies: - * -# cub::BLOCK_STORE_DIRECT. A [blocked arrangement](index.html#sec5sec3) of data is written - * directly to memory. [More...](\ref cub::BlockStoreAlgorithm) - * -# cub::BLOCK_STORE_VECTORIZE. A [blocked arrangement](index.html#sec5sec3) - * of data is written directly to memory using CUDA's built-in vectorized stores as a - * coalescing optimization. [More...](\ref cub::BlockStoreAlgorithm) - * -# cub::BLOCK_STORE_TRANSPOSE. 
A [blocked arrangement](index.html#sec5sec3) - * is locally transposed into a [striped arrangement](index.html#sec5sec3) which is - * then written to memory. [More...](\ref cub::BlockStoreAlgorithm) - * -# cub::BLOCK_STORE_WARP_TRANSPOSE. A [blocked arrangement](index.html#sec5sec3) - * is locally transposed into a [warp-striped arrangement](index.html#sec5sec3) which is - * then written to memory. [More...](\ref cub::BlockStoreAlgorithm) - * - \rowmajor - * - * \par A Simple Example - * \blockcollective{BlockStore} - * \par - * The code snippet below illustrates the storing of a "blocked" arrangement - * of 512 integers across 128 threads (where each thread owns 4 consecutive items) - * into a linear segment of memory. The store is specialized for \p BLOCK_STORE_WARP_TRANSPOSE, - * meaning items are locally reordered among threads so that memory references will be - * efficiently coalesced using a warp-striped access pattern. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(int *d_data, ...) - * { - * // Specialize BlockStore for a 1D block of 128 threads owning 4 integer items each - * typedef cub::BlockStore BlockStore; - * - * // Allocate shared memory for BlockStore - * __shared__ typename BlockStore::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * ... - * - * // Store items to linear memory - * int thread_data[4]; - * BlockStore(temp_storage).Store(d_data, thread_data); - * - * \endcode - * \par - * Suppose the set of \p thread_data across the block of threads is - * { [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }. - * The output \p d_data will be 0, 1, 2, 3, 4, 5, .... - * - */ -template < - typename T, - int BLOCK_DIM_X, - int ITEMS_PER_THREAD, - BlockStoreAlgorithm ALGORITHM = BLOCK_STORE_DIRECT, - int BLOCK_DIM_Y = 1, - int BLOCK_DIM_Z = 1, - int PTX_ARCH = CUB_PTX_ARCH> -class BlockStore -{ -private: - /****************************************************************************** - * Constants and typed definitions - ******************************************************************************/ - - /// Constants - enum - { - /// The thread block size in threads - BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, - }; - - - /****************************************************************************** - * Algorithmic variants - ******************************************************************************/ - - /// Store helper - template - struct StoreInternal; - - - /** - * BLOCK_STORE_DIRECT specialization of store helper - */ - template - struct StoreInternal - { - /// Shared memory storage layout type - typedef NullType TempStorage; - - /// Linear thread-id - int linear_tid; - - /// Constructor - __device__ __forceinline__ StoreInternal( - TempStorage &/*temp_storage*/, - int linear_tid) - : - linear_tid(linear_tid) - {} - - /// Store items into a linear segment of memory - template - __device__ __forceinline__ void Store( - OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to - T (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store - { - StoreDirectBlocked(linear_tid, block_itr, items); - } - - /// Store items into a linear segment of memory, guarded by range - template - __device__ __forceinline__ void Store( - OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to - T (&items)[ITEMS_PER_THREAD], ///< [in] Data to store - int valid_items) ///< [in] Number of valid 
items to write - { - StoreDirectBlocked(linear_tid, block_itr, items, valid_items); - } - }; - - - /** - * BLOCK_STORE_VECTORIZE specialization of store helper - */ - template - struct StoreInternal - { - /// Shared memory storage layout type - typedef NullType TempStorage; - - /// Linear thread-id - int linear_tid; - - /// Constructor - __device__ __forceinline__ StoreInternal( - TempStorage &/*temp_storage*/, - int linear_tid) - : - linear_tid(linear_tid) - {} - - /// Store items into a linear segment of memory, specialized for native pointer types (attempts vectorization) - __device__ __forceinline__ void Store( - T *block_ptr, ///< [in] The thread block's base output iterator for storing to - T (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store - { - StoreDirectBlockedVectorized(linear_tid, block_ptr, items); - } - - /// Store items into a linear segment of memory, specialized for opaque input iterators (skips vectorization) - template - __device__ __forceinline__ void Store( - OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to - T (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store - { - StoreDirectBlocked(linear_tid, block_itr, items); - } - - /// Store items into a linear segment of memory, guarded by range - template - __device__ __forceinline__ void Store( - OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to - T (&items)[ITEMS_PER_THREAD], ///< [in] Data to store - int valid_items) ///< [in] Number of valid items to write - { - StoreDirectBlocked(linear_tid, block_itr, items, valid_items); - } - }; - - - /** - * BLOCK_STORE_TRANSPOSE specialization of store helper - */ - template - struct StoreInternal - { - // BlockExchange utility type for keys - typedef BlockExchange BlockExchange; - - /// Shared memory storage layout type - struct _TempStorage : BlockExchange::TempStorage - { - /// Temporary storage for partially-full block guard - volatile int valid_items; - }; - - /// Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - /// Thread reference to shared storage - _TempStorage &temp_storage; - - /// Linear thread-id - int linear_tid; - - /// Constructor - __device__ __forceinline__ StoreInternal( - TempStorage &temp_storage, - int linear_tid) - : - temp_storage(temp_storage.Alias()), - linear_tid(linear_tid) - {} - - /// Store items into a linear segment of memory - template - __device__ __forceinline__ void Store( - OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to - T (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store - { - BlockExchange(temp_storage).BlockedToStriped(items); - StoreDirectStriped(linear_tid, block_itr, items); - } - - /// Store items into a linear segment of memory, guarded by range - template - __device__ __forceinline__ void Store( - OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to - T (&items)[ITEMS_PER_THREAD], ///< [in] Data to store - int valid_items) ///< [in] Number of valid items to write - { - BlockExchange(temp_storage).BlockedToStriped(items); - if (linear_tid == 0) - temp_storage.valid_items = valid_items; // Move through volatile smem as a workaround to prevent RF spilling on subsequent loads - CTA_SYNC(); - StoreDirectStriped(linear_tid, block_itr, items, temp_storage.valid_items); - } - }; - - - /** - * BLOCK_STORE_WARP_TRANSPOSE specialization of store helper - */ - template - struct StoreInternal - { - enum - { - WARP_THREADS 
= CUB_WARP_THREADS(PTX_ARCH) - }; - - // Assert BLOCK_THREADS must be a multiple of WARP_THREADS - CUB_STATIC_ASSERT((BLOCK_THREADS % WARP_THREADS == 0), "BLOCK_THREADS must be a multiple of WARP_THREADS"); - - // BlockExchange utility type for keys - typedef BlockExchange BlockExchange; - - /// Shared memory storage layout type - struct _TempStorage : BlockExchange::TempStorage - { - /// Temporary storage for partially-full block guard - volatile int valid_items; - }; - - /// Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - /// Thread reference to shared storage - _TempStorage &temp_storage; - - /// Linear thread-id - int linear_tid; - - /// Constructor - __device__ __forceinline__ StoreInternal( - TempStorage &temp_storage, - int linear_tid) - : - temp_storage(temp_storage.Alias()), - linear_tid(linear_tid) - {} - - /// Store items into a linear segment of memory - template - __device__ __forceinline__ void Store( - OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to - T (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store - { - BlockExchange(temp_storage).BlockedToWarpStriped(items); - StoreDirectWarpStriped(linear_tid, block_itr, items); - } - - /// Store items into a linear segment of memory, guarded by range - template - __device__ __forceinline__ void Store( - OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to - T (&items)[ITEMS_PER_THREAD], ///< [in] Data to store - int valid_items) ///< [in] Number of valid items to write - { - BlockExchange(temp_storage).BlockedToWarpStriped(items); - if (linear_tid == 0) - temp_storage.valid_items = valid_items; // Move through volatile smem as a workaround to prevent RF spilling on subsequent loads - CTA_SYNC(); - StoreDirectWarpStriped(linear_tid, block_itr, items, temp_storage.valid_items); - } - }; - - - /** - * BLOCK_STORE_WARP_TRANSPOSE_TIMESLICED specialization of store helper - */ - template - struct StoreInternal - { - enum - { - WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH) - }; - - // Assert BLOCK_THREADS must be a multiple of WARP_THREADS - CUB_STATIC_ASSERT((BLOCK_THREADS % WARP_THREADS == 0), "BLOCK_THREADS must be a multiple of WARP_THREADS"); - - // BlockExchange utility type for keys - typedef BlockExchange BlockExchange; - - /// Shared memory storage layout type - struct _TempStorage : BlockExchange::TempStorage - { - /// Temporary storage for partially-full block guard - volatile int valid_items; - }; - - /// Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - /// Thread reference to shared storage - _TempStorage &temp_storage; - - /// Linear thread-id - int linear_tid; - - /// Constructor - __device__ __forceinline__ StoreInternal( - TempStorage &temp_storage, - int linear_tid) - : - temp_storage(temp_storage.Alias()), - linear_tid(linear_tid) - {} - - /// Store items into a linear segment of memory - template - __device__ __forceinline__ void Store( - OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to - T (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store - { - BlockExchange(temp_storage).BlockedToWarpStriped(items); - StoreDirectWarpStriped(linear_tid, block_itr, items); - } - - /// Store items into a linear segment of memory, guarded by range - template - __device__ __forceinline__ void Store( - OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to - T 
(&items)[ITEMS_PER_THREAD], ///< [in] Data to store - int valid_items) ///< [in] Number of valid items to write - { - BlockExchange(temp_storage).BlockedToWarpStriped(items); - if (linear_tid == 0) - temp_storage.valid_items = valid_items; // Move through volatile smem as a workaround to prevent RF spilling on subsequent loads - CTA_SYNC(); - StoreDirectWarpStriped(linear_tid, block_itr, items, temp_storage.valid_items); - } - }; - - /****************************************************************************** - * Type definitions - ******************************************************************************/ - - /// Internal load implementation to use - typedef StoreInternal InternalStore; - - - /// Shared memory storage layout type - typedef typename InternalStore::TempStorage _TempStorage; - - - /****************************************************************************** - * Utility methods - ******************************************************************************/ - - /// Internal storage allocator - __device__ __forceinline__ _TempStorage& PrivateStorage() - { - __shared__ _TempStorage private_storage; - return private_storage; - } - - - /****************************************************************************** - * Thread fields - ******************************************************************************/ - - /// Thread reference to shared storage - _TempStorage &temp_storage; - - /// Linear thread-id - int linear_tid; - -public: - - - /// \smemstorage{BlockStore} - struct TempStorage : Uninitialized<_TempStorage> {}; - - - /******************************************************************//** - * \name Collective constructors - *********************************************************************/ - //@{ - - /** - * \brief Collective constructor using a private static allocation of shared memory as temporary storage. - */ - __device__ __forceinline__ BlockStore() - : - temp_storage(PrivateStorage()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) - {} - - - /** - * \brief Collective constructor using the specified memory allocation as temporary storage. - */ - __device__ __forceinline__ BlockStore( - TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage - : - temp_storage(temp_storage.Alias()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) - {} - - - //@} end member group - /******************************************************************//** - * \name Data movement - *********************************************************************/ - //@{ - - - /** - * \brief Store items into a linear segment of memory. - * - * \par - * - \blocked - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates the storing of a "blocked" arrangement - * of 512 integers across 128 threads (where each thread owns 4 consecutive items) - * into a linear segment of memory. The store is specialized for \p BLOCK_STORE_WARP_TRANSPOSE, - * meaning items are locally reordered among threads so that memory references will be - * efficiently coalesced using a warp-striped access pattern. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(int *d_data, ...) 
- * { - * // Specialize BlockStore for a 1D block of 128 threads owning 4 integer items each - * typedef cub::BlockStore BlockStore; - * - * // Allocate shared memory for BlockStore - * __shared__ typename BlockStore::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * ... - * - * // Store items to linear memory - * int thread_data[4]; - * BlockStore(temp_storage).Store(d_data, thread_data); - * - * \endcode - * \par - * Suppose the set of \p thread_data across the block of threads is - * { [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }. - * The output \p d_data will be 0, 1, 2, 3, 4, 5, .... - * - */ - template - __device__ __forceinline__ void Store( - OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to - T (&items)[ITEMS_PER_THREAD]) ///< [in] Data to store - { - InternalStore(temp_storage, linear_tid).Store(block_itr, items); - } - - /** - * \brief Store items into a linear segment of memory, guarded by range. - * - * \par - * - \blocked - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates the guarded storing of a "blocked" arrangement - * of 512 integers across 128 threads (where each thread owns 4 consecutive items) - * into a linear segment of memory. The store is specialized for \p BLOCK_STORE_WARP_TRANSPOSE, - * meaning items are locally reordered among threads so that memory references will be - * efficiently coalesced using a warp-striped access pattern. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(int *d_data, int valid_items, ...) - * { - * // Specialize BlockStore for a 1D block of 128 threads owning 4 integer items each - * typedef cub::BlockStore BlockStore; - * - * // Allocate shared memory for BlockStore - * __shared__ typename BlockStore::TempStorage temp_storage; - * - * // Obtain a segment of consecutive items that are blocked across threads - * int thread_data[4]; - * ... - * - * // Store items to linear memory - * int thread_data[4]; - * BlockStore(temp_storage).Store(d_data, thread_data, valid_items); - * - * \endcode - * \par - * Suppose the set of \p thread_data across the block of threads is - * { [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] } and \p valid_items is \p 5. - * The output \p d_data will be 0, 1, 2, 3, 4, ?, ?, ?, ..., with - * only the first two threads being unmasked to store portions of valid data. - * - */ - template - __device__ __forceinline__ void Store( - OutputIteratorT block_itr, ///< [in] The thread block's base output iterator for storing to - T (&items)[ITEMS_PER_THREAD], ///< [in] Data to store - int valid_items) ///< [in] Number of valid items to write - { - InternalStore(temp_storage, linear_tid).Store(block_itr, items, valid_items); - } -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/block/specializations/block_histogram_atomic.cuh b/ml-xgboost/cub/cub/block/specializations/block_histogram_atomic.cuh deleted file mode 100644 index 8744efb..0000000 --- a/ml-xgboost/cub/cub/block/specializations/block_histogram_atomic.cuh +++ /dev/null @@ -1,82 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * The cub::BlockHistogramAtomic class provides atomic-based methods for constructing block-wide histograms from data samples partitioned across a CUDA thread block. - */ - -#pragma once - -#include "../../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \brief The BlockHistogramAtomic class provides atomic-based methods for constructing block-wide histograms from data samples partitioned across a CUDA thread block. - */ -template -struct BlockHistogramAtomic -{ - /// Shared memory storage layout type - struct TempStorage {}; - - - /// Constructor - __device__ __forceinline__ BlockHistogramAtomic( - TempStorage &temp_storage) - {} - - - /// Composite data onto an existing histogram - template < - typename T, - typename CounterT, - int ITEMS_PER_THREAD> - __device__ __forceinline__ void Composite( - T (&items)[ITEMS_PER_THREAD], ///< [in] Calling thread's input values to histogram - CounterT histogram[BINS]) ///< [out] Reference to shared/device-accessible memory histogram - { - // Update histogram - #pragma unroll - for (int i = 0; i < ITEMS_PER_THREAD; ++i) - { - atomicAdd(histogram + items[i], 1); - } - } - -}; - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/block/specializations/block_histogram_sort.cuh b/ml-xgboost/cub/cub/block/specializations/block_histogram_sort.cuh deleted file mode 100644 index 5f88b4f..0000000 --- a/ml-xgboost/cub/cub/block/specializations/block_histogram_sort.cuh +++ /dev/null @@ -1,226 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * The cub::BlockHistogramSort class provides sorting-based methods for constructing block-wide histograms from data samples partitioned across a CUDA thread block. - */ - -#pragma once - -#include "../../block/block_radix_sort.cuh" -#include "../../block/block_discontinuity.cuh" -#include "../../util_ptx.cuh" -#include "../../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - - -/** - * \brief The BlockHistogramSort class provides sorting-based methods for constructing block-wide histograms from data samples partitioned across a CUDA thread block. - */ -template < - typename T, ///< Sample type - int BLOCK_DIM_X, ///< The thread block length in threads along the X dimension - int ITEMS_PER_THREAD, ///< The number of samples per thread - int BINS, ///< The number of bins into which histogram samples may fall - int BLOCK_DIM_Y, ///< The thread block length in threads along the Y dimension - int BLOCK_DIM_Z, ///< The thread block length in threads along the Z dimension - int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective -struct BlockHistogramSort -{ - /// Constants - enum - { - /// The thread block size in threads - BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, - }; - - // Parameterize BlockRadixSort type for our thread block - typedef BlockRadixSort< - T, - BLOCK_DIM_X, - ITEMS_PER_THREAD, - NullType, - 4, - (PTX_ARCH >= 350) ? 
true : false, - BLOCK_SCAN_WARP_SCANS, - cudaSharedMemBankSizeFourByte, - BLOCK_DIM_Y, - BLOCK_DIM_Z, - PTX_ARCH> - BlockRadixSortT; - - // Parameterize BlockDiscontinuity type for our thread block - typedef BlockDiscontinuity< - T, - BLOCK_DIM_X, - BLOCK_DIM_Y, - BLOCK_DIM_Z, - PTX_ARCH> - BlockDiscontinuityT; - - /// Shared memory - union _TempStorage - { - // Storage for sorting bin values - typename BlockRadixSortT::TempStorage sort; - - struct - { - // Storage for detecting discontinuities in the tile of sorted bin values - typename BlockDiscontinuityT::TempStorage flag; - - // Storage for noting begin/end offsets of bin runs in the tile of sorted bin values - unsigned int run_begin[BINS]; - unsigned int run_end[BINS]; - }; - }; - - - /// Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - - // Thread fields - _TempStorage &temp_storage; - unsigned int linear_tid; - - - /// Constructor - __device__ __forceinline__ BlockHistogramSort( - TempStorage &temp_storage) - : - temp_storage(temp_storage.Alias()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) - {} - - - // Discontinuity functor - struct DiscontinuityOp - { - // Reference to temp_storage - _TempStorage &temp_storage; - - // Constructor - __device__ __forceinline__ DiscontinuityOp(_TempStorage &temp_storage) : - temp_storage(temp_storage) - {} - - // Discontinuity predicate - __device__ __forceinline__ bool operator()(const T &a, const T &b, int b_index) - { - if (a != b) - { - // Note the begin/end offsets in shared storage - temp_storage.run_begin[b] = b_index; - temp_storage.run_end[a] = b_index; - - return true; - } - else - { - return false; - } - } - }; - - - // Composite data onto an existing histogram - template < - typename CounterT > - __device__ __forceinline__ void Composite( - T (&items)[ITEMS_PER_THREAD], ///< [in] Calling thread's input values to histogram - CounterT histogram[BINS]) ///< [out] Reference to shared/device-accessible memory histogram - { - enum { TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD }; - - // Sort bytes in blocked arrangement - BlockRadixSortT(temp_storage.sort).Sort(items); - - CTA_SYNC(); - - // Initialize the shared memory's run_begin and run_end for each bin - int histo_offset = 0; - - #pragma unroll - for(; histo_offset + BLOCK_THREADS <= BINS; histo_offset += BLOCK_THREADS) - { - temp_storage.run_begin[histo_offset + linear_tid] = TILE_SIZE; - temp_storage.run_end[histo_offset + linear_tid] = TILE_SIZE; - } - // Finish up with guarded initialization if necessary - if ((BINS % BLOCK_THREADS != 0) && (histo_offset + linear_tid < BINS)) - { - temp_storage.run_begin[histo_offset + linear_tid] = TILE_SIZE; - temp_storage.run_end[histo_offset + linear_tid] = TILE_SIZE; - } - - CTA_SYNC(); - - int flags[ITEMS_PER_THREAD]; // unused - - // Compute head flags to demarcate contiguous runs of the same bin in the sorted tile - DiscontinuityOp flag_op(temp_storage); - BlockDiscontinuityT(temp_storage.flag).FlagHeads(flags, items, flag_op); - - // Update begin for first item - if (linear_tid == 0) temp_storage.run_begin[items[0]] = 0; - - CTA_SYNC(); - - // Composite into histogram - histo_offset = 0; - - #pragma unroll - for(; histo_offset + BLOCK_THREADS <= BINS; histo_offset += BLOCK_THREADS) - { - int thread_offset = histo_offset + linear_tid; - CounterT count = temp_storage.run_end[thread_offset] - temp_storage.run_begin[thread_offset]; - histogram[thread_offset] += count; - } - - // Finish up with guarded composition if 
necessary - if ((BINS % BLOCK_THREADS != 0) && (histo_offset + linear_tid < BINS)) - { - int thread_offset = histo_offset + linear_tid; - CounterT count = temp_storage.run_end[thread_offset] - temp_storage.run_begin[thread_offset]; - histogram[thread_offset] += count; - } - } - -}; - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/block/specializations/block_reduce_raking.cuh b/ml-xgboost/cub/cub/block/specializations/block_reduce_raking.cuh deleted file mode 100644 index 4baf7f2..0000000 --- a/ml-xgboost/cub/cub/block/specializations/block_reduce_raking.cuh +++ /dev/null @@ -1,222 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::BlockReduceRaking provides raking-based methods of parallel reduction across a CUDA thread block. Supports non-commutative reduction operators. - */ - -#pragma once - -#include "../../block/block_raking_layout.cuh" -#include "../../warp/warp_reduce.cuh" -#include "../../thread/thread_reduce.cuh" -#include "../../util_ptx.cuh" -#include "../../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \brief BlockReduceRaking provides raking-based methods of parallel reduction across a CUDA thread block. Supports non-commutative reduction operators. - * - * Supports non-commutative binary reduction operators. Unlike commutative - * reduction operators (e.g., addition), the application of a non-commutative - * reduction operator (e.g, string concatenation) across a sequence of inputs must - * honor the relative ordering of items and partial reductions when applying the - * reduction operator. 
- * - * Compared to the implementation of BlockReduceRaking (which does not support - * non-commutative operators), this implementation requires a few extra - * rounds of inter-thread communication. - */ -template < - typename T, ///< Data type being reduced - int BLOCK_DIM_X, ///< The thread block length in threads along the X dimension - int BLOCK_DIM_Y, ///< The thread block length in threads along the Y dimension - int BLOCK_DIM_Z, ///< The thread block length in threads along the Z dimension - int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective -struct BlockReduceRaking -{ - /// Constants - enum - { - /// The thread block size in threads - BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, - }; - - /// Layout type for padded thread block raking grid - typedef BlockRakingLayout BlockRakingLayout; - - /// WarpReduce utility type - typedef typename WarpReduce::InternalWarpReduce WarpReduce; - - /// Constants - enum - { - /// Number of raking threads - RAKING_THREADS = BlockRakingLayout::RAKING_THREADS, - - /// Number of raking elements per warp synchronous raking thread - SEGMENT_LENGTH = BlockRakingLayout::SEGMENT_LENGTH, - - /// Cooperative work can be entirely warp synchronous - WARP_SYNCHRONOUS = (RAKING_THREADS == BLOCK_THREADS), - - /// Whether or not warp-synchronous reduction should be unguarded (i.e., the warp-reduction elements is a power of two - WARP_SYNCHRONOUS_UNGUARDED = PowerOfTwo::VALUE, - - /// Whether or not accesses into smem are unguarded - RAKING_UNGUARDED = BlockRakingLayout::UNGUARDED, - - }; - - - /// Shared memory storage layout type - union _TempStorage - { - typename WarpReduce::TempStorage warp_storage; ///< Storage for warp-synchronous reduction - typename BlockRakingLayout::TempStorage raking_grid; ///< Padded threadblock raking grid - }; - - - /// Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - - // Thread fields - _TempStorage &temp_storage; - unsigned int linear_tid; - - - /// Constructor - __device__ __forceinline__ BlockReduceRaking( - TempStorage &temp_storage) - : - temp_storage(temp_storage.Alias()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) - {} - - - template - __device__ __forceinline__ T RakingReduction( - ReductionOp reduction_op, ///< [in] Binary scan operator - T *raking_segment, - T partial, ///< [in] [lane0 only] Warp-wide aggregate reduction of input items - int num_valid, ///< [in] Number of valid elements (may be less than BLOCK_THREADS) - Int2Type /*iteration*/) - { - // Update partial if addend is in range - if ((IS_FULL_TILE && RAKING_UNGUARDED) || ((linear_tid * SEGMENT_LENGTH) + ITERATION < num_valid)) - { - T addend = raking_segment[ITERATION]; - partial = reduction_op(partial, addend); - } - return RakingReduction(reduction_op, raking_segment, partial, num_valid, Int2Type()); - } - - template - __device__ __forceinline__ T RakingReduction( - ReductionOp /*reduction_op*/, ///< [in] Binary scan operator - T * /*raking_segment*/, - T partial, ///< [in] [lane0 only] Warp-wide aggregate reduction of input items - int /*num_valid*/, ///< [in] Number of valid elements (may be less than BLOCK_THREADS) - Int2Type /*iteration*/) - { - return partial; - } - - - - /// Computes a threadblock-wide reduction using the specified reduction operator. The first num_valid threads each contribute one reduction partial. The return value is only valid for thread0. 
- template < - bool IS_FULL_TILE, - typename ReductionOp> - __device__ __forceinline__ T Reduce( - T partial, ///< [in] Calling thread's input partial reductions - int num_valid, ///< [in] Number of valid elements (may be less than BLOCK_THREADS) - ReductionOp reduction_op) ///< [in] Binary reduction operator - { - if (WARP_SYNCHRONOUS) - { - // Short-circuit directly to warp synchronous reduction (unguarded if active threads is a power-of-two) - partial = WarpReduce(temp_storage.warp_storage).template Reduce( - partial, - num_valid, - reduction_op); - } - else - { - // Place partial into shared memory grid. - *BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid) = partial; - - CTA_SYNC(); - - // Reduce parallelism to one warp - if (linear_tid < RAKING_THREADS) - { - // Raking reduction in grid - T *raking_segment = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid); - partial = raking_segment[0]; - - partial = RakingReduction(reduction_op, raking_segment, partial, num_valid, Int2Type<1>()); - - partial = WarpReduce(temp_storage.warp_storage).template Reduce( - partial, - num_valid, - reduction_op); - - } - } - - return partial; - } - - - /// Computes a threadblock-wide reduction using addition (+) as the reduction operator. The first num_valid threads each contribute one reduction partial. The return value is only valid for thread0. - template - __device__ __forceinline__ T Sum( - T partial, ///< [in] Calling thread's input partial reductions - int num_valid) ///< [in] Number of valid elements (may be less than BLOCK_THREADS) - { - cub::Sum reduction_op; - - return Reduce(partial, num_valid, reduction_op); - } - - - -}; - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/block/specializations/block_reduce_raking_commutative_only.cuh b/ml-xgboost/cub/cub/block/specializations/block_reduce_raking_commutative_only.cuh deleted file mode 100644 index 9093236..0000000 --- a/ml-xgboost/cub/cub/block/specializations/block_reduce_raking_commutative_only.cuh +++ /dev/null @@ -1,202 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::BlockReduceRakingCommutativeOnly provides raking-based methods of parallel reduction across a CUDA thread block. Does not support non-commutative reduction operators. - */ - -#pragma once - -#include "block_reduce_raking.cuh" -#include "../../warp/warp_reduce.cuh" -#include "../../thread/thread_reduce.cuh" -#include "../../util_ptx.cuh" -#include "../../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \brief BlockReduceRakingCommutativeOnly provides raking-based methods of parallel reduction across a CUDA thread block. Does not support non-commutative reduction operators. Does not support block sizes that are not a multiple of the warp size. - */ -template < - typename T, ///< Data type being reduced - int BLOCK_DIM_X, ///< The thread block length in threads along the X dimension - int BLOCK_DIM_Y, ///< The thread block length in threads along the Y dimension - int BLOCK_DIM_Z, ///< The thread block length in threads along the Z dimension - int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective -struct BlockReduceRakingCommutativeOnly -{ - /// Constants - enum - { - /// The thread block size in threads - BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, - }; - - // The fall-back implementation to use when BLOCK_THREADS is not a multiple of the warp size or not all threads have valid values - typedef BlockReduceRaking FallBack; - - /// Constants - enum - { - /// Number of warp threads - WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH), - - /// Whether or not to use fall-back - USE_FALLBACK = ((BLOCK_THREADS % WARP_THREADS != 0) || (BLOCK_THREADS <= WARP_THREADS)), - - /// Number of raking threads - RAKING_THREADS = WARP_THREADS, - - /// Number of threads actually sharing items with the raking threads - SHARING_THREADS = CUB_MAX(1, BLOCK_THREADS - RAKING_THREADS), - - /// Number of raking elements per warp synchronous raking thread - SEGMENT_LENGTH = SHARING_THREADS / WARP_THREADS, - }; - - /// WarpReduce utility type - typedef WarpReduce WarpReduce; - - /// Layout type for padded thread block raking grid - typedef BlockRakingLayout BlockRakingLayout; - - /// Shared memory storage layout type - struct _TempStorage - { - union - { - struct - { - typename WarpReduce::TempStorage warp_storage; ///< Storage for warp-synchronous reduction - typename BlockRakingLayout::TempStorage raking_grid; ///< Padded threadblock raking grid - }; - typename FallBack::TempStorage fallback_storage; ///< Fall-back storage for non-commutative block scan - }; - }; - - - /// Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - - // Thread fields - _TempStorage &temp_storage; - unsigned int linear_tid; - - - /// Constructor - __device__ __forceinline__ BlockReduceRakingCommutativeOnly( - TempStorage &temp_storage) - : - 
temp_storage(temp_storage.Alias()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) - {} - - - /// Computes a threadblock-wide reduction using addition (+) as the reduction operator. The first num_valid threads each contribute one reduction partial. The return value is only valid for thread0. - template - __device__ __forceinline__ T Sum( - T partial, ///< [in] Calling thread's input partial reductions - int num_valid) ///< [in] Number of valid elements (may be less than BLOCK_THREADS) - { - if (USE_FALLBACK || !FULL_TILE) - { - return FallBack(temp_storage.fallback_storage).template Sum(partial, num_valid); - } - else - { - // Place partial into shared memory grid - if (linear_tid >= RAKING_THREADS) - *BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid - RAKING_THREADS) = partial; - - CTA_SYNC(); - - // Reduce parallelism to one warp - if (linear_tid < RAKING_THREADS) - { - // Raking reduction in grid - T *raking_segment = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid); - partial = ThreadReduce(raking_segment, cub::Sum(), partial); - - // Warpscan - partial = WarpReduce(temp_storage.warp_storage).Sum(partial); - } - } - - return partial; - } - - - /// Computes a threadblock-wide reduction using the specified reduction operator. The first num_valid threads each contribute one reduction partial. The return value is only valid for thread0. - template < - bool FULL_TILE, - typename ReductionOp> - __device__ __forceinline__ T Reduce( - T partial, ///< [in] Calling thread's input partial reductions - int num_valid, ///< [in] Number of valid elements (may be less than BLOCK_THREADS) - ReductionOp reduction_op) ///< [in] Binary reduction operator - { - if (USE_FALLBACK || !FULL_TILE) - { - return FallBack(temp_storage.fallback_storage).template Reduce(partial, num_valid, reduction_op); - } - else - { - // Place partial into shared memory grid - if (linear_tid >= RAKING_THREADS) - *BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid - RAKING_THREADS) = partial; - - CTA_SYNC(); - - // Reduce parallelism to one warp - if (linear_tid < RAKING_THREADS) - { - // Raking reduction in grid - T *raking_segment = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid); - partial = ThreadReduce(raking_segment, reduction_op, partial); - - // Warpscan - partial = WarpReduce(temp_storage.warp_storage).Reduce(partial, reduction_op); - } - } - - return partial; - } - -}; - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/block/specializations/block_reduce_warp_reductions.cuh b/ml-xgboost/cub/cub/block/specializations/block_reduce_warp_reductions.cuh deleted file mode 100644 index bcca3a8..0000000 --- a/ml-xgboost/cub/cub/block/specializations/block_reduce_warp_reductions.cuh +++ /dev/null @@ -1,222 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::BlockReduceWarpReductions provides variants of warp-reduction-based parallel reduction across a CUDA threadblock. Supports non-commutative reduction operators. - */ - -#pragma once - -#include "../../warp/warp_reduce.cuh" -#include "../../util_ptx.cuh" -#include "../../util_arch.cuh" -#include "../../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \brief BlockReduceWarpReductions provides variants of warp-reduction-based parallel reduction across a CUDA threadblock. Supports non-commutative reduction operators. 
- */
-template <
-    typename    T,              ///< Data type being reduced
-    int         BLOCK_DIM_X,    ///< The thread block length in threads along the X dimension
-    int         BLOCK_DIM_Y,    ///< The thread block length in threads along the Y dimension
-    int         BLOCK_DIM_Z,    ///< The thread block length in threads along the Z dimension
-    int         PTX_ARCH>       ///< The PTX compute capability for which to specialize this collective
-struct BlockReduceWarpReductions
-{
-    /// Constants
-    enum
-    {
-        /// The thread block size in threads
-        BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,
-
-        /// Number of warp threads
-        WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH),
-
-        /// Number of active warps
-        WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS,
-
-        /// The logical warp size for warp reductions
-        LOGICAL_WARP_SIZE = CUB_MIN(BLOCK_THREADS, WARP_THREADS),
-
-        /// Whether or not the logical warp size evenly divides the threadblock size
-        EVEN_WARP_MULTIPLE = (BLOCK_THREADS % LOGICAL_WARP_SIZE == 0)
-    };
-
-
-    /// WarpReduce utility type
-    typedef typename WarpReduce<T, LOGICAL_WARP_SIZE, PTX_ARCH>::InternalWarpReduce WarpReduce;
-
-
-    /// Shared memory storage layout type
-    struct _TempStorage
-    {
-        typename WarpReduce::TempStorage    warp_reduce[WARPS];     ///< Buffer for warp-synchronous scan
-        T                                   warp_aggregates[WARPS]; ///< Shared totals from each warp-synchronous scan
-        T                                   block_prefix;           ///< Shared prefix for the entire threadblock
-    };
-
-    /// Alias wrapper allowing storage to be unioned
-    struct TempStorage : Uninitialized<_TempStorage> {};
-
-
-    // Thread fields
-    _TempStorage &temp_storage;
-    unsigned int linear_tid;
-    unsigned int warp_id;
-    unsigned int lane_id;
-
-
-    /// Constructor
-    __device__ __forceinline__ BlockReduceWarpReductions(
-        TempStorage &temp_storage)
-    :
-        temp_storage(temp_storage.Alias()),
-        linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)),
-        warp_id((WARPS == 1) ? 0 : linear_tid / WARP_THREADS),
-        lane_id(LaneId())
-    {}
-
-
-    template <bool FULL_TILE, typename ReductionOp, int SUCCESSOR_WARP>
-    __device__ __forceinline__ T ApplyWarpAggregates(
-        ReductionOp                 reduction_op,       ///< [in] Binary scan operator
-        T                           warp_aggregate,     ///< [in] [lane0 only] Warp-wide aggregate reduction of input items
-        int                         num_valid,          ///< [in] Number of valid elements (may be less than BLOCK_THREADS)
-        Int2Type<SUCCESSOR_WARP>    /*successor_warp*/)
-    {
-        if (FULL_TILE || (SUCCESSOR_WARP * LOGICAL_WARP_SIZE < num_valid))
-        {
-            T addend = temp_storage.warp_aggregates[SUCCESSOR_WARP];
-            warp_aggregate = reduction_op(warp_aggregate, addend);
-        }
-        return ApplyWarpAggregates<FULL_TILE>(reduction_op, warp_aggregate, num_valid, Int2Type<SUCCESSOR_WARP + 1>());
-    }
-
-    template <bool FULL_TILE, typename ReductionOp>
-    __device__ __forceinline__ T ApplyWarpAggregates(
-        ReductionOp     /*reduction_op*/,   ///< [in] Binary scan operator
-        T               warp_aggregate,     ///< [in] [lane0 only] Warp-wide aggregate reduction of input items
-        int             /*num_valid*/,      ///< [in] Number of valid elements (may be less than BLOCK_THREADS)
-        Int2Type<WARPS> /*successor_warp*/)
-    {
-        return warp_aggregate;
-    }
-
-
-    /// Returns block-wide aggregate in thread0.
- template < - bool FULL_TILE, - typename ReductionOp> - __device__ __forceinline__ T ApplyWarpAggregates( - ReductionOp reduction_op, ///< [in] Binary scan operator - T warp_aggregate, ///< [in] [lane0 only] Warp-wide aggregate reduction of input items - int num_valid) ///< [in] Number of valid elements (may be less than BLOCK_THREADS) - { - // Share lane aggregates - if (lane_id == 0) - { - temp_storage.warp_aggregates[warp_id] = warp_aggregate; - } - - CTA_SYNC(); - - // Update total aggregate in warp 0, lane 0 - if (linear_tid == 0) - { - warp_aggregate = ApplyWarpAggregates(reduction_op, warp_aggregate, num_valid, Int2Type<1>()); - } - - return warp_aggregate; - } - - - /// Computes a threadblock-wide reduction using addition (+) as the reduction operator. The first num_valid threads each contribute one reduction partial. The return value is only valid for thread0. - template - __device__ __forceinline__ T Sum( - T input, ///< [in] Calling thread's input partial reductions - int num_valid) ///< [in] Number of valid elements (may be less than BLOCK_THREADS) - { - cub::Sum reduction_op; - unsigned int warp_offset = warp_id * LOGICAL_WARP_SIZE; - unsigned int warp_num_valid = (FULL_TILE && EVEN_WARP_MULTIPLE) ? - LOGICAL_WARP_SIZE : - (warp_offset < num_valid) ? - num_valid - warp_offset : - 0; - - // Warp reduction in every warp - T warp_aggregate = WarpReduce(temp_storage.warp_reduce[warp_id]).template Reduce<(FULL_TILE && EVEN_WARP_MULTIPLE), 1>( - input, - warp_num_valid, - cub::Sum()); - - // Update outputs and block_aggregate with warp-wide aggregates from lane-0s - return ApplyWarpAggregates(reduction_op, warp_aggregate, num_valid); - } - - - /// Computes a threadblock-wide reduction using the specified reduction operator. The first num_valid threads each contribute one reduction partial. The return value is only valid for thread0. - template < - bool FULL_TILE, - typename ReductionOp> - __device__ __forceinline__ T Reduce( - T input, ///< [in] Calling thread's input partial reductions - int num_valid, ///< [in] Number of valid elements (may be less than BLOCK_THREADS) - ReductionOp reduction_op) ///< [in] Binary reduction operator - { - unsigned int warp_offset = warp_id * LOGICAL_WARP_SIZE; - unsigned int warp_num_valid = (FULL_TILE && EVEN_WARP_MULTIPLE) ? - LOGICAL_WARP_SIZE : - (warp_offset < static_cast(num_valid)) ? - num_valid - warp_offset : - 0; - - // Warp reduction in every warp - T warp_aggregate = WarpReduce(temp_storage.warp_reduce[warp_id]).template Reduce<(FULL_TILE && EVEN_WARP_MULTIPLE), 1>( - input, - warp_num_valid, - reduction_op); - - // Update outputs and block_aggregate with warp-wide aggregates from lane-0s - return ApplyWarpAggregates(reduction_op, warp_aggregate, num_valid); - } - -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/block/specializations/block_scan_raking.cuh b/ml-xgboost/cub/cub/block/specializations/block_scan_raking.cuh deleted file mode 100644 index c0d0f51..0000000 --- a/ml-xgboost/cub/cub/block/specializations/block_scan_raking.cuh +++ /dev/null @@ -1,666 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - - -/** - * \file - * cub::BlockScanRaking provides variants of raking-based parallel prefix scan across a CUDA threadblock. - */ - -#pragma once - -#include "../../util_ptx.cuh" -#include "../../util_arch.cuh" -#include "../../block/block_raking_layout.cuh" -#include "../../thread/thread_reduce.cuh" -#include "../../thread/thread_scan.cuh" -#include "../../warp/warp_scan.cuh" -#include "../../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \brief BlockScanRaking provides variants of raking-based parallel prefix scan across a CUDA threadblock. 
- */
-template <
-    typename    T,              ///< Data type being scanned
-    int         BLOCK_DIM_X,    ///< The thread block length in threads along the X dimension
-    int         BLOCK_DIM_Y,    ///< The thread block length in threads along the Y dimension
-    int         BLOCK_DIM_Z,    ///< The thread block length in threads along the Z dimension
-    bool        MEMOIZE,        ///< Whether or not to buffer outer raking scan partials to incur fewer shared memory reads at the expense of higher register pressure
-    int         PTX_ARCH>       ///< The PTX compute capability for which to specialize this collective
-struct BlockScanRaking
-{
-    //---------------------------------------------------------------------
-    // Types and constants
-    //---------------------------------------------------------------------
-
-    /// Constants
-    enum
-    {
-        /// The thread block size in threads
-        BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,
-    };
-
-    /// Layout type for padded threadblock raking grid
-    typedef BlockRakingLayout<T, BLOCK_THREADS, PTX_ARCH> BlockRakingLayout;
-
-    /// Constants
-    enum
-    {
-        /// Number of raking threads
-        RAKING_THREADS = BlockRakingLayout::RAKING_THREADS,
-
-        /// Number of raking elements per warp synchronous raking thread
-        SEGMENT_LENGTH = BlockRakingLayout::SEGMENT_LENGTH,
-
-        /// Cooperative work can be entirely warp synchronous
-        WARP_SYNCHRONOUS = (BLOCK_THREADS == RAKING_THREADS),
-    };
-
-    /// WarpScan utility type
-    typedef WarpScan<T, RAKING_THREADS, PTX_ARCH> WarpScan;
-
-    /// Shared memory storage layout type
-    struct _TempStorage
-    {
-        typename WarpScan::TempStorage          warp_scan;      ///< Buffer for warp-synchronous scan
-        typename BlockRakingLayout::TempStorage raking_grid;    ///< Padded threadblock raking grid
-        T                                       block_aggregate;    ///< Block aggregate
-    };
-
-
-    /// Alias wrapper allowing storage to be unioned
-    struct TempStorage : Uninitialized<_TempStorage> {};
-
-
-    //---------------------------------------------------------------------
-    // Per-thread fields
-    //---------------------------------------------------------------------
-
-    // Thread fields
-    _TempStorage    &temp_storage;
-    unsigned int    linear_tid;
-    T               cached_segment[SEGMENT_LENGTH];
-
-
-    //---------------------------------------------------------------------
-    // Utility methods
-    //---------------------------------------------------------------------
-
-    /// Templated reduction
-    template <int ITERATION, typename ScanOp>
-    __device__ __forceinline__ T GuardedReduce(
-        T*                  raking_ptr,     ///< [in] Input array
-        ScanOp              scan_op,        ///< [in] Binary reduction operator
-        T                   raking_partial, ///< [in] Prefix to seed reduction with
-        Int2Type<ITERATION> /*iteration*/)
-    {
-        if ((BlockRakingLayout::UNGUARDED) || (((linear_tid * SEGMENT_LENGTH) + ITERATION) < BLOCK_THREADS))
-        {
-            T addend = raking_ptr[ITERATION];
-            raking_partial = scan_op(raking_partial, addend);
-        }
-
-        return GuardedReduce(raking_ptr, scan_op, raking_partial, Int2Type<ITERATION + 1>());
-    }
-
-
-    /// Templated reduction (base case)
-    template <typename ScanOp>
-    __device__ __forceinline__ T GuardedReduce(
-        T*                          /*raking_ptr*/,     ///< [in] Input array
-        ScanOp                      /*scan_op*/,        ///< [in] Binary reduction operator
-        T                           raking_partial,     ///< [in] Prefix to seed reduction with
-        Int2Type<SEGMENT_LENGTH>    /*iteration*/)
-    {
-        return raking_partial;
-    }
-
-
-    /// Templated copy
-    template <int ITERATION>
-    __device__ __forceinline__ void CopySegment(
-        T*                  out,            ///< [out] Out array
-        T*                  in,             ///< [in] Input array
-        Int2Type<ITERATION> /*iteration*/)
-    {
-        out[ITERATION] = in[ITERATION];
-        CopySegment(out, in, Int2Type<ITERATION + 1>());
-    }
-
-
-    /// Templated copy (base case)
-    __device__ __forceinline__ void CopySegment(
-        T*                          /*out*/,        ///< [out] Out array
-        T*                          /*in*/,         ///< [in] Input array
-        Int2Type<SEGMENT_LENGTH>    /*iteration*/)
-    {}
-
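[Editor's note] The GuardedReduce / CopySegment pairs above use a standard CUB idiom: an Int2Type<N> tag parameter drives template recursion, so the per-segment loop is fully unrolled at compile time, and the overload taking Int2Type<SEGMENT_LENGTH> terminates the recursion through overload resolution. A minimal self-contained sketch of the same idiom follows; StaticFold, MaxOp, and foldDemo are illustrative names, not CUB identifiers:

    template <int N> struct Int2Type { enum { VALUE = N }; };

    // Compile-time-unrolled fold over a fixed-size array: each step peels
    // index I, and the Int2Type<SIZE> overload is more specialized, so it
    // wins overload resolution on the final step and ends the recursion.
    template <int SIZE>
    struct StaticFold
    {
        template <int I, typename Op, typename T>
        static __device__ __forceinline__ T Run(const T* a, T acc, Op op, Int2Type<I>)
        {
            return Run(a, op(acc, a[I]), op, Int2Type<I + 1>());
        }

        template <typename Op, typename T>
        static __device__ __forceinline__ T Run(const T*, T acc, Op, Int2Type<SIZE>)
        {
            return acc;     // base case: all SIZE elements folded
        }
    };

    struct MaxOp
    {
        __device__ int operator()(int a, int b) const { return a > b ? a : b; }
    };

    __global__ void foldDemo(const int* in, int* out)
    {
        // Fold elements 1..3 onto element 0, the same shape as a raking
        // thread folding the rest of its segment onto its first element.
        *out = StaticFold<4>::Run(in, in[0], MaxOp(), Int2Type<1>());
    }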
- - /// Performs upsweep raking reduction, returning the aggregate - template - __device__ __forceinline__ T Upsweep( - ScanOp scan_op) - { - T *smem_raking_ptr = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid); - - // Read data into registers - CopySegment(cached_segment, smem_raking_ptr, Int2Type<0>()); - - T raking_partial = cached_segment[0]; - - return GuardedReduce(cached_segment, scan_op, raking_partial, Int2Type<1>()); - } - - - /// Performs exclusive downsweep raking scan - template - __device__ __forceinline__ void ExclusiveDownsweep( - ScanOp scan_op, - T raking_partial, - bool apply_prefix = true) - { - T *smem_raking_ptr = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid); - - // Read data back into registers - if (!MEMOIZE) - { - CopySegment(cached_segment, smem_raking_ptr, Int2Type<0>()); - } - - ThreadScanExclusive(cached_segment, cached_segment, scan_op, raking_partial, apply_prefix); - - // Write data back to smem - CopySegment(smem_raking_ptr, cached_segment, Int2Type<0>()); - } - - - /// Performs inclusive downsweep raking scan - template - __device__ __forceinline__ void InclusiveDownsweep( - ScanOp scan_op, - T raking_partial, - bool apply_prefix = true) - { - T *smem_raking_ptr = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid); - - // Read data back into registers - if (!MEMOIZE) - { - CopySegment(cached_segment, smem_raking_ptr, Int2Type<0>()); - } - - ThreadScanInclusive(cached_segment, cached_segment, scan_op, raking_partial, apply_prefix); - - // Write data back to smem - CopySegment(smem_raking_ptr, cached_segment, Int2Type<0>()); - } - - - //--------------------------------------------------------------------- - // Constructors - //--------------------------------------------------------------------- - - /// Constructor - __device__ __forceinline__ BlockScanRaking( - TempStorage &temp_storage) - : - temp_storage(temp_storage.Alias()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) - {} - - - //--------------------------------------------------------------------- - // Exclusive scans - //--------------------------------------------------------------------- - - /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. With no initial value, the output computed for thread0 is undefined. 
- template - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input item - T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op) ///< [in] Binary scan operator - { - if (WARP_SYNCHRONOUS) - { - // Short-circuit directly to warp-synchronous scan - WarpScan(temp_storage.warp_scan).ExclusiveScan(input, exclusive_output, scan_op); - } - else - { - // Place thread partial into shared memory raking grid - T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); - *placement_ptr = input; - - CTA_SYNC(); - - // Reduce parallelism down to just raking threads - if (linear_tid < RAKING_THREADS) - { - // Raking upsweep reduction across shared partials - T upsweep_partial = Upsweep(scan_op); - - // Warp-synchronous scan - T exclusive_partial; - WarpScan(temp_storage.warp_scan).ExclusiveScan(upsweep_partial, exclusive_partial, scan_op); - - // Exclusive raking downsweep scan - ExclusiveDownsweep(scan_op, exclusive_partial, (linear_tid != 0)); - } - - CTA_SYNC(); - - // Grab thread prefix from shared memory - exclusive_output = *placement_ptr; - } - } - - /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. - template - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input items - T &output, ///< [out] Calling thread's output items (may be aliased to \p input) - const T &initial_value, ///< [in] Initial value to seed the exclusive scan - ScanOp scan_op) ///< [in] Binary scan operator - { - if (WARP_SYNCHRONOUS) - { - // Short-circuit directly to warp-synchronous scan - WarpScan(temp_storage.warp_scan).ExclusiveScan(input, output, initial_value, scan_op); - } - else - { - // Place thread partial into shared memory raking grid - T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); - *placement_ptr = input; - - CTA_SYNC(); - - // Reduce parallelism down to just raking threads - if (linear_tid < RAKING_THREADS) - { - // Raking upsweep reduction across shared partials - T upsweep_partial = Upsweep(scan_op); - - // Exclusive Warp-synchronous scan - T exclusive_partial; - WarpScan(temp_storage.warp_scan).ExclusiveScan(upsweep_partial, exclusive_partial, initial_value, scan_op); - - // Exclusive raking downsweep scan - ExclusiveDownsweep(scan_op, exclusive_partial); - } - - CTA_SYNC(); - - // Grab exclusive partial from shared memory - output = *placement_ptr; - } - } - - - /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. With no initial value, the output computed for thread0 is undefined. 
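[Editor's note] As context for the overload declared next, a hedged sketch of how this variant surfaces through CUB's public cub::BlockScan front end; the kernel and buffer names are illustrative:

    #include <cub/cub.cuh>

    // Every thread receives the block-wide aggregate; because no initial
    // value is supplied, thread0's scan output is undefined, exactly as
    // the comment above states.
    template <int BLOCK_THREADS>
    __global__ void blockMaxScan(const int* d_in, int* d_out, int* d_aggregate)
    {
        typedef cub::BlockScan<int, BLOCK_THREADS> BlockScan;
        __shared__ typename BlockScan::TempStorage temp_storage;

        int item = d_in[threadIdx.x];
        int block_aggregate;

        // Exclusive block-wide maximum scan that also returns the aggregate.
        BlockScan(temp_storage).ExclusiveScan(item, item, cub::Max(), block_aggregate);

        d_out[threadIdx.x] = item;
        if (threadIdx.x == 0) *d_aggregate = block_aggregate;
    }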
- template - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input item - T &output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan operator - T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items - { - if (WARP_SYNCHRONOUS) - { - // Short-circuit directly to warp-synchronous scan - WarpScan(temp_storage.warp_scan).ExclusiveScan(input, output, scan_op, block_aggregate); - } - else - { - // Place thread partial into shared memory raking grid - T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); - *placement_ptr = input; - - CTA_SYNC(); - - // Reduce parallelism down to just raking threads - if (linear_tid < RAKING_THREADS) - { - // Raking upsweep reduction across shared partials - T upsweep_partial= Upsweep(scan_op); - - // Warp-synchronous scan - T inclusive_partial; - T exclusive_partial; - WarpScan(temp_storage.warp_scan).Scan(upsweep_partial, inclusive_partial, exclusive_partial, scan_op); - - // Exclusive raking downsweep scan - ExclusiveDownsweep(scan_op, exclusive_partial, (linear_tid != 0)); - - // Broadcast aggregate to all threads - if (linear_tid == RAKING_THREADS - 1) - temp_storage.block_aggregate = inclusive_partial; - } - - CTA_SYNC(); - - // Grab thread prefix from shared memory - output = *placement_ptr; - - // Retrieve block aggregate - block_aggregate = temp_storage.block_aggregate; - } - } - - - /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. - template - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input items - T &output, ///< [out] Calling thread's output items (may be aliased to \p input) - const T &initial_value, ///< [in] Initial value to seed the exclusive scan - ScanOp scan_op, ///< [in] Binary scan operator - T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items - { - if (WARP_SYNCHRONOUS) - { - // Short-circuit directly to warp-synchronous scan - WarpScan(temp_storage.warp_scan).ExclusiveScan(input, output, initial_value, scan_op, block_aggregate); - } - else - { - // Place thread partial into shared memory raking grid - T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); - *placement_ptr = input; - - CTA_SYNC(); - - // Reduce parallelism down to just raking threads - if (linear_tid < RAKING_THREADS) - { - // Raking upsweep reduction across shared partials - T upsweep_partial = Upsweep(scan_op); - - // Warp-synchronous scan - T exclusive_partial; - WarpScan(temp_storage.warp_scan).ExclusiveScan(upsweep_partial, exclusive_partial, initial_value, scan_op, block_aggregate); - - // Exclusive raking downsweep scan - ExclusiveDownsweep(scan_op, exclusive_partial); - - // Broadcast aggregate to other threads - if (linear_tid == 0) - temp_storage.block_aggregate = block_aggregate; - } - - CTA_SYNC(); - - // Grab exclusive partial from shared memory - output = *placement_ptr; - - // Retrieve block aggregate - block_aggregate = temp_storage.block_aggregate; - } - } - - - /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. 
the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by lane0 in that warp is used as the "seed" value that logically prefixes the threadblock's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. - template < - typename ScanOp, - typename BlockPrefixCallbackOp> - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input item - T &output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan operator - BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] [warp0 only] Call-back functor for specifying a threadblock-wide prefix to be applied to all inputs. - { - if (WARP_SYNCHRONOUS) - { - // Short-circuit directly to warp-synchronous scan - T block_aggregate; - WarpScan warp_scan(temp_storage.warp_scan); - warp_scan.ExclusiveScan(input, output, scan_op, block_aggregate); - - // Obtain warp-wide prefix in lane0, then broadcast to other lanes - T block_prefix = block_prefix_callback_op(block_aggregate); - block_prefix = warp_scan.Broadcast(block_prefix, 0); - - output = scan_op(block_prefix, output); - if (linear_tid == 0) - output = block_prefix; - } - else - { - // Place thread partial into shared memory raking grid - T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); - *placement_ptr = input; - - CTA_SYNC(); - - // Reduce parallelism down to just raking threads - if (linear_tid < RAKING_THREADS) - { - WarpScan warp_scan(temp_storage.warp_scan); - - // Raking upsweep reduction across shared partials - T upsweep_partial = Upsweep(scan_op); - - // Warp-synchronous scan - T exclusive_partial, block_aggregate; - warp_scan.ExclusiveScan(upsweep_partial, exclusive_partial, scan_op, block_aggregate); - - // Obtain block-wide prefix in lane0, then broadcast to other lanes - T block_prefix = block_prefix_callback_op(block_aggregate); - block_prefix = warp_scan.Broadcast(block_prefix, 0); - - // Update prefix with warpscan exclusive partial - T downsweep_prefix = scan_op(block_prefix, exclusive_partial); - if (linear_tid == 0) - downsweep_prefix = block_prefix; - - // Exclusive raking downsweep scan - ExclusiveDownsweep(scan_op, downsweep_prefix); - } - - CTA_SYNC(); - - // Grab thread prefix from shared memory - output = *placement_ptr; - } - } - - - //--------------------------------------------------------------------- - // Inclusive scans - //--------------------------------------------------------------------- - - /// Computes an inclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. 
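[Editor's note] Before the inclusive variants, an aside on the prefix-callback overload defined above: the functor is invoked once per tile by warp0, and the value returned by lane0 seeds that tile's scan, which is what makes single-block, multi-tile running scans possible. A hedged usage sketch through the public cub::BlockScan interface; RunningPrefixOp and tiledExclusiveSum are illustrative names:

    #include <cub/cub.cuh>

    // Stateful functor: returns the prefix for the current tile and folds
    // the tile's aggregate into the running total for the next call.
    struct RunningPrefixOp
    {
        int running_total;

        __device__ RunningPrefixOp(int seed) : running_total(seed) {}

        __device__ int operator()(int block_aggregate)
        {
            int old_prefix = running_total;
            running_total += block_aggregate;
            return old_prefix;
        }
    };

    template <int BLOCK_THREADS>
    __global__ void tiledExclusiveSum(const int* d_in, int* d_out, int num_tiles)
    {
        typedef cub::BlockScan<int, BLOCK_THREADS> BlockScan;
        __shared__ typename BlockScan::TempStorage temp_storage;

        RunningPrefixOp prefix_op(0);
        for (int tile = 0; tile < num_tiles; ++tile)
        {
            int idx = tile * BLOCK_THREADS + threadIdx.x;
            int item = d_in[idx];
            BlockScan(temp_storage).ExclusiveSum(item, item, prefix_op);
            __syncthreads();    // temp_storage is reused across tiles
            d_out[idx] = item;
        }
    }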
- template - __device__ __forceinline__ void InclusiveScan( - T input, ///< [in] Calling thread's input item - T &output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op) ///< [in] Binary scan operator - { - if (WARP_SYNCHRONOUS) - { - // Short-circuit directly to warp-synchronous scan - WarpScan(temp_storage.warp_scan).InclusiveScan(input, output, scan_op); - } - else - { - // Place thread partial into shared memory raking grid - T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); - *placement_ptr = input; - - CTA_SYNC(); - - // Reduce parallelism down to just raking threads - if (linear_tid < RAKING_THREADS) - { - // Raking upsweep reduction across shared partials - T upsweep_partial = Upsweep(scan_op); - - // Exclusive Warp-synchronous scan - T exclusive_partial; - WarpScan(temp_storage.warp_scan).ExclusiveScan(upsweep_partial, exclusive_partial, scan_op); - - // Inclusive raking downsweep scan - InclusiveDownsweep(scan_op, exclusive_partial, (linear_tid != 0)); - } - - CTA_SYNC(); - - // Grab thread prefix from shared memory - output = *placement_ptr; - } - } - - - /// Computes an inclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. - template - __device__ __forceinline__ void InclusiveScan( - T input, ///< [in] Calling thread's input item - T &output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan operator - T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items - { - if (WARP_SYNCHRONOUS) - { - // Short-circuit directly to warp-synchronous scan - WarpScan(temp_storage.warp_scan).InclusiveScan(input, output, scan_op, block_aggregate); - } - else - { - // Place thread partial into shared memory raking grid - T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); - *placement_ptr = input; - - CTA_SYNC(); - - // Reduce parallelism down to just raking threads - if (linear_tid < RAKING_THREADS) - { - // Raking upsweep reduction across shared partials - T upsweep_partial = Upsweep(scan_op); - - // Warp-synchronous scan - T inclusive_partial; - T exclusive_partial; - WarpScan(temp_storage.warp_scan).Scan(upsweep_partial, inclusive_partial, exclusive_partial, scan_op); - - // Inclusive raking downsweep scan - InclusiveDownsweep(scan_op, exclusive_partial, (linear_tid != 0)); - - // Broadcast aggregate to all threads - if (linear_tid == RAKING_THREADS - 1) - temp_storage.block_aggregate = inclusive_partial; - } - - CTA_SYNC(); - - // Grab thread prefix from shared memory - output = *placement_ptr; - - // Retrieve block aggregate - block_aggregate = temp_storage.block_aggregate; - } - } - - - /// Computes an inclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by lane0 in that warp is used as the "seed" value that logically prefixes the threadblock's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. 
- template < - typename ScanOp, - typename BlockPrefixCallbackOp> - __device__ __forceinline__ void InclusiveScan( - T input, ///< [in] Calling thread's input item - T &output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan operator - BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] [warp0 only] Call-back functor for specifying a threadblock-wide prefix to be applied to all inputs. - { - if (WARP_SYNCHRONOUS) - { - // Short-circuit directly to warp-synchronous scan - T block_aggregate; - WarpScan warp_scan(temp_storage.warp_scan); - warp_scan.InclusiveScan(input, output, scan_op, block_aggregate); - - // Obtain warp-wide prefix in lane0, then broadcast to other lanes - T block_prefix = block_prefix_callback_op(block_aggregate); - block_prefix = warp_scan.Broadcast(block_prefix, 0); - - // Update prefix with exclusive warpscan partial - output = scan_op(block_prefix, output); - } - else - { - // Place thread partial into shared memory raking grid - T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); - *placement_ptr = input; - - CTA_SYNC(); - - // Reduce parallelism down to just raking threads - if (linear_tid < RAKING_THREADS) - { - WarpScan warp_scan(temp_storage.warp_scan); - - // Raking upsweep reduction across shared partials - T upsweep_partial = Upsweep(scan_op); - - // Warp-synchronous scan - T exclusive_partial, block_aggregate; - warp_scan.ExclusiveScan(upsweep_partial, exclusive_partial, scan_op, block_aggregate); - - // Obtain block-wide prefix in lane0, then broadcast to other lanes - T block_prefix = block_prefix_callback_op(block_aggregate); - block_prefix = warp_scan.Broadcast(block_prefix, 0); - - // Update prefix with warpscan exclusive partial - T downsweep_prefix = scan_op(block_prefix, exclusive_partial); - if (linear_tid == 0) - downsweep_prefix = block_prefix; - - // Inclusive raking downsweep scan - InclusiveDownsweep(scan_op, downsweep_prefix); - } - - CTA_SYNC(); - - // Grab thread prefix from shared memory - output = *placement_ptr; - } - } - -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/block/specializations/block_scan_warp_scans.cuh b/ml-xgboost/cub/cub/block/specializations/block_scan_warp_scans.cuh deleted file mode 100644 index e40f735..0000000 --- a/ml-xgboost/cub/cub/block/specializations/block_scan_warp_scans.cuh +++ /dev/null @@ -1,392 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::BlockScanWarpscans provides warpscan-based variants of parallel prefix scan across a CUDA threadblock. - */ - -#pragma once - -#include "../../util_arch.cuh" -#include "../../util_ptx.cuh" -#include "../../warp/warp_scan.cuh" -#include "../../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/** - * \brief BlockScanWarpScans provides warpscan-based variants of parallel prefix scan across a CUDA threadblock. - */ -template < - typename T, - int BLOCK_DIM_X, ///< The thread block length in threads along the X dimension - int BLOCK_DIM_Y, ///< The thread block length in threads along the Y dimension - int BLOCK_DIM_Z, ///< The thread block length in threads along the Z dimension - int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective -struct BlockScanWarpScans -{ - //--------------------------------------------------------------------- - // Types and constants - //--------------------------------------------------------------------- - - /// Constants - enum - { - /// Number of warp threads - WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH), - - /// The thread block size in threads - BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, - - /// Number of active warps - WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS, - }; - - /// WarpScan utility type - typedef WarpScan WarpScanT; - - /// WarpScan utility type - typedef WarpScan WarpAggregateScan; - - /// Shared memory storage layout type - - struct __align__(32) _TempStorage - { - T warp_aggregates[WARPS]; - typename WarpScanT::TempStorage warp_scan[WARPS]; ///< Buffer for warp-synchronous scans - T block_prefix; ///< Shared prefix for the entire threadblock - }; - - - /// Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - - //--------------------------------------------------------------------- - // Per-thread fields - //--------------------------------------------------------------------- - - // Thread fields - _TempStorage &temp_storage; - unsigned int linear_tid; - unsigned int warp_id; - unsigned int lane_id; - - - //--------------------------------------------------------------------- - // Constructors - //--------------------------------------------------------------------- - - /// Constructor - __device__ __forceinline__ BlockScanWarpScans( - TempStorage &temp_storage) - : - temp_storage(temp_storage.Alias()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)), - warp_id((WARPS == 1) ? 
0 : linear_tid / WARP_THREADS), - lane_id(LaneId()) - {} - - - //--------------------------------------------------------------------- - // Utility methods - //--------------------------------------------------------------------- - - template - __device__ __forceinline__ void ApplyWarpAggregates( - T &warp_prefix, ///< [out] The calling thread's partial reduction - ScanOp scan_op, ///< [in] Binary scan operator - T &block_aggregate, ///< [out] Threadblock-wide aggregate reduction of input items - Int2Type /*addend_warp*/) - { - if (warp_id == WARP) - warp_prefix = block_aggregate; - - T addend = temp_storage.warp_aggregates[WARP]; - block_aggregate = scan_op(block_aggregate, addend); - - ApplyWarpAggregates(warp_prefix, scan_op, block_aggregate, Int2Type()); - } - - template - __device__ __forceinline__ void ApplyWarpAggregates( - T &/*warp_prefix*/, ///< [out] The calling thread's partial reduction - ScanOp /*scan_op*/, ///< [in] Binary scan operator - T &/*block_aggregate*/, ///< [out] Threadblock-wide aggregate reduction of input items - Int2Type /*addend_warp*/) - {} - - - /// Use the warp-wide aggregates to compute the calling warp's prefix. Also returns block-wide aggregate in all threads. - template - __device__ __forceinline__ T ComputeWarpPrefix( - ScanOp scan_op, ///< [in] Binary scan operator - T warp_aggregate, ///< [in] [laneWARP_THREADS - 1 only] Warp-wide aggregate reduction of input items - T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items - { - // Last lane in each warp shares its warp-aggregate - if (lane_id == WARP_THREADS - 1) - temp_storage.warp_aggregates[warp_id] = warp_aggregate; - - CTA_SYNC(); - - // Accumulate block aggregates and save the one that is our warp's prefix - T warp_prefix; - block_aggregate = temp_storage.warp_aggregates[0]; - - // Use template unrolling (since the PTX backend can't handle unrolling it for SM1x) - ApplyWarpAggregates(warp_prefix, scan_op, block_aggregate, Int2Type<1>()); -/* - #pragma unroll - for (int WARP = 1; WARP < WARPS; ++WARP) - { - if (warp_id == WARP) - warp_prefix = block_aggregate; - - T addend = temp_storage.warp_aggregates[WARP]; - block_aggregate = scan_op(block_aggregate, addend); - } -*/ - - return warp_prefix; - } - - - /// Use the warp-wide aggregates and initial-value to compute the calling warp's prefix. Also returns block-wide aggregate in all threads. - template - __device__ __forceinline__ T ComputeWarpPrefix( - ScanOp scan_op, ///< [in] Binary scan operator - T warp_aggregate, ///< [in] [laneWARP_THREADS - 1 only] Warp-wide aggregate reduction of input items - T &block_aggregate, ///< [out] Threadblock-wide aggregate reduction of input items - const T &initial_value) ///< [in] Initial value to seed the exclusive scan - { - T warp_prefix = ComputeWarpPrefix(scan_op, warp_aggregate, block_aggregate); - - warp_prefix = scan_op(initial_value, warp_prefix); - - if (warp_id == 0) - warp_prefix = initial_value; - - return warp_prefix; - } - - //--------------------------------------------------------------------- - // Exclusive scans - //--------------------------------------------------------------------- - - /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. With no initial value, the output computed for thread0 is undefined. 
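[Editor's note] As an aside before the overloads that follow, the strategy this specialization implements can be shown in a self-contained sketch (not the CUB code; fixed at 128 threads / 4 warps for brevity): scan within each warp, publish per-warp aggregates, then fold a per-warp prefix back into each lane's result.

    __global__ void warpScansExclusiveSum(const int* d_in, int* d_out)
    {
        __shared__ int warp_aggregates[4];
        const int lane = threadIdx.x & 31;
        const int warp = threadIdx.x >> 5;

        int item = d_in[threadIdx.x];

        // First level: inclusive scan within each warp, via shuffles.
        int inclusive = item;
        for (int offset = 1; offset < 32; offset <<= 1)
        {
            int n = __shfl_up_sync(0xffffffffu, inclusive, offset);
            if (lane >= offset) inclusive += n;
        }

        // Last lane in each warp shares its warp-wide aggregate.
        if (lane == 31) warp_aggregates[warp] = inclusive;
        __syncthreads();

        // Second level: serial fold over predecessor warps' aggregates
        // (the unrolled ApplyWarpAggregates above plays this role).
        int warp_prefix = 0;
        for (int w = 0; w < warp; ++w)
            warp_prefix += warp_aggregates[w];

        // Exclusive result = warp prefix + (inclusive scan minus own item).
        d_out[threadIdx.x] = warp_prefix + inclusive - item;
    }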
- template - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input item - T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op) ///< [in] Binary scan operator - { - // Compute block-wide exclusive scan. The exclusive output from tid0 is invalid. - T block_aggregate; - ExclusiveScan(input, exclusive_output, scan_op, block_aggregate); - } - - - /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. - template - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input items - T &exclusive_output, ///< [out] Calling thread's output items (may be aliased to \p input) - const T &initial_value, ///< [in] Initial value to seed the exclusive scan - ScanOp scan_op) ///< [in] Binary scan operator - { - T block_aggregate; - ExclusiveScan(input, exclusive_output, initial_value, scan_op, block_aggregate); - } - - - /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. With no initial value, the output computed for thread0 is undefined. - template - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input item - T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan operator - T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items - { - // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. - T inclusive_output; - WarpScanT(temp_storage.warp_scan[warp_id]).Scan(input, inclusive_output, exclusive_output, scan_op); - - // Compute the warp-wide prefix and block-wide aggregate for each warp. Warp prefix for warp0 is invalid. - T warp_prefix = ComputeWarpPrefix(scan_op, inclusive_output, block_aggregate); - - // Apply warp prefix to our lane's partial - if (warp_id != 0) - { - exclusive_output = scan_op(warp_prefix, exclusive_output); - if (lane_id == 0) - exclusive_output = warp_prefix; - } - } - - - /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. - template - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input items - T &exclusive_output, ///< [out] Calling thread's output items (may be aliased to \p input) - const T &initial_value, ///< [in] Initial value to seed the exclusive scan - ScanOp scan_op, ///< [in] Binary scan operator - T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items - { - // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. 
- T inclusive_output; - WarpScanT(temp_storage.warp_scan[warp_id]).Scan(input, inclusive_output, exclusive_output, scan_op); - - // Compute the warp-wide prefix and block-wide aggregate for each warp - T warp_prefix = ComputeWarpPrefix(scan_op, inclusive_output, block_aggregate, initial_value); - - // Apply warp prefix to our lane's partial - exclusive_output = scan_op(warp_prefix, exclusive_output); - if (lane_id == 0) - exclusive_output = warp_prefix; - } - - - /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by lane0 in that warp is used as the "seed" value that logically prefixes the threadblock's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. - template < - typename ScanOp, - typename BlockPrefixCallbackOp> - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input item - T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan operator - BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] [warp0 only] Call-back functor for specifying a threadblock-wide prefix to be applied to all inputs. - { - // Compute block-wide exclusive scan. The exclusive output from tid0 is invalid. - T block_aggregate; - ExclusiveScan(input, exclusive_output, scan_op, block_aggregate); - - // Use the first warp to determine the threadblock prefix, returning the result in lane0 - if (warp_id == 0) - { - T block_prefix = block_prefix_callback_op(block_aggregate); - if (lane_id == 0) - { - // Share the prefix with all threads - temp_storage.block_prefix = block_prefix; - exclusive_output = block_prefix; // The block prefix is the exclusive output for tid0 - } - } - - CTA_SYNC(); - - // Incorporate threadblock prefix into outputs - T block_prefix = temp_storage.block_prefix; - if (linear_tid > 0) - { - exclusive_output = scan_op(block_prefix, exclusive_output); - } - } - - - //--------------------------------------------------------------------- - // Inclusive scans - //--------------------------------------------------------------------- - - /// Computes an inclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. - template - __device__ __forceinline__ void InclusiveScan( - T input, ///< [in] Calling thread's input item - T &inclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op) ///< [in] Binary scan operator - { - T block_aggregate; - InclusiveScan(input, inclusive_output, scan_op, block_aggregate); - } - - - /// Computes an inclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. 
- template - __device__ __forceinline__ void InclusiveScan( - T input, ///< [in] Calling thread's input item - T &inclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan operator - T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items - { - WarpScanT(temp_storage.warp_scan[warp_id]).InclusiveScan(input, inclusive_output, scan_op); - - // Compute the warp-wide prefix and block-wide aggregate for each warp. Warp prefix for warp0 is invalid. - T warp_prefix = ComputeWarpPrefix(scan_op, inclusive_output, block_aggregate); - - // Apply warp prefix to our lane's partial - if (warp_id != 0) - { - inclusive_output = scan_op(warp_prefix, inclusive_output); - } - } - - - /// Computes an inclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by lane0 in that warp is used as the "seed" value that logically prefixes the threadblock's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. - template < - typename ScanOp, - typename BlockPrefixCallbackOp> - __device__ __forceinline__ void InclusiveScan( - T input, ///< [in] Calling thread's input item - T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan operator - BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] [warp0 only] Call-back functor for specifying a threadblock-wide prefix to be applied to all inputs. - { - T block_aggregate; - InclusiveScan(input, exclusive_output, scan_op, block_aggregate); - - // Use the first warp to determine the threadblock prefix, returning the result in lane0 - if (warp_id == 0) - { - T block_prefix = block_prefix_callback_op(block_aggregate); - if (lane_id == 0) - { - // Share the prefix with all threads - temp_storage.block_prefix = block_prefix; - } - } - - CTA_SYNC(); - - // Incorporate threadblock prefix into outputs - T block_prefix = temp_storage.block_prefix; - exclusive_output = scan_op(block_prefix, exclusive_output); - } - - -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/block/specializations/block_scan_warp_scans2.cuh b/ml-xgboost/cub/cub/block/specializations/block_scan_warp_scans2.cuh deleted file mode 100644 index 154833b..0000000 --- a/ml-xgboost/cub/cub/block/specializations/block_scan_warp_scans2.cuh +++ /dev/null @@ -1,436 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::BlockScanWarpscans provides warpscan-based variants of parallel prefix scan across a CUDA threadblock. - */ - -#pragma once - -#include "../../util_arch.cuh" -#include "../../util_ptx.cuh" -#include "../../warp/warp_scan.cuh" -#include "../../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/** - * \brief BlockScanWarpScans provides warpscan-based variants of parallel prefix scan across a CUDA threadblock. - */ -template < - typename T, - int BLOCK_DIM_X, ///< The thread block length in threads along the X dimension - int BLOCK_DIM_Y, ///< The thread block length in threads along the Y dimension - int BLOCK_DIM_Z, ///< The thread block length in threads along the Z dimension - int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective -struct BlockScanWarpScans -{ - //--------------------------------------------------------------------- - // Types and constants - //--------------------------------------------------------------------- - - /// Constants - enum - { - /// Number of warp threads - WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH), - - /// The thread block size in threads - BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, - - /// Number of active warps - WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS, - }; - - /// WarpScan utility type - typedef WarpScan WarpScanT; - - /// WarpScan utility type - typedef WarpScan WarpAggregateScanT; - - /// Shared memory storage layout type - struct _TempStorage - { - typename WarpAggregateScanT::TempStorage inner_scan[WARPS]; ///< Buffer for warp-synchronous scans - typename WarpScanT::TempStorage warp_scan[WARPS]; ///< Buffer for warp-synchronous scans - T warp_aggregates[WARPS]; - T block_prefix; ///< Shared prefix for the entire threadblock - }; - - - /// Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - - //--------------------------------------------------------------------- - // Per-thread fields - //--------------------------------------------------------------------- - - // Thread fields - _TempStorage &temp_storage; - unsigned int linear_tid; - unsigned int warp_id; - unsigned int lane_id; - - - //--------------------------------------------------------------------- - // Constructors - //--------------------------------------------------------------------- - - /// Constructor - __device__ __forceinline__ 
BlockScanWarpScans( - TempStorage &temp_storage) - : - temp_storage(temp_storage.Alias()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)), - warp_id((WARPS == 1) ? 0 : linear_tid / WARP_THREADS), - lane_id(LaneId()) - {} - - - //--------------------------------------------------------------------- - // Utility methods - //--------------------------------------------------------------------- - - template - __device__ __forceinline__ void ApplyWarpAggregates( - T &warp_prefix, ///< [out] The calling thread's partial reduction - ScanOp scan_op, ///< [in] Binary scan operator - T &block_aggregate, ///< [out] Threadblock-wide aggregate reduction of input items - Int2Type addend_warp) - { - if (warp_id == WARP) - warp_prefix = block_aggregate; - - T addend = temp_storage.warp_aggregates[WARP]; - block_aggregate = scan_op(block_aggregate, addend); - - ApplyWarpAggregates(warp_prefix, scan_op, block_aggregate, Int2Type()); - } - - template - __device__ __forceinline__ void ApplyWarpAggregates( - T &warp_prefix, ///< [out] The calling thread's partial reduction - ScanOp scan_op, ///< [in] Binary scan operator - T &block_aggregate, ///< [out] Threadblock-wide aggregate reduction of input items - Int2Type addend_warp) - {} - - - /// Use the warp-wide aggregates to compute the calling warp's prefix. Also returns block-wide aggregate in all threads. - template - __device__ __forceinline__ T ComputeWarpPrefix( - ScanOp scan_op, ///< [in] Binary scan operator - T warp_aggregate, ///< [in] [laneWARP_THREADS - 1 only] Warp-wide aggregate reduction of input items - T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items - { - // Last lane in each warp shares its warp-aggregate - if (lane_id == WARP_THREADS - 1) - temp_storage.warp_aggregates[warp_id] = warp_aggregate; - - CTA_SYNC(); - - // Accumulate block aggregates and save the one that is our warp's prefix - T warp_prefix; - block_aggregate = temp_storage.warp_aggregates[0]; - - // Use template unrolling (since the PTX backend can't handle unrolling it for SM1x) - ApplyWarpAggregates(warp_prefix, scan_op, block_aggregate, Int2Type<1>()); -/* - #pragma unroll - for (int WARP = 1; WARP < WARPS; ++WARP) - { - if (warp_id == WARP) - warp_prefix = block_aggregate; - - T addend = temp_storage.warp_aggregates[WARP]; - block_aggregate = scan_op(block_aggregate, addend); - } -*/ - - return warp_prefix; - } - - - /// Use the warp-wide aggregates and initial-value to compute the calling warp's prefix. Also returns block-wide aggregate in all threads. - template - __device__ __forceinline__ T ComputeWarpPrefix( - ScanOp scan_op, ///< [in] Binary scan operator - T warp_aggregate, ///< [in] [laneWARP_THREADS - 1 only] Warp-wide aggregate reduction of input items - T &block_aggregate, ///< [out] Threadblock-wide aggregate reduction of input items - const T &initial_value) ///< [in] Initial value to seed the exclusive scan - { - T warp_prefix = ComputeWarpPrefix(scan_op, warp_aggregate, block_aggregate); - - warp_prefix = scan_op(initial_value, warp_prefix); - - if (warp_id == 0) - warp_prefix = initial_value; - - return warp_prefix; - } - - //--------------------------------------------------------------------- - // Exclusive scans - //--------------------------------------------------------------------- - - /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. With no initial value, the output computed for thread0 is undefined. 
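[Editor's note] Before the scan overloads that follow, the distinguishing trick of this second warp-scans variant can be sketched independently (illustrative code, not CUB's): instead of every thread folding predecessor aggregates serially, the first WARPS lanes run a second shuffle scan over the warp aggregates themselves, matching the inner_scan buffer declared above. Again fixed at 128 threads / 4 warps for brevity.

    __global__ void warpScans2ExclusiveSum(const int* d_in, int* d_out)
    {
        const int WARPS = 4;
        __shared__ int warp_aggregates[WARPS];
        __shared__ int warp_prefixes[WARPS];
        const int lane = threadIdx.x & 31;
        const int warp = threadIdx.x >> 5;

        int item = d_in[threadIdx.x];

        // First level: inclusive scan inside each warp.
        int inclusive = item;
        for (int offset = 1; offset < 32; offset <<= 1)
        {
            int n = __shfl_up_sync(0xffffffffu, inclusive, offset);
            if (lane >= offset) inclusive += n;
        }
        if (lane == 31) warp_aggregates[warp] = inclusive;
        __syncthreads();

        // Second level: lanes 0..WARPS-1 of warp 0 scan the aggregates.
        if (warp == 0 && lane < WARPS)
        {
            int agg = warp_aggregates[lane];
            int incl = agg;
            for (int offset = 1; offset < WARPS; offset <<= 1)
            {
                int n = __shfl_up_sync(0x0000000fu, incl, offset);
                if (lane >= offset) incl += n;
            }
            warp_prefixes[lane] = incl - agg;   // exclusive prefix per warp
        }
        __syncthreads();

        d_out[threadIdx.x] = warp_prefixes[warp] + inclusive - item;
    }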
- template <typename ScanOp> - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input item - T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op) ///< [in] Binary scan operator - { - // Compute block-wide exclusive scan. The exclusive output from tid0 is invalid. - T block_aggregate; - ExclusiveScan(input, exclusive_output, scan_op, block_aggregate); - } - - - /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. - template <typename ScanOp> - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input items - T &exclusive_output, ///< [out] Calling thread's output items (may be aliased to \p input) - const T &initial_value, ///< [in] Initial value to seed the exclusive scan - ScanOp scan_op) ///< [in] Binary scan operator - { - T block_aggregate; - ExclusiveScan(input, exclusive_output, initial_value, scan_op, block_aggregate); - } - - - /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. With no initial value, the output computed for thread0 is undefined. - template <typename ScanOp> - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input item - T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan operator - T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items - { - WarpScanT my_warp_scan(temp_storage.warp_scan[warp_id]); - - // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. - T inclusive_output; - my_warp_scan.Scan(input, inclusive_output, exclusive_output, scan_op); - - // Compute the warp-wide prefix and block-wide aggregate for each warp. Warp prefix for warp0 is invalid. -// T warp_prefix = ComputeWarpPrefix(scan_op, inclusive_output, block_aggregate); - -//-------------------------------------------------- - // Last lane in each warp shares its warp-aggregate - if (lane_id == WARP_THREADS - 1) - temp_storage.warp_aggregates[warp_id] = inclusive_output; - - CTA_SYNC(); - - // Get the warp scan partial - T warp_inclusive, warp_prefix; - if (lane_id < WARPS) - { - // Scan the warpscan partials - T warp_val = temp_storage.warp_aggregates[lane_id]; - WarpAggregateScanT(temp_storage.inner_scan[warp_id]).Scan(warp_val, warp_inclusive, warp_prefix, scan_op); - } - - warp_prefix = my_warp_scan.Broadcast(warp_prefix, warp_id); - block_aggregate = my_warp_scan.Broadcast(warp_inclusive, WARPS - 1); -//-------------------------------------------------- - - // Apply warp prefix to our lane's partial - if (warp_id != 0) - { - exclusive_output = scan_op(warp_prefix, exclusive_output); - if (lane_id == 0) - exclusive_output = warp_prefix; - } - } - - - /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs.
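To make the warp decomposition above concrete: each warp first scans its own inputs, then the per-warp aggregates are themselves scanned exclusively to produce each warp's prefix. A host-side analogue of that second phase (illustrative only, not the device code):

#include <cstdio>

int main()
{
    // Suppose four warps produced these warp-wide sums (aggregates)
    int warp_aggregates[4] = {5, 3, 7, 2};

    // Exclusive scan of the aggregates yields each warp's prefix;
    // the running total after the last warp is the block aggregate.
    int running = 0;
    for (int w = 0; w < 4; ++w)
    {
        int prefix = running;             // warp w's prefix: sum of aggregates 0..w-1
        running += warp_aggregates[w];
        std::printf("warp %d prefix = %d\n", w, prefix);   // prints 0, 5, 8, 15
    }
    std::printf("block aggregate = %d\n", running);        // prints 17
    return 0;
}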
- template <typename ScanOp> - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input items - T &exclusive_output, ///< [out] Calling thread's output items (may be aliased to \p input) - const T &initial_value, ///< [in] Initial value to seed the exclusive scan - ScanOp scan_op, ///< [in] Binary scan operator - T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items - { - WarpScanT my_warp_scan(temp_storage.warp_scan[warp_id]); - - // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. - T inclusive_output; - my_warp_scan.Scan(input, inclusive_output, exclusive_output, scan_op); - - // Compute the warp-wide prefix and block-wide aggregate for each warp -// T warp_prefix = ComputeWarpPrefix(scan_op, inclusive_output, block_aggregate, initial_value); - -//-------------------------------------------------- - // Last lane in each warp shares its warp-aggregate - if (lane_id == WARP_THREADS - 1) - temp_storage.warp_aggregates[warp_id] = inclusive_output; - - CTA_SYNC(); - - // Get the warp scan partial - T warp_inclusive, warp_prefix; - if (lane_id < WARPS) - { - // Scan the warpscan partials - T warp_val = temp_storage.warp_aggregates[lane_id]; - WarpAggregateScanT(temp_storage.inner_scan[warp_id]).Scan(warp_val, warp_inclusive, warp_prefix, initial_value, scan_op); - } - - warp_prefix = my_warp_scan.Broadcast(warp_prefix, warp_id); - block_aggregate = my_warp_scan.Broadcast(warp_inclusive, WARPS - 1); -//-------------------------------------------------- - - // Apply warp prefix to our lane's partial - exclusive_output = scan_op(warp_prefix, exclusive_output); - if (lane_id == 0) - exclusive_output = warp_prefix; - } - - - /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. The call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by lane0 in that warp is used as the "seed" value that logically prefixes the threadblock's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. - template < - typename ScanOp, - typename BlockPrefixCallbackOp> - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input item - T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan operator - BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] [warp0 only] Call-back functor for specifying a threadblock-wide prefix to be applied to all inputs. - { - // Compute block-wide exclusive scan. The exclusive output from tid0 is invalid.
- T block_aggregate; - ExclusiveScan(input, exclusive_output, scan_op, block_aggregate); - - // Use the first warp to determine the threadblock prefix, returning the result in lane0 - if (warp_id == 0) - { - T block_prefix = block_prefix_callback_op(block_aggregate); - if (lane_id == 0) - { - // Share the prefix with all threads - temp_storage.block_prefix = block_prefix; - exclusive_output = block_prefix; // The block prefix is the exclusive output for tid0 - } - } - - CTA_SYNC(); - - // Incorporate threadblock prefix into outputs - T block_prefix = temp_storage.block_prefix; - if (linear_tid > 0) - { - exclusive_output = scan_op(block_prefix, exclusive_output); - } - } - - - //--------------------------------------------------------------------- - // Inclusive scans - //--------------------------------------------------------------------- - - /// Computes an inclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. - template - __device__ __forceinline__ void InclusiveScan( - T input, ///< [in] Calling thread's input item - T &inclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op) ///< [in] Binary scan operator - { - T block_aggregate; - InclusiveScan(input, inclusive_output, scan_op, block_aggregate); - } - - - /// Computes an inclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. - template - __device__ __forceinline__ void InclusiveScan( - T input, ///< [in] Calling thread's input item - T &inclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan operator - T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items - { - WarpScanT(temp_storage.warp_scan[warp_id]).InclusiveScan(input, inclusive_output, scan_op); - - // Compute the warp-wide prefix and block-wide aggregate for each warp. Warp prefix for warp0 is invalid. - T warp_prefix = ComputeWarpPrefix(scan_op, inclusive_output, block_aggregate); - - // Apply warp prefix to our lane's partial - if (warp_id != 0) - { - inclusive_output = scan_op(warp_prefix, inclusive_output); - } - } - - - /// Computes an inclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by lane0 in that warp is used as the "seed" value that logically prefixes the threadblock's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. - template < - typename ScanOp, - typename BlockPrefixCallbackOp> - __device__ __forceinline__ void InclusiveScan( - T input, ///< [in] Calling thread's input item - T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan operator - BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] [warp0 only] Call-back functor for specifying a threadblock-wide prefix to be applied to all inputs. 
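A typical \p block_prefix_callback_op for scanning consecutive tiles is a small stateful functor that carries a running total between invocations; a sketch in the spirit of the CUB documentation (names are illustrative):

// Stateful callback functor providing the running prefix for consecutive
// tile-wide scans. Invoked by the first warp; lane0's return value seeds the tile.
struct BlockPrefixCallbackOp
{
    int running_total;

    __device__ BlockPrefixCallbackOp(int initial) : running_total(initial) {}

    __device__ int operator()(int block_aggregate)
    {
        int old_prefix = running_total;    // prefix to apply to this tile
        running_total += block_aggregate;  // accumulate for the next tile
        return old_prefix;
    }
};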
- { - T block_aggregate; - InclusiveScan(input, exclusive_output, scan_op, block_aggregate); - - // Use the first warp to determine the threadblock prefix, returning the result in lane0 - if (warp_id == 0) - { - T block_prefix = block_prefix_callback_op(block_aggregate); - if (lane_id == 0) - { - // Share the prefix with all threads - temp_storage.block_prefix = block_prefix; - } - } - - CTA_SYNC(); - - // Incorporate threadblock prefix into outputs - T block_prefix = temp_storage.block_prefix; - exclusive_output = scan_op(block_prefix, exclusive_output); - } - - -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/block/specializations/block_scan_warp_scans3.cuh b/ml-xgboost/cub/cub/block/specializations/block_scan_warp_scans3.cuh deleted file mode 100644 index 811d84f..0000000 --- a/ml-xgboost/cub/cub/block/specializations/block_scan_warp_scans3.cuh +++ /dev/null @@ -1,412 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::BlockScanWarpscans provides warpscan-based variants of parallel prefix scan across a CUDA threadblock. - */ - -#pragma once - -#include "../../util_arch.cuh" -#include "../../util_ptx.cuh" -#include "../../warp/warp_scan.cuh" -#include "../../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/** - * \brief BlockScanWarpScans provides warpscan-based variants of parallel prefix scan across a CUDA threadblock. 
- */ -template < - typename T, - int BLOCK_DIM_X, ///< The thread block length in threads along the X dimension - int BLOCK_DIM_Y, ///< The thread block length in threads along the Y dimension - int BLOCK_DIM_Z, ///< The thread block length in threads along the Z dimension - int PTX_ARCH> ///< The PTX compute capability for which to specialize this collective -struct BlockScanWarpScans -{ - //--------------------------------------------------------------------- - // Types and constants - //--------------------------------------------------------------------- - - /// Constants - enum - { - /// The thread block size in threads - BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, - - /// Number of warp threads - INNER_WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH), - OUTER_WARP_THREADS = BLOCK_THREADS / INNER_WARP_THREADS, - - /// Number of outer scan warps - OUTER_WARPS = INNER_WARP_THREADS - }; - - /// Outer WarpScan utility type - typedef WarpScan<T, OUTER_WARP_THREADS, PTX_ARCH> OuterWarpScanT; - - /// Inner WarpScan utility type - typedef WarpScan<T, INNER_WARP_THREADS, PTX_ARCH> InnerWarpScanT; - - typedef typename OuterWarpScanT::TempStorage OuterScanArray[OUTER_WARPS]; - - - /// Shared memory storage layout type - struct _TempStorage - { - union - { - Uninitialized<OuterScanArray> outer_warp_scan; ///< Buffer for warp-synchronous outer scans - typename InnerWarpScanT::TempStorage inner_warp_scan; ///< Buffer for warp-synchronous inner scan - }; - T warp_aggregates[OUTER_WARPS]; - T block_aggregate; ///< Shared prefix for the entire threadblock - }; - - - /// Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - - //--------------------------------------------------------------------- - // Per-thread fields - //--------------------------------------------------------------------- - - // Thread fields - _TempStorage &temp_storage; - unsigned int linear_tid; - unsigned int warp_id; - unsigned int lane_id; - - - //--------------------------------------------------------------------- - // Constructors - //--------------------------------------------------------------------- - - /// Constructor - __device__ __forceinline__ BlockScanWarpScans( - TempStorage &temp_storage) - : - temp_storage(temp_storage.Alias()), - linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)), - warp_id((OUTER_WARPS == 1) ? 0 : linear_tid / OUTER_WARP_THREADS), - lane_id((OUTER_WARPS == 1) ? linear_tid : linear_tid % OUTER_WARP_THREADS) - {} - - - //--------------------------------------------------------------------- - // Exclusive scans - //--------------------------------------------------------------------- - - /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. With no initial value, the output computed for thread0 is undefined. - template <typename ScanOp> - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input item - T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op) ///< [in] Binary scan operator - { - // Compute block-wide exclusive scan. The exclusive output from tid0 is invalid. - T block_aggregate; - ExclusiveScan(input, exclusive_output, scan_op, block_aggregate); - } - - - /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element.
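As a usage sketch for the seeded overload via the public front end (assuming cub::BlockScan and the cub::Max functor from thread_operators.cuh; kernel and pointer names are illustrative), an exclusive prefix-max seeded with INT_MIN:

#include <climits>
#include <cub/cub.cuh>

__global__ void MaxScanKernel(const int *d_in, int *d_out)
{
    typedef cub::BlockScan<int, 128> BlockScan;
    __shared__ typename BlockScan::TempStorage temp_storage;

    int item = d_in[threadIdx.x];

    // Exclusive prefix max across the block, seeded so thread0 receives INT_MIN
    int block_aggregate;
    BlockScan(temp_storage).ExclusiveScan(item, item, INT_MIN, cub::Max(), block_aggregate);

    d_out[threadIdx.x] = item;
}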
- template - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input items - T &exclusive_output, ///< [out] Calling thread's output items (may be aliased to \p input) - const T &initial_value, ///< [in] Initial value to seed the exclusive scan - ScanOp scan_op) ///< [in] Binary scan operator - { - T block_aggregate; - ExclusiveScan(input, exclusive_output, initial_value, scan_op, block_aggregate); - } - - - /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. With no initial value, the output computed for thread0 is undefined. - template - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input item - T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan operator - T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items - { - // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. - T inclusive_output; - OuterWarpScanT(temp_storage.outer_warp_scan.Alias()[warp_id]).Scan(input, inclusive_output, exclusive_output, scan_op); - - // Share outer warp total - if (lane_id == OUTER_WARP_THREADS - 1) - temp_storage.warp_aggregates[warp_id] = inclusive_output; - - CTA_SYNC(); - - if (linear_tid < INNER_WARP_THREADS) - { - T outer_warp_input = temp_storage.warp_aggregates[linear_tid]; - T outer_warp_exclusive; - - InnerWarpScanT(temp_storage.inner_warp_scan).ExclusiveScan( - outer_warp_input, outer_warp_exclusive, scan_op, block_aggregate); - - temp_storage.block_aggregate = block_aggregate; - temp_storage.warp_aggregates[linear_tid] = outer_warp_exclusive; - } - - CTA_SYNC(); - - if (warp_id != 0) - { - // Retrieve block aggregate - block_aggregate = temp_storage.block_aggregate; - - // Apply warp prefix to our lane's partial - T outer_warp_exclusive = temp_storage.warp_aggregates[warp_id]; - exclusive_output = scan_op(outer_warp_exclusive, exclusive_output); - if (lane_id == 0) - exclusive_output = outer_warp_exclusive; - } - } - - - /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. - template - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input items - T &exclusive_output, ///< [out] Calling thread's output items (may be aliased to \p input) - const T &initial_value, ///< [in] Initial value to seed the exclusive scan - ScanOp scan_op, ///< [in] Binary scan operator - T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items - { - // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. 
- T inclusive_output; - OuterWarpScanT(temp_storage.outer_warp_scan.Alias()[warp_id]).Scan(input, inclusive_output, exclusive_output, scan_op); - - // Share outer warp total - if (lane_id == OUTER_WARP_THREADS - 1) - { - temp_storage.warp_aggregates[warp_id] = inclusive_output; - } - - CTA_SYNC(); - - if (linear_tid < INNER_WARP_THREADS) - { - T outer_warp_input = temp_storage.warp_aggregates[linear_tid]; - T outer_warp_exclusive; - - InnerWarpScanT(temp_storage.inner_warp_scan).ExclusiveScan( - outer_warp_input, outer_warp_exclusive, initial_value, scan_op, block_aggregate); - - temp_storage.block_aggregate = block_aggregate; - temp_storage.warp_aggregates[linear_tid] = outer_warp_exclusive; - } - - CTA_SYNC(); - - // Retrieve block aggregate - block_aggregate = temp_storage.block_aggregate; - - // Apply warp prefix to our lane's partial - T outer_warp_exclusive = temp_storage.warp_aggregates[warp_id]; - exclusive_output = scan_op(outer_warp_exclusive, exclusive_output); - if (lane_id == 0) - exclusive_output = outer_warp_exclusive; - } - - - /// Computes an exclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. The call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by lane0 in that warp is used as the "seed" value that logically prefixes the threadblock's scan inputs. - template < - typename ScanOp, - typename BlockPrefixCallbackOp> - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input item - T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan operator - BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] [warp0 only] Call-back functor for specifying a threadblock-wide prefix to be applied to all inputs. - { - // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. - T inclusive_output; - OuterWarpScanT(temp_storage.outer_warp_scan.Alias()[warp_id]).Scan(input, inclusive_output, exclusive_output, scan_op); - - // Share outer warp total - if (lane_id == OUTER_WARP_THREADS - 1) - temp_storage.warp_aggregates[warp_id] = inclusive_output; - - CTA_SYNC(); - - if (linear_tid < INNER_WARP_THREADS) - { - InnerWarpScanT inner_scan(temp_storage.inner_warp_scan); - - T upsweep = temp_storage.warp_aggregates[linear_tid]; - T downsweep_prefix, block_aggregate; - - inner_scan.ExclusiveScan(upsweep, downsweep_prefix, scan_op, block_aggregate); - - // Use callback functor to get block prefix in lane0 and then broadcast to other lanes - T block_prefix = block_prefix_callback_op(block_aggregate); - block_prefix = inner_scan.Broadcast(block_prefix, 0); - - downsweep_prefix = scan_op(block_prefix, downsweep_prefix); - if (linear_tid == 0) - downsweep_prefix = block_prefix; - - temp_storage.warp_aggregates[linear_tid] = downsweep_prefix; - } - - CTA_SYNC(); - - // Apply warp prefix to our lane's partial (or assign it if partial is invalid) - T outer_warp_exclusive = temp_storage.warp_aggregates[warp_id]; - exclusive_output = scan_op(outer_warp_exclusive, exclusive_output); - if (lane_id == 0) - exclusive_output = outer_warp_exclusive; - } - - - //--------------------------------------------------------------------- - // Inclusive scans - //--------------------------------------------------------------------- - - /// Computes an inclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. 
Each thread contributes one input element. - template - __device__ __forceinline__ void InclusiveScan( - T input, ///< [in] Calling thread's input item - T &inclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op) ///< [in] Binary scan operator - { - T block_aggregate; - InclusiveScan(input, inclusive_output, scan_op, block_aggregate); - } - - - /// Computes an inclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. - template - __device__ __forceinline__ void InclusiveScan( - T input, ///< [in] Calling thread's input item - T &inclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan operator - T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items - { - // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. - OuterWarpScanT(temp_storage.outer_warp_scan.Alias()[warp_id]).InclusiveScan( - input, inclusive_output, scan_op); - - // Share outer warp total - if (lane_id == OUTER_WARP_THREADS - 1) - temp_storage.warp_aggregates[warp_id] = inclusive_output; - - CTA_SYNC(); - - if (linear_tid < INNER_WARP_THREADS) - { - T outer_warp_input = temp_storage.warp_aggregates[linear_tid]; - T outer_warp_exclusive; - - InnerWarpScanT(temp_storage.inner_warp_scan).ExclusiveScan( - outer_warp_input, outer_warp_exclusive, scan_op, block_aggregate); - - temp_storage.block_aggregate = block_aggregate; - temp_storage.warp_aggregates[linear_tid] = outer_warp_exclusive; - } - - CTA_SYNC(); - - if (warp_id != 0) - { - // Retrieve block aggregate - block_aggregate = temp_storage.block_aggregate; - - // Apply warp prefix to our lane's partial - T outer_warp_exclusive = temp_storage.warp_aggregates[warp_id]; - inclusive_output = scan_op(outer_warp_exclusive, inclusive_output); - } - } - - - /// Computes an inclusive threadblock-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by lane0 in that warp is used as the "seed" value that logically prefixes the threadblock's scan inputs. - template < - typename ScanOp, - typename BlockPrefixCallbackOp> - __device__ __forceinline__ void InclusiveScan( - T input, ///< [in] Calling thread's input item - T &inclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan operator - BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] [warp0 only] Call-back functor for specifying a threadblock-wide prefix to be applied to all inputs. - { - // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. 
- OuterWarpScanT(temp_storage.outer_warp_scan.Alias()[warp_id]).InclusiveScan( - input, inclusive_output, scan_op); - - // Share outer warp total - if (lane_id == OUTER_WARP_THREADS - 1) - temp_storage.warp_aggregates[warp_id] = inclusive_output; - - CTA_SYNC(); - - if (linear_tid < INNER_WARP_THREADS) - { - InnerWarpScanT inner_scan(temp_storage.inner_warp_scan); - - T upsweep = temp_storage.warp_aggregates[linear_tid]; - T downsweep_prefix, block_aggregate; - inner_scan.ExclusiveScan(upsweep, downsweep_prefix, scan_op, block_aggregate); - - // Use callback functor to get block prefix in lane0 and then broadcast to other lanes - T block_prefix = block_prefix_callback_op(block_aggregate); - block_prefix = inner_scan.Broadcast(block_prefix, 0); - - downsweep_prefix = scan_op(block_prefix, downsweep_prefix); - if (linear_tid == 0) - downsweep_prefix = block_prefix; - - temp_storage.warp_aggregates[linear_tid] = downsweep_prefix; - } - - CTA_SYNC(); - - // Apply warp prefix to our lane's partial - T outer_warp_exclusive = temp_storage.warp_aggregates[warp_id]; - inclusive_output = scan_op(outer_warp_exclusive, inclusive_output); - } - - -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/cub.cuh b/ml-xgboost/cub/cub/cub.cuh deleted file mode 100644 index 1150784..0000000 --- a/ml-xgboost/cub/cub/cub.cuh +++ /dev/null @@ -1,96 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - ******************************************************************************/ - -/** - * \file - * CUB umbrella include file - */ - -#pragma once - - -// Block -#include "block/block_histogram.cuh" -#include "block/block_discontinuity.cuh" -#include "block/block_exchange.cuh" -#include "block/block_load.cuh" -#include "block/block_radix_rank.cuh" -#include "block/block_radix_sort.cuh" -#include "block/block_reduce.cuh" -#include "block/block_scan.cuh" -#include "block/block_store.cuh" -//#include "block/block_shift.cuh" - -// Device -#include "device/device_histogram.cuh" -#include "device/device_partition.cuh" -#include "device/device_radix_sort.cuh" -#include "device/device_reduce.cuh" -#include "device/device_run_length_encode.cuh" -#include "device/device_scan.cuh" -#include "device/device_segmented_radix_sort.cuh" -#include "device/device_segmented_reduce.cuh" -#include "device/device_select.cuh" -#include "device/device_spmv.cuh" - -// Grid -//#include "grid/grid_barrier.cuh" -#include "grid/grid_even_share.cuh" -#include "grid/grid_mapping.cuh" -#include "grid/grid_queue.cuh" - -// Thread -#include "thread/thread_load.cuh" -#include "thread/thread_operators.cuh" -#include "thread/thread_reduce.cuh" -#include "thread/thread_scan.cuh" -#include "thread/thread_store.cuh" - -// Warp -#include "warp/warp_reduce.cuh" -#include "warp/warp_scan.cuh" - -// Iterator -#include "iterator/arg_index_input_iterator.cuh" -#include "iterator/cache_modified_input_iterator.cuh" -#include "iterator/cache_modified_output_iterator.cuh" -#include "iterator/constant_input_iterator.cuh" -#include "iterator/counting_input_iterator.cuh" -#include "iterator/tex_obj_input_iterator.cuh" -#include "iterator/tex_ref_input_iterator.cuh" -#include "iterator/transform_input_iterator.cuh" - -// Util -#include "util_allocator.cuh" -#include "util_arch.cuh" -#include "util_debug.cuh" -#include "util_device.cuh" -#include "util_macro.cuh" -#include "util_ptx.cuh" -#include "util_type.cuh" - diff --git a/ml-xgboost/cub/cub/device/device_histogram.cuh b/ml-xgboost/cub/cub/device/device_histogram.cuh deleted file mode 100644 index 9a4e099..0000000 --- a/ml-xgboost/cub/cub/device/device_histogram.cuh +++ /dev/null @@ -1,866 +0,0 @@ - -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::DeviceHistogram provides device-wide parallel operations for constructing histogram(s) from a sequence of sample data residing within device-accessible memory. - */ - -#pragma once - -#include <stdio.h> -#include <iterator> -#include <limits> - -#include "dispatch/dispatch_histogram.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \brief DeviceHistogram provides device-wide parallel operations for constructing histogram(s) from a sequence of sample data residing within device-accessible memory. ![](histogram_logo.png) - * \ingroup SingleModule - * - * \par Overview - * A histogram - * counts the number of observations that fall into each of the disjoint categories (known as bins). - * - * \par Usage Considerations - * \cdp_class{DeviceHistogram} - * - */ -struct DeviceHistogram -{ - /******************************************************************//** - * \name Evenly-segmented bin ranges - *********************************************************************/ - //@{ - - /** - * \brief Computes an intensity histogram from a sequence of data samples using equal-width bins. - * - * \par - * - The number of histogram bins is (\p num_levels - 1) - * - All bins comprise the same width of sample values: (\p upper_level - \p lower_level) / (\p num_levels - 1) - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates the computation of a six-bin histogram - * from a sequence of float samples - * - * \par - * \code - * #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh> - * - * // Declare, allocate, and initialize device-accessible pointers for input samples and - * // output histogram - * int num_samples; // e.g., 10 - * float* d_samples; // e.g., [2.2, 6.0, 7.1, 2.9, 3.5, 0.3, 2.9, 2.0, 6.1, 999.5] - * int* d_histogram; // e.g., [ -, -, -, -, -, -, -, -] - * int num_levels; // e.g., 7 (seven level boundaries for six bins) - * float lower_level; // e.g., 0.0 (lower sample value boundary of lowest bin) - * float upper_level; // e.g., 12.0 (upper sample value boundary of upper bin) - * ... - * - * // Determine temporary device storage requirements - * void* d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceHistogram::HistogramEven(d_temp_storage, temp_storage_bytes, - * d_samples, d_histogram, num_levels, lower_level, upper_level, num_samples); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Compute histograms - * cub::DeviceHistogram::HistogramEven(d_temp_storage, temp_storage_bytes, - * d_samples, d_histogram, num_levels, lower_level, upper_level, num_samples); - * - * // d_histogram <-- [1, 0, 5, 0, 3, 0, 0, 0]; - * - * \endcode - * - * \tparam SampleIteratorT [inferred] Random-access input iterator type for reading input samples.
\iterator - * \tparam CounterT [inferred] Integer type for histogram bin counters - * \tparam LevelT [inferred] Type for specifying boundaries (levels) - * \tparam OffsetT [inferred] Signed integer type for sequence offsets, list lengths, pointer differences, etc. \offset_size1 - */ - template < - typename SampleIteratorT, - typename CounterT, - typename LevelT, - typename OffsetT> - CUB_RUNTIME_FUNCTION - static cudaError_t HistogramEven( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - SampleIteratorT d_samples, ///< [in] The pointer to the input sequence of data samples. - CounterT* d_histogram, ///< [out] The pointer to the histogram counter output array of length num_levels - 1. - int num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples. Implies that the number of bins is num_levels - 1. - LevelT lower_level, ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin. - LevelT upper_level, ///< [in] The upper sample value bound (exclusive) for the highest histogram bin. - OffsetT num_samples, ///< [in] The number of input samples (i.e., the length of \p d_samples) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. - { - /// The sample value type of the input iterator - typedef typename std::iterator_traits::value_type SampleT; - - CounterT* d_histogram1[1] = {d_histogram}; - int num_levels1[1] = {num_levels}; - LevelT lower_level1[1] = {lower_level}; - LevelT upper_level1[1] = {upper_level}; - - return MultiHistogramEven<1, 1>( - d_temp_storage, - temp_storage_bytes, - d_samples, - d_histogram1, - num_levels1, - lower_level1, - upper_level1, - num_samples, - 1, - sizeof(SampleT) * num_samples, - stream, - debug_synchronous); - } - - - /** - * \brief Computes an intensity histogram from a sequence of data samples using equal-width bins. - * - * \par - * - A two-dimensional region of interest within \p d_samples can be specified - * using the \p num_row_samples, num_rows, and \p row_stride_bytes parameters. - * - The row stride must be a whole multiple of the sample data type - * size, i.e., (row_stride_bytes % sizeof(SampleT)) == 0. - * - The number of histogram bins is (\p num_levels - 1) - * - All bins comprise the same width of sample values: (\p upper_level - \p lower_level) / (\p num_levels - 1) - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates the computation of a six-bin histogram - * from a 2x5 region of interest within a flattened 2x7 array of float samples. 
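For intuition about the equal-width binning rule, an in-range sample maps to bin floor((sample - lower_level) * (num_levels - 1) / (upper_level - lower_level)); a host-side sketch of that arithmetic (illustrative only, not CUB's internal kernel code):

#include <cstdio>

// Map a sample to its equal-width bin, or -1 if it falls outside [lower, upper)
int BinIndex(float sample, float lower, float upper, int num_levels)
{
    if (sample < lower || sample >= upper) return -1;   // out-of-range samples are not counted
    int   num_bins = num_levels - 1;
    float scale    = num_bins / (upper - lower);
    return (int) ((sample - lower) * scale);
}

int main()
{
    // Matches the six-bin [0.0, 12.0) example: 2.2 -> bin 1, 6.0 -> bin 3, 999.5 -> skipped
    std::printf("%d %d %d\n",
                BinIndex(2.2f,   0.0f, 12.0f, 7),
                BinIndex(6.0f,   0.0f, 12.0f, 7),
                BinIndex(999.5f, 0.0f, 12.0f, 7));
    return 0;
}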
- * - * \par - * \code - * #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh> - * - * // Declare, allocate, and initialize device-accessible pointers for input samples and - * // output histogram - * int num_row_samples; // e.g., 5 - * int num_rows; // e.g., 2; - * size_t row_stride_bytes; // e.g., 7 * sizeof(float) - * float* d_samples; // e.g., [2.2, 6.0, 7.1, 2.9, 3.5, -, -, - * // 0.3, 2.9, 2.0, 6.1, 999.5, -, -] - * int* d_histogram; // e.g., [ -, -, -, -, -, -, -, -] - * int num_levels; // e.g., 7 (seven level boundaries for six bins) - * float lower_level; // e.g., 0.0 (lower sample value boundary of lowest bin) - * float upper_level; // e.g., 12.0 (upper sample value boundary of upper bin) - * ... - * - * // Determine temporary device storage requirements - * void* d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceHistogram::HistogramEven(d_temp_storage, temp_storage_bytes, - * d_samples, d_histogram, num_levels, lower_level, upper_level, - * num_row_samples, num_rows, row_stride_bytes); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Compute histograms - * cub::DeviceHistogram::HistogramEven(d_temp_storage, temp_storage_bytes, - * d_samples, d_histogram, num_levels, lower_level, upper_level, - * num_row_samples, num_rows, row_stride_bytes); - * - * // d_histogram <-- [1, 0, 5, 0, 3, 0, 0, 0]; - * - * \endcode - * - * \tparam SampleIteratorT [inferred] Random-access input iterator type for reading input samples. \iterator - * \tparam CounterT [inferred] Integer type for histogram bin counters - * \tparam LevelT [inferred] Type for specifying boundaries (levels) - * \tparam OffsetT [inferred] Signed integer type for sequence offsets, list lengths, pointer differences, etc. \offset_size1 - */ - template < - typename SampleIteratorT, - typename CounterT, - typename LevelT, - typename OffsetT> - CUB_RUNTIME_FUNCTION - static cudaError_t HistogramEven( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - SampleIteratorT d_samples, ///< [in] The pointer to the input sequence of data samples. - CounterT* d_histogram, ///< [out] The pointer to the histogram counter output array of length num_levels - 1. - int num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples. Implies that the number of bins is num_levels - 1. - LevelT lower_level, ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin. - LevelT upper_level, ///< [in] The upper sample value bound (exclusive) for the highest histogram bin. - OffsetT num_row_samples, ///< [in] The number of data samples per row in the region of interest - OffsetT num_rows, ///< [in] The number of rows in the region of interest - size_t row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false.
- { - CounterT* d_histogram1[1] = {d_histogram}; - int num_levels1[1] = {num_levels}; - LevelT lower_level1[1] = {lower_level}; - LevelT upper_level1[1] = {upper_level}; - - return MultiHistogramEven<1, 1>( - d_temp_storage, - temp_storage_bytes, - d_samples, - d_histogram1, - num_levels1, - lower_level1, - upper_level1, - num_row_samples, - num_rows, - row_stride_bytes, - stream, - debug_synchronous); - } - - /** - * \brief Computes per-channel intensity histograms from a sequence of multi-channel "pixel" data samples using equal-width bins. - * - * \par - * - The input is a sequence of pixel structures, where each pixel comprises - * a record of \p NUM_CHANNELS consecutive data samples (e.g., an RGBA pixel). - * - Of the \p NUM_CHANNELS specified, the function will only compute histograms - * for the first \p NUM_ACTIVE_CHANNELS (e.g., only RGB histograms from RGBA - * pixel samples). - * - The number of histogram bins for channeli is num_levels[i] - 1. - * - For channeli, the range of values for all histogram bins - * have the same width: (upper_level[i] - lower_level[i]) / ( num_levels[i] - 1) - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates the computation of three 256-bin RGB histograms - * from a quad-channel sequence of RGBA pixels (8 bits per channel per pixel) - * - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for input samples - * // and output histograms - * int num_pixels; // e.g., 5 - * unsigned char* d_samples; // e.g., [(2, 6, 7, 5), (3, 0, 2, 1), (7, 0, 6, 2), - * // (0, 6, 7, 5), (3, 0, 2, 6)] - * int* d_histogram[3]; // e.g., three device pointers to three device buffers, - * // each allocated with 256 integer counters - * int num_levels[3]; // e.g., {257, 257, 257}; - * unsigned int lower_level[3]; // e.g., {0, 0, 0}; - * unsigned int upper_level[3]; // e.g., {256, 256, 256}; - * ... - * - * // Determine temporary device storage requirements - * void* d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceHistogram::MultiHistogramEven<4, 3>(d_temp_storage, temp_storage_bytes, - * d_samples, d_histogram, num_levels, lower_level, upper_level, num_pixels); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Compute histograms - * cub::DeviceHistogram::MultiHistogramEven<4, 3>(d_temp_storage, temp_storage_bytes, - * d_samples, d_histogram, num_levels, lower_level, upper_level, num_pixels); - * - * // d_histogram <-- [ [1, 0, 1, 2, 0, 0, 0, 1, 0, 0, 0, ..., 0], - * // [0, 3, 0, 0, 0, 0, 2, 0, 0, 0, 0, ..., 0], - * // [0, 0, 2, 0, 0, 0, 1, 2, 0, 0, 0, ..., 0] ] - * - * \endcode - * - * \tparam NUM_CHANNELS Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed) - * \tparam NUM_ACTIVE_CHANNELS [inferred] Number of channels actively being histogrammed - * \tparam SampleIteratorT [inferred] Random-access input iterator type for reading input samples. \iterator - * \tparam CounterT [inferred] Integer type for histogram bin counters - * \tparam LevelT [inferred] Type for specifying boundaries (levels) - * \tparam OffsetT [inferred] Signed integer type for sequence offsets, list lengths, pointer differences, etc. 
\offset_size1 - */ - template < - int NUM_CHANNELS, - int NUM_ACTIVE_CHANNELS, - typename SampleIteratorT, - typename CounterT, - typename LevelT, - typename OffsetT> - CUB_RUNTIME_FUNCTION - static cudaError_t MultiHistogramEven( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples). - CounterT* d_histogram[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channeli, the allocation length of d_histogram[i] should be num_levels[i] - 1. - int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channeli is num_levels[i] - 1. - LevelT lower_level[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel. - LevelT upper_level[NUM_ACTIVE_CHANNELS], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel. - OffsetT num_pixels, ///< [in] The number of multi-channel pixels (i.e., the length of \p d_samples / NUM_CHANNELS) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. - { - /// The sample value type of the input iterator - typedef typename std::iterator_traits::value_type SampleT; - - return MultiHistogramEven( - d_temp_storage, - temp_storage_bytes, - d_samples, - d_histogram, - num_levels, - lower_level, - upper_level, - num_pixels, - 1, - sizeof(SampleT) * NUM_CHANNELS * num_pixels, - stream, - debug_synchronous); - } - - - /** - * \brief Computes per-channel intensity histograms from a sequence of multi-channel "pixel" data samples using equal-width bins. - * - * \par - * - The input is a sequence of pixel structures, where each pixel comprises - * a record of \p NUM_CHANNELS consecutive data samples (e.g., an RGBA pixel). - * - Of the \p NUM_CHANNELS specified, the function will only compute histograms - * for the first \p NUM_ACTIVE_CHANNELS (e.g., only RGB histograms from RGBA - * pixel samples). - * - A two-dimensional region of interest within \p d_samples can be specified - * using the \p num_row_samples, num_rows, and \p row_stride_bytes parameters. - * - The row stride must be a whole multiple of the sample data type - * size, i.e., (row_stride_bytes % sizeof(SampleT)) == 0. - * - The number of histogram bins for channeli is num_levels[i] - 1. - * - For channeli, the range of values for all histogram bins - * have the same width: (upper_level[i] - lower_level[i]) / ( num_levels[i] - 1) - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates the computation of three 256-bin RGB histograms from a 2x3 region of - * interest of within a flattened 2x4 array of quad-channel RGBA pixels (8 bits per channel per pixel). 
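To clarify the interleaved layout assumed here: channel c of pixel p sits at flat sample index p * NUM_CHANNELS + c, and only the first NUM_ACTIVE_CHANNELS are binned. A host-side sketch of that indexing (illustrative only, not the device implementation):

#include <vector>

// Host-side sketch: per-channel binning of interleaved RGBA samples (RGB only),
// with one 256-bin histogram per active channel
void MultiChannelHistogramSketch(const unsigned char *samples, int num_pixels,
                                 std::vector<int> histograms[3])
{
    const int NUM_CHANNELS        = 4;   // samples per pixel (RGBA)
    const int NUM_ACTIVE_CHANNELS = 3;   // only R, G, B are binned

    for (int p = 0; p < num_pixels; ++p)
        for (int c = 0; c < NUM_ACTIVE_CHANNELS; ++c)
            ++histograms[c][samples[p * NUM_CHANNELS + c]];  // flat index: p * NUM_CHANNELS + c
}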
- * - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for input samples - * // and output histograms - * int num_row_pixels; // e.g., 3 - * int num_rows; // e.g., 2 - * size_t row_stride_bytes; // e.g., 4 * sizeof(unsigned char) * NUM_CHANNELS - * unsigned char* d_samples; // e.g., [(2, 6, 7, 5), (3, 0, 2, 1), (7, 0, 6, 2), (-, -, -, -), - * // (0, 6, 7, 5), (3, 0, 2, 6), (1, 1, 1, 1), (-, -, -, -)] - * int* d_histogram[3]; // e.g., three device pointers to three device buffers, - * // each allocated with 256 integer counters - * int num_levels[3]; // e.g., {257, 257, 257}; - * unsigned int lower_level[3]; // e.g., {0, 0, 0}; - * unsigned int upper_level[3]; // e.g., {256, 256, 256}; - * ... - * - * // Determine temporary device storage requirements - * void* d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceHistogram::MultiHistogramEven<4, 3>(d_temp_storage, temp_storage_bytes, - * d_samples, d_histogram, num_levels, lower_level, upper_level, - * num_row_pixels, num_rows, row_stride_bytes); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Compute histograms - * cub::DeviceHistogram::MultiHistogramEven<4, 3>(d_temp_storage, temp_storage_bytes, - * d_samples, d_histogram, num_levels, lower_level, upper_level, - * num_row_pixels, num_rows, row_stride_bytes); - * - * // d_histogram <-- [ [1, 1, 1, 2, 0, 0, 0, 1, 0, 0, 0, ..., 0], - * // [0, 4, 0, 0, 0, 0, 2, 0, 0, 0, 0, ..., 0], - * // [0, 1, 2, 0, 0, 0, 1, 2, 0, 0, 0, ..., 0] ] - * - * \endcode - * - * \tparam NUM_CHANNELS Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed) - * \tparam NUM_ACTIVE_CHANNELS [inferred] Number of channels actively being histogrammed - * \tparam SampleIteratorT [inferred] Random-access input iterator type for reading input samples. \iterator - * \tparam CounterT [inferred] Integer type for histogram bin counters - * \tparam LevelT [inferred] Type for specifying boundaries (levels) - * \tparam OffsetT [inferred] Signed integer type for sequence offsets, list lengths, pointer differences, etc. \offset_size1 - */ - template < - int NUM_CHANNELS, - int NUM_ACTIVE_CHANNELS, - typename SampleIteratorT, - typename CounterT, - typename LevelT, - typename OffsetT> - CUB_RUNTIME_FUNCTION - static cudaError_t MultiHistogramEven( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples). - CounterT* d_histogram[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channeli, the allocation length of d_histogram[i] should be num_levels[i] - 1. - int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channeli is num_levels[i] - 1. 
- LevelT lower_level[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel. - LevelT upper_level[NUM_ACTIVE_CHANNELS], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel. - OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest - OffsetT num_rows, ///< [in] The number of rows in the region of interest - size_t row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. - { - /// The sample value type of the input iterator - typedef typename std::iterator_traits::value_type SampleT; - Int2Type is_byte_sample; - - if ((sizeof(OffsetT) > sizeof(int)) && - ((unsigned long long) (num_rows * row_stride_bytes) < (unsigned long long) std::numeric_limits::max())) - { - // Down-convert OffsetT data type - - - return DipatchHistogram::DispatchEven( - d_temp_storage, temp_storage_bytes, d_samples, d_histogram, num_levels, lower_level, upper_level, - (int) num_row_pixels, (int) num_rows, (int) (row_stride_bytes / sizeof(SampleT)), - stream, debug_synchronous, is_byte_sample); - } - - return DipatchHistogram::DispatchEven( - d_temp_storage, temp_storage_bytes, d_samples, d_histogram, num_levels, lower_level, upper_level, - num_row_pixels, num_rows, (OffsetT) (row_stride_bytes / sizeof(SampleT)), - stream, debug_synchronous, is_byte_sample); - } - - - //@} end member group - /******************************************************************//** - * \name Custom bin ranges - *********************************************************************/ - //@{ - - /** - * \brief Computes an intensity histogram from a sequence of data samples using the specified bin boundary levels. - * - * \par - * - The number of histogram bins is (\p num_levels - 1) - * - The value range for bini is [level[i], level[i+1]) - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates the computation of an six-bin histogram - * from a sequence of float samples - * - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for input samples and - * // output histogram - * int num_samples; // e.g., 10 - * float* d_samples; // e.g., [2.2, 6.0, 7.1, 2.9, 3.5, 0.3, 2.9, 2.0, 6.1, 999.5] - * int* d_histogram; // e.g., [ -, -, -, -, -, -, -, -] - * int num_levels // e.g., 7 (seven level boundaries for six bins) - * float* d_levels; // e.g., [0.0, 2.0, 4.0, 6.0, 8.0, 12.0, 16.0] - * ... 
- * - * // Determine temporary device storage requirements - * void* d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceHistogram::HistogramRange(d_temp_storage, temp_storage_bytes, - * d_samples, d_histogram, num_levels, d_levels, num_samples); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Compute histograms - * cub::DeviceHistogram::HistogramRange(d_temp_storage, temp_storage_bytes, - * d_samples, d_histogram, num_levels, d_levels, num_samples); - * - * // d_histogram <-- [1, 0, 5, 0, 3, 0, 0, 0]; - * - * \endcode - * - * \tparam SampleIteratorT [inferred] Random-access input iterator type for reading input samples. \iterator - * \tparam CounterT [inferred] Integer type for histogram bin counters - * \tparam LevelT [inferred] Type for specifying boundaries (levels) - * \tparam OffsetT [inferred] Signed integer type for sequence offsets, list lengths, pointer differences, etc. \offset_size1 - */ - template < - typename SampleIteratorT, - typename CounterT, - typename LevelT, - typename OffsetT> - CUB_RUNTIME_FUNCTION - static cudaError_t HistogramRange( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - SampleIteratorT d_samples, ///< [in] The pointer to the input sequence of data samples. - CounterT* d_histogram, ///< [out] The pointer to the histogram counter output array of length num_levels - 1. - int num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples. Implies that the number of bins is num_levels - 1. - LevelT* d_levels, ///< [in] The pointer to the array of boundaries (levels). Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive. - OffsetT num_samples, ///< [in] The number of data samples per row in the region of interest - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. - { - /// The sample value type of the input iterator - typedef typename std::iterator_traits::value_type SampleT; - - CounterT* d_histogram1[1] = {d_histogram}; - int num_levels1[1] = {num_levels}; - LevelT* d_levels1[1] = {d_levels}; - - return MultiHistogramRange<1, 1>( - d_temp_storage, - temp_storage_bytes, - d_samples, - d_histogram1, - num_levels1, - d_levels1, - num_samples, - 1, - sizeof(SampleT) * num_samples, - stream, - debug_synchronous); - } - - - /** - * \brief Computes an intensity histogram from a sequence of data samples using the specified bin boundary levels. - * - * \par - * - A two-dimensional region of interest within \p d_samples can be specified - * using the \p num_row_samples, num_rows, and \p row_stride_bytes parameters. - * - The row stride must be a whole multiple of the sample data type - * size, i.e., (row_stride_bytes % sizeof(SampleT)) == 0. 
- * - The number of histogram bins is (\p num_levels - 1) - * - The value range for bini is [level[i], level[i+1]) - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates the computation of a six-bin histogram - * from a 2x5 region of interest within a flattened 2x7 array of float samples. - * - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for input samples and - * // output histogram - * int num_row_samples; // e.g., 5 - * int num_rows; // e.g., 2; - * int row_stride_bytes; // e.g., 7 * sizeof(float) - * float* d_samples; // e.g., [2.2, 6.0, 7.1, 2.9, 3.5, -, -, - * // 0.3, 2.9, 2.0, 6.1, 999.5, -, -] - * int* d_histogram; // e.g., [ , , , , , , , ] - * int num_levels // e.g., 7 (seven level boundaries for six bins) - * float *d_levels; // e.g., [0.0, 2.0, 4.0, 6.0, 8.0, 12.0, 16.0] - * ... - * - * // Determine temporary device storage requirements - * void* d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceHistogram::HistogramRange(d_temp_storage, temp_storage_bytes, - * d_samples, d_histogram, num_levels, d_levels, - * num_row_samples, num_rows, row_stride_bytes); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Compute histograms - * cub::DeviceHistogram::HistogramRange(d_temp_storage, temp_storage_bytes, - * d_samples, d_histogram, num_levels, d_levels, - * num_row_samples, num_rows, row_stride_bytes); - * - * // d_histogram <-- [1, 0, 5, 0, 3, 0, 0, 0]; - * - * \endcode - * - * \tparam SampleIteratorT [inferred] Random-access input iterator type for reading input samples. \iterator - * \tparam CounterT [inferred] Integer type for histogram bin counters - * \tparam LevelT [inferred] Type for specifying boundaries (levels) - * \tparam OffsetT [inferred] Signed integer type for sequence offsets, list lengths, pointer differences, etc. \offset_size1 - */ - template < - typename SampleIteratorT, - typename CounterT, - typename LevelT, - typename OffsetT> - CUB_RUNTIME_FUNCTION - static cudaError_t HistogramRange( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - SampleIteratorT d_samples, ///< [in] The pointer to the input sequence of data samples. - CounterT* d_histogram, ///< [out] The pointer to the histogram counter output array of length num_levels - 1. - int num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples. Implies that the number of bins is num_levels - 1. - LevelT* d_levels, ///< [in] The pointer to the array of boundaries (levels). Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive. - OffsetT num_row_samples, ///< [in] The number of data samples per row in the region of interest - OffsetT num_rows, ///< [in] The number of rows in the region of interest - size_t row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. 
May cause significant slowdown. Default is \p false. - { - CounterT* d_histogram1[1] = {d_histogram}; - int num_levels1[1] = {num_levels}; - LevelT* d_levels1[1] = {d_levels}; - - return MultiHistogramRange<1, 1>( - d_temp_storage, - temp_storage_bytes, - d_samples, - d_histogram1, - num_levels1, - d_levels1, - num_row_samples, - num_rows, - row_stride_bytes, - stream, - debug_synchronous); - } - - /** - * \brief Computes per-channel intensity histograms from a sequence of multi-channel "pixel" data samples using the specified bin boundary levels. - * - * \par - * - The input is a sequence of pixel structures, where each pixel comprises - * a record of \p NUM_CHANNELS consecutive data samples (e.g., an RGBA pixel). - * - Of the \p NUM_CHANNELS specified, the function will only compute histograms - * for the first \p NUM_ACTIVE_CHANNELS (e.g., RGB histograms from RGBA - * pixel samples). - * - The number of histogram bins for channeli is num_levels[i] - 1. - * - For channeli, the range of values for all histogram bins - * have the same width: (upper_level[i] - lower_level[i]) / ( num_levels[i] - 1) - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates the computation of three 4-bin RGB histograms - * from a quad-channel sequence of RGBA pixels (8 bits per channel per pixel) - * - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for input samples - * // and output histograms - * int num_pixels; // e.g., 5 - * unsigned char *d_samples; // e.g., [(2, 6, 7, 5),(3, 0, 2, 1),(7, 0, 6, 2), - * // (0, 6, 7, 5),(3, 0, 2, 6)] - * unsigned int *d_histogram[3]; // e.g., [[ -, -, -, -],[ -, -, -, -],[ -, -, -, -]]; - * int num_levels[3]; // e.g., {5, 5, 5}; - * unsigned int *d_levels[3]; // e.g., [ [0, 2, 4, 6, 8], - * // [0, 2, 4, 6, 8], - * // [0, 2, 4, 6, 8] ]; - * ... - * - * // Determine temporary device storage requirements - * void* d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceHistogram::MultiHistogramRange<4, 3>(d_temp_storage, temp_storage_bytes, - * d_samples, d_histogram, num_levels, d_levels, num_pixels); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Compute histograms - * cub::DeviceHistogram::MultiHistogramRange<4, 3>(d_temp_storage, temp_storage_bytes, - * d_samples, d_histogram, num_levels, d_levels, num_pixels); - * - * // d_histogram <-- [ [1, 3, 0, 1], - * // [3, 0, 0, 2], - * // [0, 2, 0, 3] ] - * - * \endcode - * - * \tparam NUM_CHANNELS Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed) - * \tparam NUM_ACTIVE_CHANNELS [inferred] Number of channels actively being histogrammed - * \tparam SampleIteratorT [inferred] Random-access input iterator type for reading input samples. \iterator - * \tparam CounterT [inferred] Integer type for histogram bin counters - * \tparam LevelT [inferred] Type for specifying boundaries (levels) - * \tparam OffsetT [inferred] Signed integer type for sequence offsets, list lengths, pointer differences, etc. \offset_size1 - */ - template < - int NUM_CHANNELS, - int NUM_ACTIVE_CHANNELS, - typename SampleIteratorT, - typename CounterT, - typename LevelT, - typename OffsetT> - CUB_RUNTIME_FUNCTION - static cudaError_t MultiHistogramRange( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. 
When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
- size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
- SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples).
- CounterT* d_histogram[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel i, the allocation length of d_histogram[i] should be num_levels[i] - 1.
- int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel i is num_levels[i] - 1.
- LevelT* d_levels[NUM_ACTIVE_CHANNELS], ///< [in] The pointers to the arrays of boundaries (levels), one for each active channel. Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive.
- OffsetT num_pixels, ///< [in] The number of multi-channel pixels (i.e., the length of \p d_samples / NUM_CHANNELS)
- cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0.
- bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false.
- {
- /// The sample value type of the input iterator
- typedef typename std::iterator_traits<SampleIteratorT>::value_type SampleT;
-
- return MultiHistogramRange<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>(
- d_temp_storage,
- temp_storage_bytes,
- d_samples,
- d_histogram,
- num_levels,
- d_levels,
- num_pixels,
- 1,
- sizeof(SampleT) * NUM_CHANNELS * num_pixels,
- stream,
- debug_synchronous);
- }
-
-
- /**
- * \brief Computes per-channel intensity histograms from a sequence of multi-channel "pixel" data samples using the specified bin boundary levels.
- *
- * \par
- * - The input is a sequence of pixel structures, where each pixel comprises
- * a record of \p NUM_CHANNELS consecutive data samples (e.g., an RGBA pixel).
- * - Of the \p NUM_CHANNELS specified, the function will only compute histograms
- * for the first \p NUM_ACTIVE_CHANNELS (e.g., RGB histograms from RGBA
- * pixel samples).
- * - A two-dimensional region of interest within \p d_samples can be specified
- * using the \p num_row_pixels, \p num_rows, and \p row_stride_bytes parameters.
- * - The row stride must be a whole multiple of the sample data type
- * size, i.e., (row_stride_bytes % sizeof(SampleT)) == 0.
- * - The number of histogram bins for channel i is num_levels[i] - 1.
- * - For channel i, the value range for bin j is [d_levels[i][j], d_levels[i][j+1])
- * - \devicestorage
- *
- * \par Snippet
- * The code snippet below illustrates the computation of three 4-bin RGB histograms from a 2x3 region of
- * interest within a flattened 2x4 array of quad-channel RGBA pixels (8 bits per channel per pixel).
- * - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for input samples - * // and output histograms - * int num_row_pixels; // e.g., 3 - * int num_rows; // e.g., 2 - * size_t row_stride_bytes; // e.g., 4 * sizeof(unsigned char) * NUM_CHANNELS - * unsigned char* d_samples; // e.g., [(2, 6, 7, 5),(3, 0, 2, 1),(1, 1, 1, 1),(-, -, -, -), - * // (7, 0, 6, 2),(0, 6, 7, 5),(3, 0, 2, 6),(-, -, -, -)] - * int* d_histogram[3]; // e.g., [[ -, -, -, -],[ -, -, -, -],[ -, -, -, -]]; - * int num_levels[3]; // e.g., {5, 5, 5}; - * unsigned int* d_levels[3]; // e.g., [ [0, 2, 4, 6, 8], - * // [0, 2, 4, 6, 8], - * // [0, 2, 4, 6, 8] ]; - * ... - * - * // Determine temporary device storage requirements - * void* d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceHistogram::MultiHistogramRange<4, 3>(d_temp_storage, temp_storage_bytes, - * d_samples, d_histogram, num_levels, d_levels, num_row_pixels, num_rows, row_stride_bytes); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Compute histograms - * cub::DeviceHistogram::MultiHistogramRange<4, 3>(d_temp_storage, temp_storage_bytes, - * d_samples, d_histogram, num_levels, d_levels, num_row_pixels, num_rows, row_stride_bytes); - * - * // d_histogram <-- [ [2, 3, 0, 1], - * // [3, 0, 0, 2], - * // [1, 2, 0, 3] ] - * - * \endcode - * - * \tparam NUM_CHANNELS Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed) - * \tparam NUM_ACTIVE_CHANNELS [inferred] Number of channels actively being histogrammed - * \tparam SampleIteratorT [inferred] Random-access input iterator type for reading input samples. \iterator - * \tparam CounterT [inferred] Integer type for histogram bin counters - * \tparam LevelT [inferred] Type for specifying boundaries (levels) - * \tparam OffsetT [inferred] Signed integer type for sequence offsets, list lengths, pointer differences, etc. \offset_size1 - */ - template < - int NUM_CHANNELS, - int NUM_ACTIVE_CHANNELS, - typename SampleIteratorT, - typename CounterT, - typename LevelT, - typename OffsetT> - CUB_RUNTIME_FUNCTION - static cudaError_t MultiHistogramRange( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples). - CounterT* d_histogram[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channeli, the allocation length of d_histogram[i] should be num_levels[i] - 1. - int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channeli is num_levels[i] - 1. - LevelT* d_levels[NUM_ACTIVE_CHANNELS], ///< [in] The pointers to the arrays of boundaries (levels), one for each active channel. Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive. 
- OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest
- OffsetT num_rows, ///< [in] The number of rows in the region of interest
- size_t row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest
- cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0.
- bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false.
- {
- /// The sample value type of the input iterator
- typedef typename std::iterator_traits<SampleIteratorT>::value_type SampleT;
- Int2Type<sizeof(SampleT) == 1> is_byte_sample;
-
- if ((sizeof(OffsetT) > sizeof(int)) &&
- ((unsigned long long) (num_rows * row_stride_bytes) < (unsigned long long) std::numeric_limits<int>::max()))
- {
- // Down-convert OffsetT data type
- return DipatchHistogram<NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleIteratorT, CounterT, LevelT, int>::DispatchRange(
- d_temp_storage, temp_storage_bytes, d_samples, d_histogram, num_levels, d_levels,
- (int) num_row_pixels, (int) num_rows, (int) (row_stride_bytes / sizeof(SampleT)),
- stream, debug_synchronous, is_byte_sample);
- }
-
- return DipatchHistogram<NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleIteratorT, CounterT, LevelT, OffsetT>::DispatchRange(
- d_temp_storage, temp_storage_bytes, d_samples, d_histogram, num_levels, d_levels,
- num_row_pixels, num_rows, (OffsetT) (row_stride_bytes / sizeof(SampleT)),
- stream, debug_synchronous, is_byte_sample);
- }
-
-
-
- //@} end member group
-};
-
-} // CUB namespace
-CUB_NS_POSTFIX // Optional outer namespace(s)
-
-
diff --git a/ml-xgboost/cub/cub/device/device_partition.cuh b/ml-xgboost/cub/cub/device/device_partition.cuh
deleted file mode 100644
index d51969e..0000000
--- a/ml-xgboost/cub/cub/device/device_partition.cuh
+++ /dev/null
@@ -1,273 +0,0 @@
-
-/******************************************************************************
- * Copyright (c) 2011, Duane Merrill. All rights reserved.
- * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the NVIDIA CORPORATION nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
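Two implementation details in the DeviceHistogram dispatch code above deserve a note. First, the region-of-interest addressing shared by the multi-channel entry points: rows begin row_stride_bytes apart (hence the row_stride_bytes / sizeof(SampleT) conversion to a sample stride), each pixel carries NUM_CHANNELS interleaved samples, and only the first NUM_ACTIVE_CHANNELS are histogrammed. Second, the sizeof(OffsetT) > sizeof(int) branch: when the caller instantiates the entry point with a wide offset type but the region of interest spans fewer than INT_MAX bytes, the call is re-dispatched with int offsets so the kernels can use cheaper 32-bit index arithmetic. The sketch below paraphrases both on the host; it is illustrative only, and the helper names are ours, not CUB API:

#include <cstddef>
#include <limits>

// Host model of the traversal implied by (num_row_pixels, num_rows, row_stride_bytes).
template <int NUM_CHANNELS, int NUM_ACTIVE_CHANNELS, typename SampleT, typename LevelT>
void HostMultiHistogramRangeRoi(
    const SampleT* samples, int num_row_pixels, int num_rows, size_t row_stride_bytes,
    const int num_levels[NUM_ACTIVE_CHANNELS],
    LevelT* const levels[NUM_ACTIVE_CHANNELS],
    int* histograms[NUM_ACTIVE_CHANNELS])
{
    size_t samples_per_row = row_stride_bytes / sizeof(SampleT); // stride must divide evenly
    for (int row = 0; row < num_rows; ++row)
        for (int pixel = 0; pixel < num_row_pixels; ++pixel)
            for (int ch = 0; ch < NUM_ACTIVE_CHANNELS; ++ch)
            {
                SampleT s = samples[row * samples_per_row + (size_t) pixel * NUM_CHANNELS + ch];
                for (int b = 0; b < num_levels[ch] - 1; ++b)
                    if (s >= levels[ch][b] && s < levels[ch][b + 1])
                    {
                        ++histograms[ch][b]; // lower boundary inclusive, upper exclusive
                        break;
                    }
            }
}

// Host paraphrase of the offset down-conversion guard.
template <typename OffsetT>
bool CanUse32BitOffsets(OffsetT num_rows, size_t row_stride_bytes)
{
    unsigned long long extent = (unsigned long long) num_rows * row_stride_bytes;
    return (sizeof(OffsetT) > sizeof(int)) &&
           (extent < (unsigned long long) std::numeric_limits<int>::max());
}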
- * - ******************************************************************************/ - -/** - * \file - * cub::DevicePartition provides device-wide, parallel operations for partitioning sequences of data items residing within device-accessible memory. - */ - -#pragma once - -#include -#include - -#include "dispatch/dispatch_select_if.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \brief DevicePartition provides device-wide, parallel operations for partitioning sequences of data items residing within device-accessible memory. ![](partition_logo.png) - * \ingroup SingleModule - * - * \par Overview - * These operations apply a selection criterion to construct a partitioned output sequence from items selected/unselected from - * a specified input sequence. - * - * \par Usage Considerations - * \cdp_class{DevicePartition} - * - * \par Performance - * \linear_performance{partition} - * - * \par - * The following chart illustrates DevicePartition::If - * performance across different CUDA architectures for \p int32 items, - * where 50% of the items are randomly selected for the first partition. - * \plots_below - * - * \image html partition_if_int32_50_percent.png - * - */ -struct DevicePartition -{ - /** - * \brief Uses the \p d_flags sequence to split the corresponding items from \p d_in into a partitioned sequence \p d_out. The total number of items copied into the first partition is written to \p d_num_selected_out. ![](partition_flags_logo.png) - * - * \par - * - The value type of \p d_flags must be castable to \p bool (e.g., \p bool, \p char, \p int, etc.). - * - Copies of the selected items are compacted into \p d_out and maintain their original - * relative ordering, however copies of the unselected items are compacted into the - * rear of \p d_out in reverse order. - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates the compaction of items selected from an \p int device vector. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for input, flags, and output - * int num_items; // e.g., 8 - * int *d_in; // e.g., [1, 2, 3, 4, 5, 6, 7, 8] - * char *d_flags; // e.g., [1, 0, 0, 1, 0, 1, 1, 0] - * int *d_out; // e.g., [ , , , , , , , ] - * int *d_num_selected_out; // e.g., [ ] - * ... 
- * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run selection - * cub::DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items); - * - * // d_out <-- [1, 4, 6, 7, 8, 5, 3, 2] - * // d_num_selected_out <-- [4] - * - * \endcode - * - * \tparam InputIteratorT [inferred] Random-access input iterator type for reading input items \iterator - * \tparam FlagIterator [inferred] Random-access input iterator type for reading selection flags \iterator - * \tparam OutputIteratorT [inferred] Random-access output iterator type for writing output items \iterator - * \tparam NumSelectedIteratorT [inferred] Output iterator type for recording the number of items selected \iterator - */ - template < - typename InputIteratorT, - typename FlagIterator, - typename OutputIteratorT, - typename NumSelectedIteratorT> - CUB_RUNTIME_FUNCTION __forceinline__ - static cudaError_t Flagged( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items - FlagIterator d_flags, ///< [in] Pointer to the input sequence of selection flags - OutputIteratorT d_out, ///< [out] Pointer to the output sequence of partitioned data items - NumSelectedIteratorT d_num_selected_out, ///< [out] Pointer to the output total number of items selected (i.e., the offset of the unselected partition) - int num_items, ///< [in] Total number of items to select from - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. - { - typedef int OffsetT; // Signed integer type for global offsets - typedef NullType SelectOp; // Selection op (not used) - typedef NullType EqualityOp; // Equality operator (not used) - - return DispatchSelectIf::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_in, - d_flags, - d_out, - d_num_selected_out, - SelectOp(), - EqualityOp(), - num_items, - stream, - debug_synchronous); - } - - - /** - * \brief Uses the \p select_op functor to split the corresponding items from \p d_in into a partitioned sequence \p d_out. The total number of items copied into the first partition is written to \p d_num_selected_out. ![](partition_logo.png) - * - * \par - * - Copies of the selected items are compacted into \p d_out and maintain their original - * relative ordering, however copies of the unselected items are compacted into the - * rear of \p d_out in reverse order. - * - \devicestorage - * - * \par Performance - * The following charts illustrate saturated partition-if performance across different - * CUDA architectures for \p int32 and \p int64 items, respectively. Items are - * selected for the first partition with 50% probability. 
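Both DevicePartition entry points share the ordering guarantee described above: copies of the selected items are compacted to the front of d_out in their original relative order, while the rejected items fill d_out from the rear in reverse order, and the count written to d_num_selected_out is also the offset of the rejected partition. A few lines of host code capture the contract; this is an illustrative model with our own helper name, and it reproduces the d_out shown in the Flagged snippet above ([1, 4, 6, 7, 8, 5, 3, 2] with 4 items selected):

#include <vector>

// Host model of DevicePartition::Flagged semantics (illustrative only).
template <typename T, typename FlagT>
int HostPartitionFlagged(const std::vector<T>& in, const std::vector<FlagT>& flags,
                         std::vector<T>& out)
{
    out.resize(in.size());
    size_t front = 0, back = in.size();
    for (size_t i = 0; i < in.size(); ++i)
    {
        if (flags[i]) out[front++] = in[i]; // selected: stable, front-to-back
        else          out[--back]  = in[i]; // rejected: reversed, back-to-front
    }
    return (int) front; // what d_num_selected_out would receive
}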
- *
- * \image html partition_if_int32_50_percent.png
- * \image html partition_if_int64_50_percent.png
- *
- * \par
- * The following charts are similar, but 5% selection probability for the first partition:
- *
- * \image html partition_if_int32_5_percent.png
- * \image html partition_if_int64_5_percent.png
- *
- * \par Snippet
- * The code snippet below illustrates the compaction of items selected from an \p int device vector.
- * \par
- * \code
- * #include <cub/cub.cuh> // or equivalently <cub/device/device_partition.cuh>
- *
- * // Functor type for selecting values less than some criteria
- * struct LessThan
- * {
- * int compare;
- *
- * CUB_RUNTIME_FUNCTION __forceinline__
- * LessThan(int compare) : compare(compare) {}
- *
- * CUB_RUNTIME_FUNCTION __forceinline__
- * bool operator()(const int &a) const {
- * return (a < compare);
- * }
- * };
- *
- * // Declare, allocate, and initialize device-accessible pointers for input and output
- * int num_items; // e.g., 8
- * int *d_in; // e.g., [0, 2, 3, 9, 5, 2, 81, 8]
- * int *d_out; // e.g., [ , , , , , , , ]
- * int *d_num_selected_out; // e.g., [ ]
- * LessThan select_op(7);
- * ...
- *
- * // Determine temporary device storage requirements
- * void *d_temp_storage = NULL;
- * size_t temp_storage_bytes = 0;
- * cub::DevicePartition::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op);
- *
- * // Allocate temporary storage
- * cudaMalloc(&d_temp_storage, temp_storage_bytes);
- *
- * // Run selection
- * cub::DevicePartition::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op);
- *
- * // d_out <-- [0, 2, 3, 5, 2, 8, 81, 9]
- * // d_num_selected_out <-- [5]
- *
- * \endcode
- *
- * \tparam InputIteratorT [inferred] Random-access input iterator type for reading input items \iterator
- * \tparam OutputIteratorT [inferred] Random-access output iterator type for writing output items \iterator
- * \tparam NumSelectedIteratorT [inferred] Output iterator type for recording the number of items selected \iterator
- * \tparam SelectOp [inferred] Selection functor type having member bool operator()(const T &a)
- */
- template <
- typename InputIteratorT,
- typename OutputIteratorT,
- typename NumSelectedIteratorT,
- typename SelectOp>
- CUB_RUNTIME_FUNCTION __forceinline__
- static cudaError_t If(
- void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
- size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
- InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items
- OutputIteratorT d_out, ///< [out] Pointer to the output sequence of partitioned data items
- NumSelectedIteratorT d_num_selected_out, ///< [out] Pointer to the output total number of items selected (i.e., the offset of the unselected partition)
- int num_items, ///< [in] Total number of items to select from
- SelectOp select_op, ///< [in] Unary selection operator
- cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0.
- bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false.
- { - typedef int OffsetT; // Signed integer type for global offsets - typedef NullType* FlagIterator; // FlagT iterator type (not used) - typedef NullType EqualityOp; // Equality operator (not used) - - return DispatchSelectIf::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_in, - NULL, - d_out, - d_num_selected_out, - select_op, - EqualityOp(), - num_items, - stream, - debug_synchronous); - } - -}; - -/** - * \example example_device_partition_flagged.cu - * \example example_device_partition_if.cu - */ - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - - diff --git a/ml-xgboost/cub/cub/device/device_radix_sort.cuh b/ml-xgboost/cub/cub/device/device_radix_sort.cuh deleted file mode 100644 index 1cd2609..0000000 --- a/ml-xgboost/cub/cub/device/device_radix_sort.cuh +++ /dev/null @@ -1,796 +0,0 @@ - -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::DeviceRadixSort provides device-wide, parallel operations for computing a radix sort across a sequence of data items residing within device-accessible memory. - */ - -#pragma once - -#include -#include - -#include "dispatch/dispatch_radix_sort.cuh" -#include "../util_arch.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \brief DeviceRadixSort provides device-wide, parallel operations for computing a radix sort across a sequence of data items residing within device-accessible memory. ![](sorting_logo.png) - * \ingroup SingleModule - * - * \par Overview - * The [radix sorting method](http://en.wikipedia.org/wiki/Radix_sort) arranges - * items into ascending (or descending) order. 
The algorithm relies upon a positional representation for - * keys, i.e., each key is comprised of an ordered sequence of symbols (e.g., digits, - * characters, etc.) specified from least-significant to most-significant. For a - * given input sequence of keys and a set of rules specifying a total ordering - * of the symbolic alphabet, the radix sorting method produces a lexicographic - * ordering of those keys. - * - * \par - * DeviceRadixSort can sort all of the built-in C++ numeric primitive types, e.g.: - * unsigned char, \p int, \p double, etc. Although the direct radix sorting - * method can only be applied to unsigned integral types, DeviceRadixSort - * is able to sort signed and floating-point types via simple bit-wise transformations - * that ensure lexicographic key ordering. - * - * \par Usage Considerations - * \cdp_class{DeviceRadixSort} - * - * \par Performance - * \linear_performance{radix sort} The following chart illustrates DeviceRadixSort::SortKeys - * performance across different CUDA architectures for uniform-random \p uint32 keys. - * \plots_below - * - * \image html lsb_radix_sort_int32_keys.png - * - */ -struct DeviceRadixSort -{ - - /******************************************************************//** - * \name KeyT-value pairs - *********************************************************************/ - //@{ - - /** - * \brief Sorts key-value pairs into ascending order. (~2N auxiliary storage required) - * - * \par - * - The contents of the input data are not altered by the sorting operation - * - An optional bit subrange [begin_bit, end_bit) of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. - * - \devicestorageNP For sorting using only O(P) temporary storage, see the sorting interface using DoubleBuffer wrappers below. - * - \devicestorage - * - * \par Performance - * The following charts illustrate saturated sorting performance across different - * CUDA architectures for uniform-random uint32,uint32 and - * uint64,uint64 pairs, respectively. - * - * \image html lsb_radix_sort_int32_pairs.png - * \image html lsb_radix_sort_int64_pairs.png - * - * \par Snippet - * The code snippet below illustrates the sorting of a device vector of \p int keys - * with associated vector of \p int values. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for sorting data - * int num_items; // e.g., 7 - * int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] - * int *d_keys_out; // e.g., [ ... ] - * int *d_values_in; // e.g., [0, 1, 2, 3, 4, 5, 6] - * int *d_values_out; // e.g., [ ... ] - * ... 
- * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, - * d_keys_in, d_keys_out, d_values_in, d_values_out, num_items); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run sorting operation - * cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, - * d_keys_in, d_keys_out, d_values_in, d_values_out, num_items); - * - * // d_keys_out <-- [0, 3, 5, 6, 7, 8, 9] - * // d_values_out <-- [5, 4, 3, 1, 2, 0, 6] - * - * \endcode - * - * \tparam KeyT [inferred] KeyT type - * \tparam ValueT [inferred] ValueT type - */ - template < - typename KeyT, - typename ValueT> - CUB_RUNTIME_FUNCTION - static cudaError_t SortPairs( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - const KeyT *d_keys_in, ///< [in] Pointer to the input data of key data to sort - KeyT *d_keys_out, ///< [out] Pointer to the sorted output sequence of key data - const ValueT *d_values_in, ///< [in] Pointer to the corresponding input sequence of associated value items - ValueT *d_values_out, ///< [out] Pointer to the correspondingly-reordered output sequence of associated value items - int num_items, ///< [in] Number of items to sort - int begin_bit = 0, ///< [in] [optional] The least-significant bit index (inclusive) needed for key comparison - int end_bit = sizeof(KeyT) * 8, ///< [in] [optional] The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - { - // Signed integer type for global offsets - typedef int OffsetT; - - DoubleBuffer d_keys(const_cast(d_keys_in), d_keys_out); - DoubleBuffer d_values(const_cast(d_values_in), d_values_out); - - return DispatchRadixSort::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_keys, - d_values, - num_items, - begin_bit, - end_bit, - false, - stream, - debug_synchronous); - } - - - /** - * \brief Sorts key-value pairs into ascending order. (~N auxiliary storage required) - * - * \par - * - The sorting operation is given a pair of key buffers and a corresponding - * pair of associated value buffers. Each pair is managed by a DoubleBuffer - * structure that indicates which of the two buffers is "current" (and thus - * contains the input data to be sorted). - * - The contents of both buffers within each pair may be altered by the sorting - * operation. - * - Upon completion, the sorting operation will update the "current" indicator - * within each DoubleBuffer wrapper to reference which of the two buffers - * now contains the sorted output sequence (a function of the number of key bits - * specified and the targeted device architecture). - * - An optional bit subrange [begin_bit, end_bit) of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. 
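To make the bit-subrange bullet above concrete: if 32-bit keys are known to differ only in their low 16 bits, passing begin_bit = 0 and end_bit = 16 lets the sort skip half of the radix passes. A usage sketch against the DoubleBuffer interface documented here, following the buffer naming of the surrounding snippets (error checking omitted; illustrative addition, not part of the original file):

// Keys differ only in bits [0, 16); restrict the sort to that subrange.
cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf);
cub::DoubleBuffer<int> d_values(d_value_buf, d_value_alt_buf);

void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes,
    d_keys, d_values, num_items, 0, 16); // begin_bit = 0, end_bit = 16

cudaMalloc(&d_temp_storage, temp_storage_bytes);
cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes,
    d_keys, d_values, num_items, 0, 16);
// d_keys.Current() and d_values.Current() now reference the sorted buffers.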
- * - \devicestorageP - * - \devicestorage - * - * \par Performance - * The following charts illustrate saturated sorting performance across different - * CUDA architectures for uniform-random uint32,uint32 and - * uint64,uint64 pairs, respectively. - * - * \image html lsb_radix_sort_int32_pairs.png - * \image html lsb_radix_sort_int64_pairs.png - * - * \par Snippet - * The code snippet below illustrates the sorting of a device vector of \p int keys - * with associated vector of \p int values. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for sorting data - * int num_items; // e.g., 7 - * int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] - * int *d_key_alt_buf; // e.g., [ ... ] - * int *d_value_buf; // e.g., [0, 1, 2, 3, 4, 5, 6] - * int *d_value_alt_buf; // e.g., [ ... ] - * ... - * - * // Create a set of DoubleBuffers to wrap pairs of device pointers - * cub::DoubleBuffer d_keys(d_key_buf, d_key_alt_buf); - * cub::DoubleBuffer d_values(d_value_buf, d_value_alt_buf); - * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run sorting operation - * cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items); - * - * // d_keys.Current() <-- [0, 3, 5, 6, 7, 8, 9] - * // d_values.Current() <-- [5, 4, 3, 1, 2, 0, 6] - * - * \endcode - * - * \tparam KeyT [inferred] KeyT type - * \tparam ValueT [inferred] ValueT type - */ - template < - typename KeyT, - typename ValueT> - CUB_RUNTIME_FUNCTION - static cudaError_t SortPairs( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - DoubleBuffer &d_keys, ///< [in,out] Reference to the double-buffer of keys whose "current" device-accessible buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys - DoubleBuffer &d_values, ///< [in,out] Double-buffer of values whose "current" device-accessible buffer contains the unsorted input values and, upon return, is updated to point to the sorted output values - int num_items, ///< [in] Number of items to sort - int begin_bit = 0, ///< [in] [optional] The least-significant bit index (inclusive) needed for key comparison - int end_bit = sizeof(KeyT) * 8, ///< [in] [optional] The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - { - // Signed integer type for global offsets - typedef int OffsetT; - - return DispatchRadixSort::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_keys, - d_values, - num_items, - begin_bit, - end_bit, - true, - stream, - debug_synchronous); - } - - - /** - * \brief Sorts key-value pairs into descending order. (~2N auxiliary storage required). 
- * - * \par - * - The contents of the input data are not altered by the sorting operation - * - An optional bit subrange [begin_bit, end_bit) of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. - * - \devicestorageNP For sorting using only O(P) temporary storage, see the sorting interface using DoubleBuffer wrappers below. - * - \devicestorage - * - * \par Performance - * Performance is similar to DeviceRadixSort::SortPairs. - * - * \par Snippet - * The code snippet below illustrates the sorting of a device vector of \p int keys - * with associated vector of \p int values. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for sorting data - * int num_items; // e.g., 7 - * int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] - * int *d_keys_out; // e.g., [ ... ] - * int *d_values_in; // e.g., [0, 1, 2, 3, 4, 5, 6] - * int *d_values_out; // e.g., [ ... ] - * ... - * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, - * d_keys_in, d_keys_out, d_values_in, d_values_out, num_items); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run sorting operation - * cub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, - * d_keys_in, d_keys_out, d_values_in, d_values_out, num_items); - * - * // d_keys_out <-- [9, 8, 7, 6, 5, 3, 0] - * // d_values_out <-- [6, 0, 2, 1, 3, 4, 5] - * - * \endcode - * - * \tparam KeyT [inferred] KeyT type - * \tparam ValueT [inferred] ValueT type - */ - template < - typename KeyT, - typename ValueT> - CUB_RUNTIME_FUNCTION - static cudaError_t SortPairsDescending( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - const KeyT *d_keys_in, ///< [in] Pointer to the input data of key data to sort - KeyT *d_keys_out, ///< [out] Pointer to the sorted output sequence of key data - const ValueT *d_values_in, ///< [in] Pointer to the corresponding input sequence of associated value items - ValueT *d_values_out, ///< [out] Pointer to the correspondingly-reordered output sequence of associated value items - int num_items, ///< [in] Number of items to sort - int begin_bit = 0, ///< [in] [optional] The least-significant bit index (inclusive) needed for key comparison - int end_bit = sizeof(KeyT) * 8, ///< [in] [optional] The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
- { - // Signed integer type for global offsets - typedef int OffsetT; - - DoubleBuffer d_keys(const_cast(d_keys_in), d_keys_out); - DoubleBuffer d_values(const_cast(d_values_in), d_values_out); - - return DispatchRadixSort::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_keys, - d_values, - num_items, - begin_bit, - end_bit, - false, - stream, - debug_synchronous); - } - - - /** - * \brief Sorts key-value pairs into descending order. (~N auxiliary storage required). - * - * \par - * - The sorting operation is given a pair of key buffers and a corresponding - * pair of associated value buffers. Each pair is managed by a DoubleBuffer - * structure that indicates which of the two buffers is "current" (and thus - * contains the input data to be sorted). - * - The contents of both buffers within each pair may be altered by the sorting - * operation. - * - Upon completion, the sorting operation will update the "current" indicator - * within each DoubleBuffer wrapper to reference which of the two buffers - * now contains the sorted output sequence (a function of the number of key bits - * specified and the targeted device architecture). - * - An optional bit subrange [begin_bit, end_bit) of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. - * - \devicestorageP - * - \devicestorage - * - * \par Performance - * Performance is similar to DeviceRadixSort::SortPairs. - * - * \par Snippet - * The code snippet below illustrates the sorting of a device vector of \p int keys - * with associated vector of \p int values. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for sorting data - * int num_items; // e.g., 7 - * int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] - * int *d_key_alt_buf; // e.g., [ ... ] - * int *d_value_buf; // e.g., [0, 1, 2, 3, 4, 5, 6] - * int *d_value_alt_buf; // e.g., [ ... ] - * ... - * - * // Create a set of DoubleBuffers to wrap pairs of device pointers - * cub::DoubleBuffer d_keys(d_key_buf, d_key_alt_buf); - * cub::DoubleBuffer d_values(d_value_buf, d_value_alt_buf); - * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run sorting operation - * cub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items); - * - * // d_keys.Current() <-- [9, 8, 7, 6, 5, 3, 0] - * // d_values.Current() <-- [6, 0, 2, 1, 3, 4, 5] - * - * \endcode - * - * \tparam KeyT [inferred] KeyT type - * \tparam ValueT [inferred] ValueT type - */ - template < - typename KeyT, - typename ValueT> - CUB_RUNTIME_FUNCTION - static cudaError_t SortPairsDescending( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
- size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - DoubleBuffer &d_keys, ///< [in,out] Reference to the double-buffer of keys whose "current" device-accessible buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys - DoubleBuffer &d_values, ///< [in,out] Double-buffer of values whose "current" device-accessible buffer contains the unsorted input values and, upon return, is updated to point to the sorted output values - int num_items, ///< [in] Number of items to sort - int begin_bit = 0, ///< [in] [optional] The least-significant bit index (inclusive) needed for key comparison - int end_bit = sizeof(KeyT) * 8, ///< [in] [optional] The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - { - // Signed integer type for global offsets - typedef int OffsetT; - - return DispatchRadixSort::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_keys, - d_values, - num_items, - begin_bit, - end_bit, - true, - stream, - debug_synchronous); - } - - - //@} end member group - /******************************************************************//** - * \name Keys-only - *********************************************************************/ - //@{ - - - /** - * \brief Sorts keys into ascending order. (~2N auxiliary storage required) - * - * \par - * - The contents of the input data are not altered by the sorting operation - * - An optional bit subrange [begin_bit, end_bit) of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. - * - \devicestorageNP For sorting using only O(P) temporary storage, see the sorting interface using DoubleBuffer wrappers below. - * - \devicestorage - * - * \par Performance - * The following charts illustrate saturated sorting performance across different - * CUDA architectures for uniform-random \p uint32 and \p uint64 keys, respectively. - * - * \image html lsb_radix_sort_int32_keys.png - * \image html lsb_radix_sort_int64_keys.png - * - * \par Snippet - * The code snippet below illustrates the sorting of a device vector of \p int keys. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for sorting data - * int num_items; // e.g., 7 - * int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] - * int *d_keys_out; // e.g., [ ... ] - * ... - * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, num_items); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run sorting operation - * cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, num_items); - * - * // d_keys_out <-- [0, 3, 5, 6, 7, 8, 9] - * - * \endcode - * - * \tparam KeyT [inferred] KeyT type - */ - template - CUB_RUNTIME_FUNCTION - static cudaError_t SortKeys( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. 
When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - const KeyT *d_keys_in, ///< [in] Pointer to the input data of key data to sort - KeyT *d_keys_out, ///< [out] Pointer to the sorted output sequence of key data - int num_items, ///< [in] Number of items to sort - int begin_bit = 0, ///< [in] [optional] The least-significant bit index (inclusive) needed for key comparison - int end_bit = sizeof(KeyT) * 8, ///< [in] [optional] The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - { - // Signed integer type for global offsets - typedef int OffsetT; - - // Null value type - DoubleBuffer d_keys(const_cast(d_keys_in), d_keys_out); - DoubleBuffer d_values; - - return DispatchRadixSort::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_keys, - d_values, - num_items, - begin_bit, - end_bit, - false, - stream, - debug_synchronous); - } - - - /** - * \brief Sorts keys into ascending order. (~N auxiliary storage required). - * - * \par - * - The sorting operation is given a pair of key buffers managed by a - * DoubleBuffer structure that indicates which of the two buffers is - * "current" (and thus contains the input data to be sorted). - * - The contents of both buffers may be altered by the sorting operation. - * - Upon completion, the sorting operation will update the "current" indicator - * within the DoubleBuffer wrapper to reference which of the two buffers - * now contains the sorted output sequence (a function of the number of key bits - * specified and the targeted device architecture). - * - An optional bit subrange [begin_bit, end_bit) of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. - * - \devicestorageP - * - \devicestorage - * - * \par Performance - * The following charts illustrate saturated sorting performance across different - * CUDA architectures for uniform-random \p uint32 and \p uint64 keys, respectively. - * - * \image html lsb_radix_sort_int32_keys.png - * \image html lsb_radix_sort_int64_keys.png - * - * \par Snippet - * The code snippet below illustrates the sorting of a device vector of \p int keys. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for sorting data - * int num_items; // e.g., 7 - * int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] - * int *d_key_alt_buf; // e.g., [ ... ] - * ... 
- * - * // Create a DoubleBuffer to wrap the pair of device pointers - * cub::DoubleBuffer d_keys(d_key_buf, d_key_alt_buf); - * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys, num_items); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run sorting operation - * cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys, num_items); - * - * // d_keys.Current() <-- [0, 3, 5, 6, 7, 8, 9] - * - * \endcode - * - * \tparam KeyT [inferred] KeyT type - */ - template - CUB_RUNTIME_FUNCTION - static cudaError_t SortKeys( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - DoubleBuffer &d_keys, ///< [in,out] Reference to the double-buffer of keys whose "current" device-accessible buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys - int num_items, ///< [in] Number of items to sort - int begin_bit = 0, ///< [in] [optional] The least-significant bit index (inclusive) needed for key comparison - int end_bit = sizeof(KeyT) * 8, ///< [in] [optional] The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - { - // Signed integer type for global offsets - typedef int OffsetT; - - // Null value type - DoubleBuffer d_values; - - return DispatchRadixSort::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_keys, - d_values, - num_items, - begin_bit, - end_bit, - true, - stream, - debug_synchronous); - } - - /** - * \brief Sorts keys into descending order. (~2N auxiliary storage required). - * - * \par - * - The contents of the input data are not altered by the sorting operation - * - An optional bit subrange [begin_bit, end_bit) of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. - * - \devicestorageNP For sorting using only O(P) temporary storage, see the sorting interface using DoubleBuffer wrappers below. - * - \devicestorage - * - * \par Performance - * Performance is similar to DeviceRadixSort::SortKeys. - * - * \par Snippet - * The code snippet below illustrates the sorting of a device vector of \p int keys. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for sorting data - * int num_items; // e.g., 7 - * int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] - * int *d_keys_out; // e.g., [ ... ] - * ... 
- *
- * // Determine temporary device storage requirements
- * void *d_temp_storage = NULL;
- * size_t temp_storage_bytes = 0;
- * cub::DeviceRadixSort::SortKeysDescending(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, num_items);
- *
- * // Allocate temporary storage
- * cudaMalloc(&d_temp_storage, temp_storage_bytes);
- *
- * // Run sorting operation
- * cub::DeviceRadixSort::SortKeysDescending(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, num_items);
- *
- * // d_keys_out <-- [9, 8, 7, 6, 5, 3, 0]
- *
- * \endcode
- *
- * \tparam KeyT [inferred] KeyT type
- */
- template <typename KeyT>
- CUB_RUNTIME_FUNCTION
- static cudaError_t SortKeysDescending(
- void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
- size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
- const KeyT *d_keys_in, ///< [in] Pointer to the input data of key data to sort
- KeyT *d_keys_out, ///< [out] Pointer to the sorted output sequence of key data
- int num_items, ///< [in] Number of items to sort
- int begin_bit = 0, ///< [in] [optional] The least-significant bit index (inclusive) needed for key comparison
- int end_bit = sizeof(KeyT) * 8, ///< [in] [optional] The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8)
- cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0.
- bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false.
- {
- // Signed integer type for global offsets
- typedef int OffsetT;
-
- DoubleBuffer<KeyT> d_keys(const_cast<KeyT*>(d_keys_in), d_keys_out);
- DoubleBuffer<NullType> d_values;
-
- return DispatchRadixSort<true, KeyT, NullType, OffsetT>::Dispatch(
- d_temp_storage,
- temp_storage_bytes,
- d_keys,
- d_values,
- num_items,
- begin_bit,
- end_bit,
- false,
- stream,
- debug_synchronous);
- }
-
-
- /**
- * \brief Sorts keys into descending order. (~N auxiliary storage required).
- *
- * \par
- * - The sorting operation is given a pair of key buffers managed by a
- * DoubleBuffer structure that indicates which of the two buffers is
- * "current" (and thus contains the input data to be sorted).
- * - The contents of both buffers may be altered by the sorting operation.
- * - Upon completion, the sorting operation will update the "current" indicator
- * within the DoubleBuffer wrapper to reference which of the two buffers
- * now contains the sorted output sequence (a function of the number of key bits
- * specified and the targeted device architecture).
- * - An optional bit subrange [begin_bit, end_bit) of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement.
- * - \devicestorageP
- * - \devicestorage
- *
- * \par Performance
- * Performance is similar to DeviceRadixSort::SortKeys.
- *
- * \par Snippet
- * The code snippet below illustrates the sorting of a device vector of \p int keys.
- * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for sorting data - * int num_items; // e.g., 7 - * int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] - * int *d_key_alt_buf; // e.g., [ ... ] - * ... - * - * // Create a DoubleBuffer to wrap the pair of device pointers - * cub::DoubleBuffer d_keys(d_key_buf, d_key_alt_buf); - * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceRadixSort::SortKeysDescending(d_temp_storage, temp_storage_bytes, d_keys, num_items); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run sorting operation - * cub::DeviceRadixSort::SortKeysDescending(d_temp_storage, temp_storage_bytes, d_keys, num_items); - * - * // d_keys.Current() <-- [9, 8, 7, 6, 5, 3, 0] - * - * \endcode - * - * \tparam KeyT [inferred] KeyT type - */ - template - CUB_RUNTIME_FUNCTION - static cudaError_t SortKeysDescending( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - DoubleBuffer &d_keys, ///< [in,out] Reference to the double-buffer of keys whose "current" device-accessible buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys - int num_items, ///< [in] Number of items to sort - int begin_bit = 0, ///< [in] [optional] The least-significant bit index (inclusive) needed for key comparison - int end_bit = sizeof(KeyT) * 8, ///< [in] [optional] The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - { - // Signed integer type for global offsets - typedef int OffsetT; - - // Null value type - DoubleBuffer d_values; - - return DispatchRadixSort::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_keys, - d_values, - num_items, - begin_bit, - end_bit, - true, - stream, - debug_synchronous); - } - - - //@} end member group - - -}; - -/** - * \example example_device_radix_sort.cu - */ - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - - diff --git a/ml-xgboost/cub/cub/device/device_reduce.cuh b/ml-xgboost/cub/cub/device/device_reduce.cuh deleted file mode 100644 index 13f5bbb..0000000 --- a/ml-xgboost/cub/cub/device/device_reduce.cuh +++ /dev/null @@ -1,699 +0,0 @@ - -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
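The DoubleBuffer overloads that close the radix-sort interface above trade the ~2N pointer-based form for ~N auxiliary storage. A sketch under the same assumptions as the previous example; the key point is that the result must be read through d_keys.Current(), since either buffer may end up holding the sorted sequence:

#include <cub/cub.cuh>
#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    const int num_items = 7;
    int h_keys[num_items] = {8, 6, 7, 5, 3, 0, 9};
    int *d_key_buf = NULL, *d_key_alt_buf = NULL;
    cudaMalloc(&d_key_buf, num_items * sizeof(int));
    cudaMalloc(&d_key_alt_buf, num_items * sizeof(int));
    cudaMemcpy(d_key_buf, h_keys, sizeof(h_keys), cudaMemcpyHostToDevice);

    // Both buffers may be overwritten; the wrapper tracks which one is "current"
    cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf);

    void *d_temp_storage = NULL;
    size_t temp_storage_bytes = 0;
    cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys, num_items);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys, num_items);

    // Read back through Current(): the sorted keys may live in either buffer
    cudaMemcpy(h_keys, d_keys.Current(), sizeof(h_keys), cudaMemcpyDeviceToHost);
    for (int i = 0; i < num_items; ++i) printf("%d ", h_keys[i]); // 0 3 5 6 7 8 9
    cudaFree(d_key_buf); cudaFree(d_key_alt_buf); cudaFree(d_temp_storage);
    return 0;
}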
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::DeviceReduce provides device-wide, parallel operations for computing a reduction across a sequence of data items residing within device-accessible memory. - */ - -#pragma once - -#include -#include -#include - -#include "../iterator/arg_index_input_iterator.cuh" -#include "dispatch/dispatch_reduce.cuh" -#include "dispatch/dispatch_reduce_by_key.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \brief DeviceReduce provides device-wide, parallel operations for computing a reduction across a sequence of data items residing within device-accessible memory. ![](reduce_logo.png) - * \ingroup SingleModule - * - * \par Overview - * A reduction (or fold) - * uses a binary combining operator to compute a single aggregate from a sequence of input elements. - * - * \par Usage Considerations - * \cdp_class{DeviceReduce} - * - * \par Performance - * \linear_performance{reduction, reduce-by-key, and run-length encode} - * - * \par - * The following chart illustrates DeviceReduce::Sum - * performance across different CUDA architectures for \p int32 keys. - * - * \image html reduce_int32.png - * - * \par - * The following chart illustrates DeviceReduce::ReduceByKey (summation) - * performance across different CUDA architectures for \p fp32 - * values. Segments are identified by \p int32 keys, and have lengths uniformly sampled from [1,1000]. - * - * \image html reduce_by_key_fp32_len_500.png - * - * \par - * \plots_below - * - */ -struct DeviceReduce -{ - /** - * \brief Computes a device-wide reduction using the specified binary \p reduction_op functor and initial value \p init. - * - * \par - * - Does not support binary reduction operators that are non-commutative. - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates a user-defined min-reduction of a device vector of \p int data elements. - * \par - * \code - * #include // or equivalently - * - * // CustomMin functor - * struct CustomMin - * { - * template - * __device__ __forceinline__ - * T operator()(const T &a, const T &b) const { - * return (b < a) ? 
b : a;
- * }
- * };
- *
- * // Declare, allocate, and initialize device-accessible pointers for input and output
- * int num_items; // e.g., 7
- * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9]
- * int *d_out; // e.g., [-]
- * CustomMin min_op;
- * int init; // e.g., INT_MAX
- * ...
- *
- * // Determine temporary device storage requirements
- * void *d_temp_storage = NULL;
- * size_t temp_storage_bytes = 0;
- * cub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, min_op, init);
- *
- * // Allocate temporary storage
- * cudaMalloc(&d_temp_storage, temp_storage_bytes);
- *
- * // Run reduction
- * cub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, min_op, init);
- *
- * // d_out <-- [0]
- *
- * \endcode
- *
- * \tparam InputIteratorT [inferred] Random-access input iterator type for reading input items \iterator
- * \tparam OutputIteratorT [inferred] Output iterator type for recording the reduced aggregate \iterator
- * \tparam ReductionOpT [inferred] Binary reduction functor type having member T operator()(const T &a, const T &b)
- * \tparam T [inferred] Data element type that is convertible to the \p value type of \p InputIteratorT
- */
- template <
-     typename InputIteratorT,
-     typename OutputIteratorT,
-     typename ReductionOpT,
-     typename T>
- CUB_RUNTIME_FUNCTION
- static cudaError_t Reduce(
-     void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
-     size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
-     InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items
-     OutputIteratorT d_out, ///< [out] Pointer to the output aggregate
-     int num_items, ///< [in] Total number of input items (i.e., length of \p d_in)
-     ReductionOpT reduction_op, ///< [in] Binary reduction functor
-     T init, ///< [in] Initial value of the reduction
-     cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0.
-     bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false.
- {
-     // Signed integer type for global offsets
-     typedef int OffsetT;
-
-     return DispatchReduce<InputIteratorT, OutputIteratorT, OffsetT, ReductionOpT>::Dispatch(
-         d_temp_storage,
-         temp_storage_bytes,
-         d_in,
-         d_out,
-         num_items,
-         reduction_op,
-         init,
-         stream,
-         debug_synchronous);
- }
-
-
- /**
- * \brief Computes a device-wide sum using the addition (\p +) operator.
- *
- * \par
- * - Uses \p 0 as the initial value of the reduction.
- * - Does not support \p + operators that are non-commutative.
- * - \devicestorage
- *
- * \par Performance
- * The following charts illustrate saturated sum-reduction performance across different
- * CUDA architectures for \p int32 and \p int64 items, respectively.
- *
- * \image html reduce_int32.png
- * \image html reduce_int64.png
- *
- * \par Snippet
- * The code snippet below illustrates the sum-reduction of a device vector of \p int data elements.
- * \par
- * \code
- * #include <cub/cub.cuh> // or equivalently <cub/device/device_reduce.cuh>
- *
- * // Declare, allocate, and initialize device-accessible pointers for input and output
- * int num_items; // e.g., 7
- * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9]
- * int *d_out; // e.g., [-]
- * ...
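To complement the min-reduction snippet above, here is a sketch of Reduce with a different user-defined commutative operator and an explicit initial value; the AbsMax functor is hypothetical (not part of the deleted header) and folds the input to its largest absolute value:

#include <cub/cub.cuh>
#include <cuda_runtime.h>
#include <cstdio>

// Commutative and associative: reduces to the largest absolute value
struct AbsMax
{
    template <typename T>
    __host__ __device__ __forceinline__
    T operator()(const T &a, const T &b) const
    {
        T aa = (a < 0) ? -a : a;
        T bb = (b < 0) ? -b : b;
        return (aa < bb) ? bb : aa;
    }
};

int main()
{
    const int num_items = 7;
    int h_in[num_items] = {-8, 6, -7, 5, 3, 0, -9};
    int *d_in = NULL, *d_out = NULL;
    cudaMalloc(&d_in, num_items * sizeof(int));
    cudaMalloc(&d_out, sizeof(int));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);

    void *d_temp_storage = NULL;
    size_t temp_storage_bytes = 0;
    cub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, AbsMax(), 0);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    cub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, AbsMax(), 0);

    int h_out = 0;
    cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d\n", h_out); // 9
    cudaFree(d_in); cudaFree(d_out); cudaFree(d_temp_storage);
    return 0;
}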
- * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run sum-reduction - * cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items); - * - * // d_out <-- [38] - * - * \endcode - * - * \tparam InputIteratorT [inferred] Random-access input iterator type for reading input items \iterator - * \tparam OutputIteratorT [inferred] Output iterator type for recording the reduced aggregate \iterator - */ - template < - typename InputIteratorT, - typename OutputIteratorT> - CUB_RUNTIME_FUNCTION - static cudaError_t Sum( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items - OutputIteratorT d_out, ///< [out] Pointer to the output aggregate - int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - { - // Signed integer type for global offsets - typedef int OffsetT; - - // The output value type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type OutputT; // ... else the output iterator's value type - - return DispatchReduce::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_in, - d_out, - num_items, - cub::Sum(), - OutputT(), // zero-initialize - stream, - debug_synchronous); - } - - - /** - * \brief Computes a device-wide minimum using the less-than ('<') operator. - * - * \par - * - Uses std::numeric_limits::max() as the initial value of the reduction. - * - Does not support \p < operators that are non-commutative. - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates the min-reduction of a device vector of \p int data elements. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for input and output - * int num_items; // e.g., 7 - * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] - * int *d_out; // e.g., [-] - * ... 
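Because InputIteratorT may be any random-access input iterator, Sum also composes with CUB's fancy iterators. A sketch summing 0..99 without materializing the inputs in memory; it assumes the bundled cub::CountingInputIterator is available through the umbrella header:

#include <cub/cub.cuh>
#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    const int num_items = 100;
    cub::CountingInputIterator<int> d_in(0); // yields 0, 1, 2, ... on the fly
    int *d_out = NULL;
    cudaMalloc(&d_out, sizeof(int));

    void *d_temp_storage = NULL;
    size_t temp_storage_bytes = 0;
    cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);

    int h_out = 0;
    cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d\n", h_out); // 4950
    cudaFree(d_out); cudaFree(d_temp_storage);
    return 0;
}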
- * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run min-reduction - * cub::DeviceReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items); - * - * // d_out <-- [0] - * - * \endcode - * - * \tparam InputIteratorT [inferred] Random-access input iterator type for reading input items \iterator - * \tparam OutputIteratorT [inferred] Output iterator type for recording the reduced aggregate \iterator - */ - template < - typename InputIteratorT, - typename OutputIteratorT> - CUB_RUNTIME_FUNCTION - static cudaError_t Min( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items - OutputIteratorT d_out, ///< [out] Pointer to the output aggregate - int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - { - // Signed integer type for global offsets - typedef int OffsetT; - - // The input value type - typedef typename std::iterator_traits::value_type InputT; - - return DispatchReduce::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_in, - d_out, - num_items, - cub::Min(), - Traits::Max(), // replace with std::numeric_limits::max() when C++11 support is more prevalent - stream, - debug_synchronous); - } - - - /** - * \brief Finds the first device-wide minimum using the less-than ('<') operator, also returning the index of that item. - * - * \par - * - The output value type of \p d_out is cub::KeyValuePair (assuming the value type of \p d_in is \p T) - * - The minimum is written to d_out.value and its offset in the input array is written to d_out.key. - * - The {1, std::numeric_limits::max()} tuple is produced for zero-length inputs - * - Does not support \p < operators that are non-commutative. - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates the argmin-reduction of a device vector of \p int data elements. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for input and output - * int num_items; // e.g., 7 - * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] - * KeyValuePair *d_out; // e.g., [{-,-}] - * ... 
- *
- * // Determine temporary device storage requirements
- * void *d_temp_storage = NULL;
- * size_t temp_storage_bytes = 0;
- * cub::DeviceReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
- *
- * // Allocate temporary storage
- * cudaMalloc(&d_temp_storage, temp_storage_bytes);
- *
- * // Run argmin-reduction
- * cub::DeviceReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
- *
- * // d_out <-- [{5, 0}]
- *
- * \endcode
- *
- * \tparam InputIteratorT [inferred] Random-access input iterator type for reading input items (of some type \p T) \iterator
- * \tparam OutputIteratorT [inferred] Output iterator type for recording the reduced aggregate (having value type cub::KeyValuePair<int, T>) \iterator
- */
- template <
-     typename InputIteratorT,
-     typename OutputIteratorT>
- CUB_RUNTIME_FUNCTION
- static cudaError_t ArgMin(
-     void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
-     size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
-     InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items
-     OutputIteratorT d_out, ///< [out] Pointer to the output aggregate
-     int num_items, ///< [in] Total number of input items (i.e., length of \p d_in)
-     cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0.
-     bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false.
- {
-     // Signed integer type for global offsets
-     typedef int OffsetT;
-
-     // The input type
-     typedef typename std::iterator_traits<InputIteratorT>::value_type InputValueT;
-
-     // The output tuple type
-     typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
-         KeyValuePair<OffsetT, InputValueT>, // ... then the key value pair OffsetT + InputValueT
-         typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputTupleT; // ... else the output iterator's value type
-
-     // The output value type
-     typedef typename OutputTupleT::Value OutputValueT;
-
-     // Wrapped input iterator to produce index-value tuples
-     typedef ArgIndexInputIterator<InputIteratorT, OffsetT, InputValueT> ArgIndexInputIteratorT;
-     ArgIndexInputIteratorT d_indexed_in(d_in);
-
-     // Initial value
-     OutputTupleT initial_value(1, Traits<InputValueT>::Max()); // replace with std::numeric_limits<InputValueT>::max() when C++11 support is more prevalent
-
-     return DispatchReduce<ArgIndexInputIteratorT, OutputIteratorT, OffsetT, cub::ArgMin>::Dispatch(
-         d_temp_storage,
-         temp_storage_bytes,
-         d_indexed_in,
-         d_out,
-         num_items,
-         cub::ArgMin(),
-         initial_value,
-         stream,
-         debug_synchronous);
- }
-
-
- /**
- * \brief Computes a device-wide maximum using the greater-than ('>') operator.
- *
- * \par
- * - Uses std::numeric_limits<T>::lowest() as the initial value of the reduction.
- * - Does not support \p > operators that are non-commutative.
- * - \devicestorage
- *
- * \par Snippet
- * The code snippet below illustrates the max-reduction of a device vector of \p int data elements.
- * \par
- * \code
- * #include <cub/cub.cuh> // or equivalently <cub/device/device_reduce.cuh>
- *
- * // Declare, allocate, and initialize device-accessible pointers for input and output
- * int num_items; // e.g., 7
- * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9]
- * int *d_out; // e.g., [-]
- * ...
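A sketch of consuming ArgMin's KeyValuePair result on the host, following the d_out.key / d_out.value convention described above (variable names illustrative, error checks omitted):

#include <cub/cub.cuh>
#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    const int num_items = 7;
    int h_in[num_items] = {8, 6, 7, 5, 3, 0, 9};
    int *d_in = NULL;
    cub::KeyValuePair<int, int> *d_out = NULL;
    cudaMalloc(&d_in, num_items * sizeof(int));
    cudaMalloc(&d_out, sizeof(*d_out));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);

    void *d_temp_storage = NULL;
    size_t temp_storage_bytes = 0;
    cub::DeviceReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    cub::DeviceReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);

    cub::KeyValuePair<int, int> h_out;
    cudaMemcpy(&h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    printf("min=%d at index %d\n", h_out.value, h_out.key); // min=0 at index 5
    cudaFree(d_in); cudaFree(d_out); cudaFree(d_temp_storage);
    return 0;
}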
- *
- * // Determine temporary device storage requirements
- * void *d_temp_storage = NULL;
- * size_t temp_storage_bytes = 0;
- * cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
- *
- * // Allocate temporary storage
- * cudaMalloc(&d_temp_storage, temp_storage_bytes);
- *
- * // Run max-reduction
- * cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
- *
- * // d_out <-- [9]
- *
- * \endcode
- *
- * \tparam InputIteratorT [inferred] Random-access input iterator type for reading input items \iterator
- * \tparam OutputIteratorT [inferred] Output iterator type for recording the reduced aggregate \iterator
- */
- template <
-     typename InputIteratorT,
-     typename OutputIteratorT>
- CUB_RUNTIME_FUNCTION
- static cudaError_t Max(
-     void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
-     size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
-     InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items
-     OutputIteratorT d_out, ///< [out] Pointer to the output aggregate
-     int num_items, ///< [in] Total number of input items (i.e., length of \p d_in)
-     cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0.
-     bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false.
- {
-     // Signed integer type for global offsets
-     typedef int OffsetT;
-
-     // The input value type
-     typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
-
-     return DispatchReduce<InputIteratorT, OutputIteratorT, OffsetT, cub::Max>::Dispatch(
-         d_temp_storage,
-         temp_storage_bytes,
-         d_in,
-         d_out,
-         num_items,
-         cub::Max(),
-         Traits<InputT>::Lowest(), // replace with std::numeric_limits<InputT>::lowest() when C++11 support is more prevalent
-         stream,
-         debug_synchronous);
- }
-
-
- /**
- * \brief Finds the first device-wide maximum using the greater-than ('>') operator, also returning the index of that item.
- *
- * \par
- * - The output value type of \p d_out is cub::KeyValuePair<int, T> (assuming the value type of \p d_in is \p T)
- * - The maximum is written to d_out.value and its offset in the input array is written to d_out.key.
- * - The {1, std::numeric_limits<T>::lowest()} tuple is produced for zero-length inputs
- * - Does not support \p > operators that are non-commutative.
- * - \devicestorage
- *
- * \par Snippet
- * The code snippet below illustrates the argmax-reduction of a device vector of \p int data elements.
- * \par
- * \code
- * #include <cub/cub.cuh> // or equivalently <cub/device/device_reduce.cuh>
- *
- * // Declare, allocate, and initialize device-accessible pointers for input and output
- * int num_items; // e.g., 7
- * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9]
- * KeyValuePair<int, int> *d_out; // e.g., [{-,-}]
- * ...
- *
- * // Determine temporary device storage requirements
- * void *d_temp_storage = NULL;
- * size_t temp_storage_bytes = 0;
- * cub::DeviceReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
- *
- * // Allocate temporary storage
- * cudaMalloc(&d_temp_storage, temp_storage_bytes);
- *
- * // Run argmax-reduction
- * cub::DeviceReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
- *
- * // d_out <-- [{6, 9}]
- *
- * \endcode
- *
- * \tparam InputIteratorT [inferred] Random-access input iterator type for reading input items (of some type \p T) \iterator
- * \tparam OutputIteratorT [inferred] Output iterator type for recording the reduced aggregate (having value type cub::KeyValuePair<int, T>) \iterator
- */
- template <
-     typename InputIteratorT,
-     typename OutputIteratorT>
- CUB_RUNTIME_FUNCTION
- static cudaError_t ArgMax(
-     void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
-     size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
-     InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items
-     OutputIteratorT d_out, ///< [out] Pointer to the output aggregate
-     int num_items, ///< [in] Total number of input items (i.e., length of \p d_in)
-     cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0.
-     bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false.
- {
-     // Signed integer type for global offsets
-     typedef int OffsetT;
-
-     // The input type
-     typedef typename std::iterator_traits<InputIteratorT>::value_type InputValueT;
-
-     // The output tuple type
-     typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
-         KeyValuePair<OffsetT, InputValueT>, // ... then the key value pair OffsetT + InputValueT
-         typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputTupleT; // ... else the output iterator's value type
-
-     // The output value type
-     typedef typename OutputTupleT::Value OutputValueT;
-
-     // Wrapped input iterator to produce index-value tuples
-     typedef ArgIndexInputIterator<InputIteratorT, OffsetT, InputValueT> ArgIndexInputIteratorT;
-     ArgIndexInputIteratorT d_indexed_in(d_in);
-
-     // Initial value
-     OutputTupleT initial_value(1, Traits<InputValueT>::Lowest()); // replace with std::numeric_limits<InputValueT>::lowest() when C++11 support is more prevalent
-
-     return DispatchReduce<ArgIndexInputIteratorT, OutputIteratorT, OffsetT, cub::ArgMax>::Dispatch(
-         d_temp_storage,
-         temp_storage_bytes,
-         d_indexed_in,
-         d_out,
-         num_items,
-         cub::ArgMax(),
-         initial_value,
-         stream,
-         debug_synchronous);
- }
-
-
- /**
- * \brief Reduces segments of values, where segments are demarcated by corresponding runs of identical keys.
- *
- * \par
- * This operation computes segmented reductions within \p d_values_in using
- * the specified binary \p reduction_op functor. The segments are identified by
- * "runs" of corresponding keys in \p d_keys_in, where runs are maximal ranges of
- * consecutive, identical keys. For the ith run encountered,
- * the first key of the run and the corresponding value aggregate of that run are
- * written to d_unique_out[i] and d_aggregates_out[i],
- * respectively. The total number of runs encountered is written to \p d_num_runs_out.
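The zero-length sentinel documented above ({1, std::numeric_limits<T>::lowest()}) can be observed directly. This sketch assumes the sentinel is produced exactly as the comment states; it also pads the temp allocation to at least one byte so the second call is not mistaken for another size query (cudaMalloc of zero bytes may yield a NULL pointer):

#include <cub/cub.cuh>
#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    int *d_in = NULL; // never dereferenced when num_items == 0
    cub::KeyValuePair<int, int> *d_out = NULL;
    cudaMalloc(&d_out, sizeof(*d_out));

    void *d_temp_storage = NULL;
    size_t temp_storage_bytes = 0;
    cub::DeviceReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_out, 0);
    cudaMalloc(&d_temp_storage, temp_storage_bytes ? temp_storage_bytes : 1);
    cub::DeviceReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_out, 0);

    cub::KeyValuePair<int, int> h_out;
    cudaMemcpy(&h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    printf("key=%d value=%d\n", h_out.key, h_out.value); // expected: key=1, value=INT_MIN
    cudaFree(d_out); cudaFree(d_temp_storage);
    return 0;
}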
- * - * \par - * - The == equality operator is used to determine whether keys are equivalent - * - \devicestorage - * - * \par Performance - * The following chart illustrates reduction-by-key (sum) performance across - * different CUDA architectures for \p fp32 and \p fp64 values, respectively. Segments - * are identified by \p int32 keys, and have lengths uniformly sampled from [1,1000]. - * - * \image html reduce_by_key_fp32_len_500.png - * \image html reduce_by_key_fp64_len_500.png - * - * \par - * The following charts are similar, but with segment lengths uniformly sampled from [1,10]: - * - * \image html reduce_by_key_fp32_len_5.png - * \image html reduce_by_key_fp64_len_5.png - * - * \par Snippet - * The code snippet below illustrates the segmented reduction of \p int values grouped - * by runs of associated \p int keys. - * \par - * \code - * #include // or equivalently - * - * // CustomMin functor - * struct CustomMin - * { - * template - * CUB_RUNTIME_FUNCTION __forceinline__ - * T operator()(const T &a, const T &b) const { - * return (b < a) ? b : a; - * } - * }; - * - * // Declare, allocate, and initialize device-accessible pointers for input and output - * int num_items; // e.g., 8 - * int *d_keys_in; // e.g., [0, 2, 2, 9, 5, 5, 5, 8] - * int *d_values_in; // e.g., [0, 7, 1, 6, 2, 5, 3, 4] - * int *d_unique_out; // e.g., [-, -, -, -, -, -, -, -] - * int *d_aggregates_out; // e.g., [-, -, -, -, -, -, -, -] - * int *d_num_runs_out; // e.g., [-] - * CustomMin reduction_op; - * ... - * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceReduce::ReduceByKey(d_temp_storage, temp_storage_bytes, d_keys_in, d_unique_out, d_values_in, d_aggregates_out, d_num_runs_out, reduction_op, num_items); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run reduce-by-key - * cub::DeviceReduce::ReduceByKey(d_temp_storage, temp_storage_bytes, d_keys_in, d_unique_out, d_values_in, d_aggregates_out, d_num_runs_out, reduction_op, num_items); - * - * // d_unique_out <-- [0, 2, 9, 5, 8] - * // d_aggregates_out <-- [0, 1, 6, 2, 4] - * // d_num_runs_out <-- [5] - * - * \endcode - * - * \tparam KeysInputIteratorT [inferred] Random-access input iterator type for reading input keys \iterator - * \tparam UniqueOutputIteratorT [inferred] Random-access output iterator type for writing unique output keys \iterator - * \tparam ValuesInputIteratorT [inferred] Random-access input iterator type for reading input values \iterator - * \tparam AggregatesOutputIterator [inferred] Random-access output iterator type for writing output value aggregates \iterator - * \tparam NumRunsOutputIteratorT [inferred] Output iterator type for recording the number of runs encountered \iterator - * \tparam ReductionOpT [inferred] Binary reduction functor type having member T operator()(const T &a, const T &b) - */ - template < - typename KeysInputIteratorT, - typename UniqueOutputIteratorT, - typename ValuesInputIteratorT, - typename AggregatesOutputIteratorT, - typename NumRunsOutputIteratorT, - typename ReductionOpT> - CUB_RUNTIME_FUNCTION __forceinline__ - static cudaError_t ReduceByKey( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
- size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - KeysInputIteratorT d_keys_in, ///< [in] Pointer to the input sequence of keys - UniqueOutputIteratorT d_unique_out, ///< [out] Pointer to the output sequence of unique keys (one key per run) - ValuesInputIteratorT d_values_in, ///< [in] Pointer to the input sequence of corresponding values - AggregatesOutputIteratorT d_aggregates_out, ///< [out] Pointer to the output sequence of value aggregates (one aggregate per run) - NumRunsOutputIteratorT d_num_runs_out, ///< [out] Pointer to total number of runs encountered (i.e., the length of d_unique_out) - ReductionOpT reduction_op, ///< [in] Binary reduction functor - int num_items, ///< [in] Total number of associated key+value pairs (i.e., the length of \p d_in_keys and \p d_in_values) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. - { - // Signed integer type for global offsets - typedef int OffsetT; - - // FlagT iterator type (not used) - - // Selection op (not used) - - // Default == operator - typedef Equality EqualityOp; - - return DispatchReduceByKey::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_keys_in, - d_unique_out, - d_values_in, - d_aggregates_out, - d_num_runs_out, - EqualityOp(), - reduction_op, - num_items, - stream, - debug_synchronous); - } - -}; - -/** - * \example example_device_reduce.cu - */ - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - - diff --git a/ml-xgboost/cub/cub/device/device_run_length_encode.cuh b/ml-xgboost/cub/cub/device/device_run_length_encode.cuh deleted file mode 100644 index ee1a539..0000000 --- a/ml-xgboost/cub/cub/device/device_run_length_encode.cuh +++ /dev/null @@ -1,278 +0,0 @@ - -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
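Alongside the CustomMin variant shown in the documentation above, a common ReduceByKey use is a per-run sum with the built-in cub::Sum functor. A self-contained sketch over the same key/value data (cleanup with cudaFree omitted for brevity):

#include <cub/cub.cuh>
#include <cuda_runtime.h>

int main()
{
    const int num_items = 8;
    int h_keys[num_items]   = {0, 2, 2, 9, 5, 5, 5, 8};
    int h_values[num_items] = {0, 7, 1, 6, 2, 5, 3, 4};
    int *d_keys_in, *d_values_in, *d_unique_out, *d_aggregates_out, *d_num_runs_out;
    cudaMalloc(&d_keys_in, num_items * sizeof(int));
    cudaMalloc(&d_values_in, num_items * sizeof(int));
    cudaMalloc(&d_unique_out, num_items * sizeof(int));
    cudaMalloc(&d_aggregates_out, num_items * sizeof(int));
    cudaMalloc(&d_num_runs_out, sizeof(int));
    cudaMemcpy(d_keys_in, h_keys, sizeof(h_keys), cudaMemcpyHostToDevice);
    cudaMemcpy(d_values_in, h_values, sizeof(h_values), cudaMemcpyHostToDevice);

    void *d_temp_storage = NULL;
    size_t temp_storage_bytes = 0;
    cub::DeviceReduce::ReduceByKey(d_temp_storage, temp_storage_bytes, d_keys_in, d_unique_out,
                                   d_values_in, d_aggregates_out, d_num_runs_out, cub::Sum(), num_items);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    cub::DeviceReduce::ReduceByKey(d_temp_storage, temp_storage_bytes, d_keys_in, d_unique_out,
                                   d_values_in, d_aggregates_out, d_num_runs_out, cub::Sum(), num_items);

    // d_unique_out     <-- [0, 2, 9, 5, 8]
    // d_aggregates_out <-- [0, 8, 6, 10, 4]
    // d_num_runs_out   <-- [5]
    return 0;
}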
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::DeviceRunLengthEncode provides device-wide, parallel operations for computing a run-length encoding across a sequence of data items residing within device-accessible memory. - */ - -#pragma once - -#include -#include - -#include "dispatch/dispatch_rle.cuh" -#include "dispatch/dispatch_reduce_by_key.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \brief DeviceRunLengthEncode provides device-wide, parallel operations for demarcating "runs" of same-valued items within a sequence residing within device-accessible memory. ![](run_length_encode_logo.png) - * \ingroup SingleModule - * - * \par Overview - * A run-length encoding - * computes a simple compressed representation of a sequence of input elements such that each - * maximal "run" of consecutive same-valued data items is encoded as a single data value along with a - * count of the elements in that run. - * - * \par Usage Considerations - * \cdp_class{DeviceRunLengthEncode} - * - * \par Performance - * \linear_performance{run-length encode} - * - * \par - * The following chart illustrates DeviceRunLengthEncode::RunLengthEncode performance across - * different CUDA architectures for \p int32 items. - * Segments have lengths uniformly sampled from [1,1000]. - * - * \image html rle_int32_len_500.png - * - * \par - * \plots_below - * - */ -struct DeviceRunLengthEncode -{ - - /** - * \brief Computes a run-length encoding of the sequence \p d_in. - * - * \par - * - For the ith run encountered, the first key of the run and its length are written to - * d_unique_out[i] and d_counts_out[i], - * respectively. - * - The total number of runs encountered is written to \p d_num_runs_out. - * - The == equality operator is used to determine whether values are equivalent - * - \devicestorage - * - * \par Performance - * The following charts illustrate saturated encode performance across different - * CUDA architectures for \p int32 and \p int64 items, respectively. Segments have - * lengths uniformly sampled from [1,1000]. - * - * \image html rle_int32_len_500.png - * \image html rle_int64_len_500.png - * - * \par - * The following charts are similar, but with segment lengths uniformly sampled from [1,10]: - * - * \image html rle_int32_len_5.png - * \image html rle_int64_len_5.png - * - * \par Snippet - * The code snippet below illustrates the run-length encoding of a sequence of \p int values. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for input and output - * int num_items; // e.g., 8 - * int *d_in; // e.g., [0, 2, 2, 9, 5, 5, 5, 8] - * int *d_unique_out; // e.g., [ , , , , , , , ] - * int *d_counts_out; // e.g., [ , , , , , , , ] - * int *d_num_runs_out; // e.g., [ ] - * ... 
- * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceRunLengthEncode::Encode(d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_counts_out, d_num_runs_out, num_items); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run encoding - * cub::DeviceRunLengthEncode::Encode(d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_counts_out, d_num_runs_out, num_items); - * - * // d_unique_out <-- [0, 2, 9, 5, 8] - * // d_counts_out <-- [1, 2, 1, 3, 1] - * // d_num_runs_out <-- [5] - * - * \endcode - * - * \tparam InputIteratorT [inferred] Random-access input iterator type for reading input items \iterator - * \tparam UniqueOutputIteratorT [inferred] Random-access output iterator type for writing unique output items \iterator - * \tparam LengthsOutputIteratorT [inferred] Random-access output iterator type for writing output counts \iterator - * \tparam NumRunsOutputIteratorT [inferred] Output iterator type for recording the number of runs encountered \iterator - */ - template < - typename InputIteratorT, - typename UniqueOutputIteratorT, - typename LengthsOutputIteratorT, - typename NumRunsOutputIteratorT> - CUB_RUNTIME_FUNCTION __forceinline__ - static cudaError_t Encode( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - InputIteratorT d_in, ///< [in] Pointer to the input sequence of keys - UniqueOutputIteratorT d_unique_out, ///< [out] Pointer to the output sequence of unique keys (one key per run) - LengthsOutputIteratorT d_counts_out, ///< [out] Pointer to the output sequence of run-lengths (one count per run) - NumRunsOutputIteratorT d_num_runs_out, ///< [out] Pointer to total number of runs - int num_items, ///< [in] Total number of associated key+value pairs (i.e., the length of \p d_in_keys and \p d_in_values) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. - { - typedef int OffsetT; // Signed integer type for global offsets - typedef NullType* FlagIterator; // FlagT iterator type (not used) - typedef NullType SelectOp; // Selection op (not used) - typedef Equality EqualityOp; // Default == operator - typedef cub::Sum ReductionOp; // Value reduction operator - - // The lengths output value type - typedef typename If<(Equals::value_type, void>::VALUE), // LengthT = (if output iterator's value type is void) ? - OffsetT, // ... then the OffsetT type, - typename std::iterator_traits::value_type>::Type LengthT; // ... 
else the output iterator's value type - - // Generator type for providing 1s values for run-length reduction - typedef ConstantInputIterator LengthsInputIteratorT; - - return DispatchReduceByKey::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_in, - d_unique_out, - LengthsInputIteratorT((LengthT) 1), - d_counts_out, - d_num_runs_out, - EqualityOp(), - ReductionOp(), - num_items, - stream, - debug_synchronous); - } - - - /** - * \brief Enumerates the starting offsets and lengths of all non-trivial runs (of length > 1) of same-valued keys in the sequence \p d_in. - * - * \par - * - For the ith non-trivial run, the run's starting offset - * and its length are written to d_offsets_out[i] and - * d_lengths_out[i], respectively. - * - The total number of runs encountered is written to \p d_num_runs_out. - * - The == equality operator is used to determine whether values are equivalent - * - \devicestorage - * - * \par Performance - * - * \par Snippet - * The code snippet below illustrates the identification of non-trivial runs within a sequence of \p int values. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for input and output - * int num_items; // e.g., 8 - * int *d_in; // e.g., [0, 2, 2, 9, 5, 5, 5, 8] - * int *d_offsets_out; // e.g., [ , , , , , , , ] - * int *d_lengths_out; // e.g., [ , , , , , , , ] - * int *d_num_runs_out; // e.g., [ ] - * ... - * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceRunLengthEncode::NonTrivialRuns(d_temp_storage, temp_storage_bytes, d_in, d_offsets_out, d_lengths_out, d_num_runs_out, num_items); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run encoding - * cub::DeviceRunLengthEncode::NonTrivialRuns(d_temp_storage, temp_storage_bytes, d_in, d_offsets_out, d_lengths_out, d_num_runs_out, num_items); - * - * // d_offsets_out <-- [1, 4] - * // d_lengths_out <-- [2, 3] - * // d_num_runs_out <-- [2] - * - * \endcode - * - * \tparam InputIteratorT [inferred] Random-access input iterator type for reading input items \iterator - * \tparam OffsetsOutputIteratorT [inferred] Random-access output iterator type for writing run-offset values \iterator - * \tparam LengthsOutputIteratorT [inferred] Random-access output iterator type for writing run-length values \iterator - * \tparam NumRunsOutputIteratorT [inferred] Output iterator type for recording the number of runs encountered \iterator - */ - template < - typename InputIteratorT, - typename OffsetsOutputIteratorT, - typename LengthsOutputIteratorT, - typename NumRunsOutputIteratorT> - CUB_RUNTIME_FUNCTION __forceinline__ - static cudaError_t NonTrivialRuns( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
- size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - InputIteratorT d_in, ///< [in] Pointer to input sequence of data items - OffsetsOutputIteratorT d_offsets_out, ///< [out] Pointer to output sequence of run-offsets (one offset per non-trivial run) - LengthsOutputIteratorT d_lengths_out, ///< [out] Pointer to output sequence of run-lengths (one count per non-trivial run) - NumRunsOutputIteratorT d_num_runs_out, ///< [out] Pointer to total number of runs (i.e., length of \p d_offsets_out) - int num_items, ///< [in] Total number of associated key+value pairs (i.e., the length of \p d_in_keys and \p d_in_values) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. - { - typedef int OffsetT; // Signed integer type for global offsets - typedef Equality EqualityOp; // Default == operator - - return DeviceRleDispatch::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_in, - d_offsets_out, - d_lengths_out, - d_num_runs_out, - EqualityOp(), - num_items, - stream, - debug_synchronous); - } - - -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - - diff --git a/ml-xgboost/cub/cub/device/device_scan.cuh b/ml-xgboost/cub/cub/device/device_scan.cuh deleted file mode 100644 index 1395040..0000000 --- a/ml-xgboost/cub/cub/device/device_scan.cuh +++ /dev/null @@ -1,423 +0,0 @@ - -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
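A compact end-to-end sketch of NonTrivialRuns on different data, assuming only the vendored headers; the length-1 runs (the 2 and the 4) are skipped, exactly as documented:

#include <cub/cub.cuh>
#include <cuda_runtime.h>

int main()
{
    const int num_items = 7;
    int h_in[num_items] = {1, 1, 2, 3, 3, 3, 4};
    int *d_in, *d_offsets_out, *d_lengths_out, *d_num_runs_out;
    cudaMalloc(&d_in, num_items * sizeof(int));
    cudaMalloc(&d_offsets_out, num_items * sizeof(int));
    cudaMalloc(&d_lengths_out, num_items * sizeof(int));
    cudaMalloc(&d_num_runs_out, sizeof(int));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);

    void *d_temp_storage = NULL;
    size_t temp_storage_bytes = 0;
    cub::DeviceRunLengthEncode::NonTrivialRuns(d_temp_storage, temp_storage_bytes, d_in,
                                               d_offsets_out, d_lengths_out, d_num_runs_out, num_items);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    cub::DeviceRunLengthEncode::NonTrivialRuns(d_temp_storage, temp_storage_bytes, d_in,
                                               d_offsets_out, d_lengths_out, d_num_runs_out, num_items);

    // d_offsets_out <-- [0, 3]   d_lengths_out <-- [2, 3]   d_num_runs_out <-- [2]
    return 0;
}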
- * - ******************************************************************************/ - -/** - * \file - * cub::DeviceScan provides device-wide, parallel operations for computing a prefix scan across a sequence of data items residing within device-accessible memory. - */ - -#pragma once - -#include -#include - -#include "dispatch/dispatch_scan.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \brief DeviceScan provides device-wide, parallel operations for computing a prefix scan across a sequence of data items residing within device-accessible memory. ![](device_scan.png) - * \ingroup SingleModule - * - * \par Overview - * Given a sequence of input elements and a binary reduction operator, a [prefix scan](http://en.wikipedia.org/wiki/Prefix_sum) - * produces an output sequence where each element is computed to be the reduction - * of the elements occurring earlier in the input sequence. Prefix sum - * connotes a prefix scan with the addition operator. The term \em inclusive indicates - * that the ith output reduction incorporates the ith input. - * The term \em exclusive indicates the ith input is not incorporated into - * the ith output reduction. - * - * \par - * As of CUB 1.0.1 (2013), CUB's device-wide scan APIs have implemented our "decoupled look-back" algorithm - * for performing global prefix scan with only a single pass through the - * input data, as described in our 2016 technical report [1]. The central - * idea is to leverage a small, constant factor of redundant work in order to overlap the latencies - * of global prefix propagation with local computation. As such, our algorithm requires only - * ~2n data movement (n inputs are read, n outputs are written), and typically - * proceeds at "memcpy" speeds. - * - * \par - * [1] [Duane Merrill and Michael Garland. "Single-pass Parallel Prefix Scan with Decoupled Look-back", NVIDIA Technical Report NVR-2016-002, 2016.](https://research.nvidia.com/publication/single-pass-parallel-prefix-scan-decoupled-look-back) - * - * \par Usage Considerations - * \cdp_class{DeviceScan} - * - * \par Performance - * \linear_performance{prefix scan} - * - * \par - * The following chart illustrates DeviceScan::ExclusiveSum - * performance across different CUDA architectures for \p int32 keys. - * \plots_below - * - * \image html scan_int32.png - * - */ -struct DeviceScan -{ - /******************************************************************//** - * \name Exclusive scans - *********************************************************************/ - //@{ - - /** - * \brief Computes a device-wide exclusive prefix sum. The value of 0 is applied as the initial value, and is assigned to *d_out. - * - * \par - * - Supports non-commutative sum operators. - * - \devicestorage - * - * \par Performance - * The following charts illustrate saturated exclusive sum performance across different - * CUDA architectures for \p int32 and \p int64 items, respectively. - * - * \image html scan_int32.png - * \image html scan_int64.png - * - * \par Snippet - * The code snippet below illustrates the exclusive prefix sum of an \p int device vector. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for input and output - * int num_items; // e.g., 7 - * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] - * int *d_out; // e.g., [ , , , , , , ] - * ... 
- *
- * // Determine temporary device storage requirements
- * void *d_temp_storage = NULL;
- * size_t temp_storage_bytes = 0;
- * cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
- *
- * // Allocate temporary storage
- * cudaMalloc(&d_temp_storage, temp_storage_bytes);
- *
- * // Run exclusive prefix sum
- * cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
- *
- * // d_out <-- [0, 8, 14, 21, 26, 29, 29]
- *
- * \endcode
- *
- * \tparam InputIteratorT [inferred] Random-access input iterator type for reading scan inputs \iterator
- * \tparam OutputIteratorT [inferred] Random-access output iterator type for writing scan outputs \iterator
- */
- template <
-     typename InputIteratorT,
-     typename OutputIteratorT>
- CUB_RUNTIME_FUNCTION
- static cudaError_t ExclusiveSum(
-     void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
-     size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
-     InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items
-     OutputIteratorT d_out, ///< [out] Pointer to the output sequence of data items
-     int num_items, ///< [in] Total number of input items (i.e., the length of \p d_in)
-     cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0.
-     bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false.
- {
-     // Signed integer type for global offsets
-     typedef int OffsetT;
-
-     // The output value type
-     typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
-         typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
-         typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
-
-     // Initial value
-     OutputT init_value = 0;
-
-     return DispatchScan<InputIteratorT, OutputIteratorT, Sum, OutputT, OffsetT>::Dispatch(
-         d_temp_storage,
-         temp_storage_bytes,
-         d_in,
-         d_out,
-         Sum(),
-         init_value,
-         num_items,
-         stream,
-         debug_synchronous);
- }
-
-
- /**
- * \brief Computes a device-wide exclusive prefix scan using the specified binary \p scan_op functor. The \p init_value value is applied as the initial value, and is assigned to *d_out.
- *
- * \par
- * - Supports non-commutative scan operators.
- * - \devicestorage
- *
- * \par Snippet
- * The code snippet below illustrates the exclusive prefix min-scan of an \p int device vector.
- * \par
- * \code
- * #include <cub/cub.cuh> // or equivalently <cub/device/device_scan.cuh>
- *
- * // CustomMin functor
- * struct CustomMin
- * {
- *     template <typename T>
- *     CUB_RUNTIME_FUNCTION __forceinline__
- *     T operator()(const T &a, const T &b) const {
- *         return (b < a) ? b : a;
- *     }
- * };
- *
- * // Declare, allocate, and initialize device-accessible pointers for input and output
- * int num_items; // e.g., 7
- * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9]
- * int *d_out; // e.g., [ , , , , , , ]
- * CustomMin min_op;
- * ...
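One common use of ExclusiveSum, sketched under the same assumptions as the snippets above, is converting per-segment counts into begin offsets; this is the offsets layout the segmented interfaces later in this patch expect:

#include <cub/cub.cuh>
#include <cuda_runtime.h>

int main()
{
    const int num_segments = 4;
    int h_counts[num_segments] = {3, 0, 4, 1};
    int *d_counts, *d_offsets;
    cudaMalloc(&d_counts, num_segments * sizeof(int));
    cudaMalloc(&d_offsets, num_segments * sizeof(int));
    cudaMemcpy(d_counts, h_counts, sizeof(h_counts), cudaMemcpyHostToDevice);

    void *d_temp_storage = NULL;
    size_t temp_storage_bytes = 0;
    cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_counts, d_offsets, num_segments);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_counts, d_offsets, num_segments);

    // d_offsets <-- [0, 3, 3, 7]: segment i begins at d_offsets[i] and holds h_counts[i] items
    return 0;
}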
- *
- * // Determine temporary device storage requirements for exclusive prefix scan
- * void *d_temp_storage = NULL;
- * size_t temp_storage_bytes = 0;
- * cub::DeviceScan::ExclusiveScan(d_temp_storage, temp_storage_bytes, d_in, d_out, min_op, INT_MAX, num_items);
- *
- * // Allocate temporary storage for exclusive prefix scan
- * cudaMalloc(&d_temp_storage, temp_storage_bytes);
- *
- * // Run exclusive prefix min-scan
- * cub::DeviceScan::ExclusiveScan(d_temp_storage, temp_storage_bytes, d_in, d_out, min_op, INT_MAX, num_items);
- *
- * // d_out <-- [2147483647, 8, 6, 6, 5, 3, 0]
- *
- * \endcode
- *
- * \tparam InputIteratorT [inferred] Random-access input iterator type for reading scan inputs \iterator
- * \tparam OutputIteratorT [inferred] Random-access output iterator type for writing scan outputs \iterator
- * \tparam ScanOpT [inferred] Binary scan functor type having member T operator()(const T &a, const T &b)
- * \tparam InitValueT [inferred] Type of the \p init_value value used to seed the exclusive scan
- */
- template <
-     typename InputIteratorT,
-     typename OutputIteratorT,
-     typename ScanOpT,
-     typename InitValueT>
- CUB_RUNTIME_FUNCTION
- static cudaError_t ExclusiveScan(
-     void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
-     size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
-     InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items
-     OutputIteratorT d_out, ///< [out] Pointer to the output sequence of data items
-     ScanOpT scan_op, ///< [in] Binary scan functor
-     InitValueT init_value, ///< [in] Initial value to seed the exclusive scan (and is assigned to *d_out)
-     int num_items, ///< [in] Total number of input items (i.e., the length of \p d_in)
-     cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0.
-     bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false.
- {
-     // Signed integer type for global offsets
-     typedef int OffsetT;
-
-     return DispatchScan<InputIteratorT, OutputIteratorT, ScanOpT, InitValueT, OffsetT>::Dispatch(
-         d_temp_storage,
-         temp_storage_bytes,
-         d_in,
-         d_out,
-         scan_op,
-         init_value,
-         num_items,
-         stream,
-         debug_synchronous);
- }
-
-
- //@} end member group
- /******************************************************************//**
-  * \name Inclusive scans
-  *********************************************************************/
- //@{
-
-
- /**
- * \brief Computes a device-wide inclusive prefix sum.
- *
- * \par
- * - Supports non-commutative sum operators.
- * - \devicestorage
- *
- * \par Snippet
- * The code snippet below illustrates the inclusive prefix sum of an \p int device vector.
- * \par
- * \code
- * #include <cub/cub.cuh> // or equivalently <cub/device/device_scan.cuh>
- *
- * // Declare, allocate, and initialize device-accessible pointers for input and output
- * int num_items; // e.g., 7
- * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9]
- * int *d_out; // e.g., [ , , , , , , ]
- * ...
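The init_value semantics above (the seed is written verbatim to *d_out) are easiest to see with a running-maximum scan; a sketch using the built-in cub::Max functor seeded with INT_MIN:

#include <cub/cub.cuh>
#include <cuda_runtime.h>
#include <climits>

int main()
{
    const int num_items = 7;
    int h_in[num_items] = {8, 6, 7, 5, 3, 0, 9};
    int *d_in, *d_out;
    cudaMalloc(&d_in, num_items * sizeof(int));
    cudaMalloc(&d_out, num_items * sizeof(int));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);

    void *d_temp_storage = NULL;
    size_t temp_storage_bytes = 0;
    cub::DeviceScan::ExclusiveScan(d_temp_storage, temp_storage_bytes, d_in, d_out,
                                   cub::Max(), INT_MIN, num_items);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    cub::DeviceScan::ExclusiveScan(d_temp_storage, temp_storage_bytes, d_in, d_out,
                                   cub::Max(), INT_MIN, num_items);

    // d_out <-- [INT_MIN, 8, 8, 8, 8, 8, 8]: each output is the max of all earlier inputs
    return 0;
}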
- * - * // Determine temporary device storage requirements for inclusive prefix sum - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items); - * - * // Allocate temporary storage for inclusive prefix sum - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run inclusive prefix sum - * cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items); - * - * // d_out <-- [8, 14, 21, 26, 29, 29, 38] - * - * \endcode - * - * \tparam InputIteratorT [inferred] Random-access input iterator type for reading scan inputs \iterator - * \tparam OutputIteratorT [inferred] Random-access output iterator type for writing scan outputs \iterator - */ - template < - typename InputIteratorT, - typename OutputIteratorT> - CUB_RUNTIME_FUNCTION - static cudaError_t InclusiveSum( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items - OutputIteratorT d_out, ///< [out] Pointer to the output sequence of data items - int num_items, ///< [in] Total number of input items (i.e., the length of \p d_in) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. - { - // Signed integer type for global offsets - typedef int OffsetT; - - return DispatchScan::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_in, - d_out, - Sum(), - NullType(), - num_items, - stream, - debug_synchronous); - } - - - /** - * \brief Computes a device-wide inclusive prefix scan using the specified binary \p scan_op functor. - * - * \par - * - Supports non-commutative scan operators. - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates the inclusive prefix min-scan of an \p int device vector. - * \par - * \code - * #include // or equivalently - * - * // CustomMin functor - * struct CustomMin - * { - * template - * CUB_RUNTIME_FUNCTION __forceinline__ - * T operator()(const T &a, const T &b) const { - * return (b < a) ? b : a; - * } - * }; - * - * // Declare, allocate, and initialize device-accessible pointers for input and output - * int num_items; // e.g., 7 - * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] - * int *d_out; // e.g., [ , , , , , , ] - * CustomMin min_op; - * ... 
- * - * // Determine temporary device storage requirements for inclusive prefix scan - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceScan::InclusiveScan(d_temp_storage, temp_storage_bytes, d_in, d_out, min_op, num_items); - * - * // Allocate temporary storage for inclusive prefix scan - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run inclusive prefix min-scan - * cub::DeviceScan::InclusiveScan(d_temp_storage, temp_storage_bytes, d_in, d_out, min_op, num_items); - * - * // d_out <-- [8, 6, 6, 5, 3, 0, 0] - * - * \endcode - * - * \tparam InputIteratorT [inferred] Random-access input iterator type for reading scan inputs \iterator - * \tparam OutputIteratorT [inferred] Random-access output iterator type for writing scan outputs \iterator - * \tparam ScanOp [inferred] Binary scan functor type having member T operator()(const T &a, const T &b) - */ - template < - typename InputIteratorT, - typename OutputIteratorT, - typename ScanOpT> - CUB_RUNTIME_FUNCTION - static cudaError_t InclusiveScan( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items - OutputIteratorT d_out, ///< [out] Pointer to the output sequence of data items - ScanOpT scan_op, ///< [in] Binary scan functor - int num_items, ///< [in] Total number of input items (i.e., the length of \p d_in) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. - { - // Signed integer type for global offsets - typedef int OffsetT; - - return DispatchScan::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_in, - d_out, - scan_op, - NullType(), - num_items, - stream, - debug_synchronous); - } - - //@} end member group - -}; - -/** - * \example example_device_scan.cu - */ - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - - diff --git a/ml-xgboost/cub/cub/device/device_segmented_radix_sort.cuh b/ml-xgboost/cub/cub/device/device_segmented_radix_sort.cuh deleted file mode 100644 index 2d6a8f4..0000000 --- a/ml-xgboost/cub/cub/device/device_segmented_radix_sort.cuh +++ /dev/null @@ -1,855 +0,0 @@ - -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::DeviceSegmentedRadixSort provides device-wide, parallel operations for computing a batched radix sort across multiple, non-overlapping sequences of data items residing within device-accessible memory. - */ - -#pragma once - -#include -#include - -#include "dispatch/dispatch_radix_sort.cuh" -#include "../util_arch.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \brief DeviceSegmentedRadixSort provides device-wide, parallel operations for computing a batched radix sort across multiple, non-overlapping sequences of data items residing within device-accessible memory. ![](segmented_sorting_logo.png) - * \ingroup SegmentedModule - * - * \par Overview - * The [radix sorting method](http://en.wikipedia.org/wiki/Radix_sort) arranges - * items into ascending (or descending) order. The algorithm relies upon a positional representation for - * keys, i.e., each key is comprised of an ordered sequence of symbols (e.g., digits, - * characters, etc.) specified from least-significant to most-significant. For a - * given input sequence of keys and a set of rules specifying a total ordering - * of the symbolic alphabet, the radix sorting method produces a lexicographic - * ordering of those keys. - * - * \par - * DeviceSegmentedRadixSort can sort all of the built-in C++ numeric primitive types, e.g.: - * unsigned char, \p int, \p double, etc. Although the direct radix sorting - * method can only be applied to unsigned integral types, DeviceSegmentedRadixSort - * is able to sort signed and floating-point types via simple bit-wise transformations - * that ensure lexicographic key ordering. - * - * \par Usage Considerations - * \cdp_class{DeviceSegmentedRadixSort} - * - */ -struct DeviceSegmentedRadixSort -{ - - /******************************************************************//** - * \name Key-value pairs - *********************************************************************/ - //@{ - - /** - * \brief Sorts segments of key-value pairs into ascending order. (~2N auxiliary storage required) - * - * \par - * - The contents of the input data are not altered by the sorting operation - * - When input a contiguous sequence of segments, a single sequence - * \p segment_offsets (of length num_segments+1) can be aliased - * for both the \p d_begin_offsets and \p d_end_offsets parameters (where - * the latter is specified as segment_offsets+1). - * - An optional bit subrange [begin_bit, end_bit) of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. 
- * - \devicestorageNP For sorting using only O(P) temporary storage, see the sorting interface using DoubleBuffer wrappers below. - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates the batched sorting of three segments (with one zero-length segment) of \p int keys - * with associated vector of \p int values. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for sorting data - * int num_items; // e.g., 7 - * int num_segments; // e.g., 3 - * int *d_offsets; // e.g., [0, 3, 3, 7] - * int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] - * int *d_keys_out; // e.g., [-, -, -, -, -, -, -] - * int *d_values_in; // e.g., [0, 1, 2, 3, 4, 5, 6] - * int *d_values_out; // e.g., [-, -, -, -, -, -, -] - * ... - * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceSegmentedRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, - * d_keys_in, d_keys_out, d_values_in, d_values_out, - * num_items, num_segments, d_offsets, d_offsets + 1); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run sorting operation - * cub::DeviceSegmentedRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, - * d_keys_in, d_keys_out, d_values_in, d_values_out, - * num_items, num_segments, d_offsets, d_offsets + 1); - * - * // d_keys_out <-- [6, 7, 8, 0, 3, 5, 9] - * // d_values_out <-- [1, 2, 0, 5, 4, 3, 6] - * - * \endcode - * - * \tparam KeyT [inferred] Key type - * \tparam ValueT [inferred] Value type - */ - template < - typename KeyT, - typename ValueT> - CUB_RUNTIME_FUNCTION - static cudaError_t SortPairs( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - const KeyT *d_keys_in, ///< [in] %Device-accessible pointer to the input data of key data to sort - KeyT *d_keys_out, ///< [out] %Device-accessible pointer to the sorted output sequence of key data - const ValueT *d_values_in, ///< [in] %Device-accessible pointer to the corresponding input sequence of associated value items - ValueT *d_values_out, ///< [out] %Device-accessible pointer to the correspondingly-reordered output sequence of associated value items - int num_items, ///< [in] The total number of items to sort (across all segments) - int num_segments, ///< [in] The number of segments that comprise the sorting data - const int *d_begin_offsets, ///< [in] %Device-accessible pointer to the sequence of beginning offsets of length \p num_segments, such that d_begin_offsets[i] is the first element of the ith data segment in d_keys_* and d_values_* - const int *d_end_offsets, ///< [in] %Device-accessible pointer to the sequence of ending offsets of length \p num_segments, such that d_end_offsets[i]-1 is the last element of the ith data segment in d_keys_* and d_values_*. If d_end_offsets[i]-1 <= d_begin_offsets[i], the ith is considered empty. - int begin_bit = 0, ///< [in] [optional] The least-significant bit index (inclusive) needed for key comparison - int end_bit = sizeof(KeyT) * 8, ///< [in] [optional] The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. 
Default is stream 0.
-     bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false.
- {
-     // Signed integer type for global offsets
-     typedef int OffsetT;
-
-     DoubleBuffer<KeyT> d_keys(const_cast<KeyT*>(d_keys_in), d_keys_out);
-     DoubleBuffer<ValueT> d_values(const_cast<ValueT*>(d_values_in), d_values_out);
-
-     return DispatchSegmentedRadixSort<false, KeyT, ValueT, OffsetT>::Dispatch(
-         d_temp_storage,
-         temp_storage_bytes,
-         d_keys,
-         d_values,
-         num_items,
-         num_segments,
-         d_begin_offsets,
-         d_end_offsets,
-         begin_bit,
-         end_bit,
-         false,
-         stream,
-         debug_synchronous);
- }
-
-
- /**
- * \brief Sorts segments of key-value pairs into ascending order. (~N auxiliary storage required)
- *
- * \par
- * - The sorting operation is given a pair of key buffers and a corresponding
- *   pair of associated value buffers.  Each pair is managed by a DoubleBuffer
- *   structure that indicates which of the two buffers is "current" (and thus
- *   contains the input data to be sorted).
- * - The contents of both buffers within each pair may be altered by the sorting
- *   operation.
- * - Upon completion, the sorting operation will update the "current" indicator
- *   within each DoubleBuffer wrapper to reference which of the two buffers
- *   now contains the sorted output sequence (a function of the number of key bits
- *   specified and the targeted device architecture).
- * - When input a contiguous sequence of segments, a single sequence
- *   \p segment_offsets (of length num_segments+1) can be aliased
- *   for both the \p d_begin_offsets and \p d_end_offsets parameters (where
- *   the latter is specified as segment_offsets+1).
- * - An optional bit subrange [begin_bit, end_bit) of differentiating key bits can be
- *   specified. This can reduce overall sorting overhead and yield a corresponding
- *   performance improvement.
- * - \devicestorageP
- * - \devicestorage
- *
- * \par Snippet
- * The code snippet below illustrates the batched sorting of three segments (with one zero-length segment) of \p int keys
- * with associated vector of \p int values.
- * \par
- * \code
- * #include <cub/cub.cuh>   // or equivalently <cub/device/device_segmented_radix_sort.cuh>
- *
- * // Declare, allocate, and initialize device-accessible pointers for sorting data
- * int num_items;          // e.g., 7
- * int num_segments;       // e.g., 3
- * int *d_offsets;         // e.g., [0, 3, 3, 7]
- * int *d_key_buf;         // e.g., [8, 6, 7, 5, 3, 0, 9]
- * int *d_key_alt_buf;     // e.g., [-, -, -, -, -, -, -]
- * int *d_value_buf;       // e.g., [0, 1, 2, 3, 4, 5, 6]
- * int *d_value_alt_buf;   // e.g., [-, -, -, -, -, -, -]
- * ...
- * - * // Create a set of DoubleBuffers to wrap pairs of device pointers - * cub::DoubleBuffer d_keys(d_key_buf, d_key_alt_buf); - * cub::DoubleBuffer d_values(d_value_buf, d_value_alt_buf); - * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceSegmentedRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, - * num_items, num_segments, d_offsets, d_offsets + 1); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run sorting operation - * cub::DeviceSegmentedRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, - * num_items, num_segments, d_offsets, d_offsets + 1); - * - * // d_keys.Current() <-- [6, 7, 8, 0, 3, 5, 9] - * // d_values.Current() <-- [5, 4, 3, 1, 2, 0, 6] - * - * \endcode - * - * \tparam KeyT [inferred] Key type - * \tparam ValueT [inferred] Value type - */ - template < - typename KeyT, - typename ValueT> - CUB_RUNTIME_FUNCTION - static cudaError_t SortPairs( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - DoubleBuffer &d_keys, ///< [in,out] Reference to the double-buffer of keys whose "current" device-accessible buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys - DoubleBuffer &d_values, ///< [in,out] Double-buffer of values whose "current" device-accessible buffer contains the unsorted input values and, upon return, is updated to point to the sorted output values - int num_items, ///< [in] The total number of items to sort (across all segments) - int num_segments, ///< [in] The number of segments that comprise the sorting data - const int *d_begin_offsets, ///< [in] %Device-accessible pointer to the sequence of beginning offsets of length \p num_segments, such that d_begin_offsets[i] is the first element of the ith data segment in d_keys_* and d_values_* - const int *d_end_offsets, ///< [in] %Device-accessible pointer to the sequence of ending offsets of length \p num_segments, such that d_end_offsets[i]-1 is the last element of the ith data segment in d_keys_* and d_values_*. If d_end_offsets[i]-1 <= d_begin_offsets[i], the ith is considered empty. - int begin_bit = 0, ///< [in] [optional] The least-significant bit index (inclusive) needed for key comparison - int end_bit = sizeof(KeyT) * 8, ///< [in] [optional] The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - { - // Signed integer type for global offsets - typedef int OffsetT; - - return DispatchSegmentedRadixSort::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_keys, - d_values, - num_items, - num_segments, - d_begin_offsets, - d_end_offsets, - begin_bit, - end_bit, - true, - stream, - debug_synchronous); - } - - - /** - * \brief Sorts segments of key-value pairs into descending order. (~2N auxiliary storage required). 
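For orientation while reading this removed header: every entry point above follows the same two-phase convention (a sizing call with a NULL temporary-storage pointer, then the working call). The following is a minimal, self-contained host sketch of the DoubleBuffer overload of SortPairs just shown; it is an editorial illustration rather than code from this file, and assumes a CUDA toolkit with this cub/ tree on the include path. Buffer names mirror the snippet above.

    // Sketch: segmented key-value sort via cub::DoubleBuffer (assumed setup).
    #include <cub/cub.cuh>
    #include <cstdio>

    int main()
    {
        const int num_items    = 7;
        const int num_segments = 3;
        int h_offsets[] = {0, 3, 3, 7};             // one zero-length segment
        int h_keys[]    = {8, 6, 7, 5, 3, 0, 9};
        int h_values[]  = {0, 1, 2, 3, 4, 5, 6};

        int *d_offsets, *d_key_buf, *d_key_alt_buf, *d_value_buf, *d_value_alt_buf;
        cudaMalloc(&d_offsets,       sizeof(h_offsets));
        cudaMalloc(&d_key_buf,       sizeof(h_keys));
        cudaMalloc(&d_key_alt_buf,   sizeof(h_keys));
        cudaMalloc(&d_value_buf,     sizeof(h_values));
        cudaMalloc(&d_value_alt_buf, sizeof(h_values));
        cudaMemcpy(d_offsets,   h_offsets, sizeof(h_offsets), cudaMemcpyHostToDevice);
        cudaMemcpy(d_key_buf,   h_keys,    sizeof(h_keys),    cudaMemcpyHostToDevice);
        cudaMemcpy(d_value_buf, h_values,  sizeof(h_values),  cudaMemcpyHostToDevice);

        cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf);
        cub::DoubleBuffer<int> d_values(d_value_buf, d_value_alt_buf);

        // Phase 1: size the temporary storage; phase 2: sort.
        void *d_temp_storage = NULL;
        size_t temp_storage_bytes = 0;
        cub::DeviceSegmentedRadixSort::SortPairs(d_temp_storage, temp_storage_bytes,
            d_keys, d_values, num_items, num_segments, d_offsets, d_offsets + 1);
        cudaMalloc(&d_temp_storage, temp_storage_bytes);
        cub::DeviceSegmentedRadixSort::SortPairs(d_temp_storage, temp_storage_bytes,
            d_keys, d_values, num_items, num_segments, d_offsets, d_offsets + 1);

        // Results live in whichever buffer each DoubleBuffer now marks "current".
        int h_sorted_keys[7];
        cudaMemcpy(h_sorted_keys, d_keys.Current(), sizeof(h_sorted_keys),
                   cudaMemcpyDeviceToHost);
        for (int i = 0; i < num_items; ++i) printf("%d ", h_sorted_keys[i]);
        printf("\n");                               // expected: 6 7 8 0 3 5 9
        return 0;
    }

The caller-supplied alternate buffers are what the "~N auxiliary storage" claim refers to: they stand in for the scratch copies the pointer-based overload would otherwise carve out of temporary storage.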
- * - * \par - * - The contents of the input data are not altered by the sorting operation - * - When input a contiguous sequence of segments, a single sequence - * \p segment_offsets (of length num_segments+1) can be aliased - * for both the \p d_begin_offsets and \p d_end_offsets parameters (where - * the latter is specified as segment_offsets+1). - * - An optional bit subrange [begin_bit, end_bit) of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. - * - \devicestorageNP For sorting using only O(P) temporary storage, see the sorting interface using DoubleBuffer wrappers below. - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates the batched sorting of three segments (with one zero-length segment) of \p int keys - * with associated vector of \p int values. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for sorting data - * int num_items; // e.g., 7 - * int num_segments; // e.g., 3 - * int *d_offsets; // e.g., [0, 3, 3, 7] - * int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] - * int *d_keys_out; // e.g., [-, -, -, -, -, -, -] - * int *d_values_in; // e.g., [0, 1, 2, 3, 4, 5, 6] - * int *d_values_out; // e.g., [-, -, -, -, -, -, -] - * ... - * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, - * d_keys_in, d_keys_out, d_values_in, d_values_out, - * num_items, num_segments, d_offsets, d_offsets + 1); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run sorting operation - * cub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, - * d_keys_in, d_keys_out, d_values_in, d_values_out, - * num_items, num_segments, d_offsets, d_offsets + 1); - * - * // d_keys_out <-- [8, 7, 6, 9, 5, 3, 0] - * // d_values_out <-- [0, 2, 1, 6, 3, 4, 5] - * - * \endcode - * - * \tparam KeyT [inferred] Key type - * \tparam ValueT [inferred] Value type - */ - template < - typename KeyT, - typename ValueT> - CUB_RUNTIME_FUNCTION - static cudaError_t SortPairsDescending( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
- size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - const KeyT *d_keys_in, ///< [in] %Device-accessible pointer to the input data of key data to sort - KeyT *d_keys_out, ///< [out] %Device-accessible pointer to the sorted output sequence of key data - const ValueT *d_values_in, ///< [in] %Device-accessible pointer to the corresponding input sequence of associated value items - ValueT *d_values_out, ///< [out] %Device-accessible pointer to the correspondingly-reordered output sequence of associated value items - int num_items, ///< [in] The total number of items to sort (across all segments) - int num_segments, ///< [in] The number of segments that comprise the sorting data - const int *d_begin_offsets, ///< [in] %Device-accessible pointer to the sequence of beginning offsets of length \p num_segments, such that d_begin_offsets[i] is the first element of the ith data segment in d_keys_* and d_values_* - const int *d_end_offsets, ///< [in] %Device-accessible pointer to the sequence of ending offsets of length \p num_segments, such that d_end_offsets[i]-1 is the last element of the ith data segment in d_keys_* and d_values_*. If d_end_offsets[i]-1 <= d_begin_offsets[i], the ith is considered empty. - int begin_bit = 0, ///< [in] [optional] The least-significant bit index (inclusive) needed for key comparison - int end_bit = sizeof(KeyT) * 8, ///< [in] [optional] The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - { - // Signed integer type for global offsets - typedef int OffsetT; - - DoubleBuffer d_keys(const_cast(d_keys_in), d_keys_out); - DoubleBuffer d_values(const_cast(d_values_in), d_values_out); - - return DispatchSegmentedRadixSort::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_keys, - d_values, - num_items, - num_segments, - d_begin_offsets, - d_end_offsets, - begin_bit, - end_bit, - false, - stream, - debug_synchronous); - } - - - /** - * \brief Sorts segments of key-value pairs into descending order. (~N auxiliary storage required). - * - * \par - * - The sorting operation is given a pair of key buffers and a corresponding - * pair of associated value buffers. Each pair is managed by a DoubleBuffer - * structure that indicates which of the two buffers is "current" (and thus - * contains the input data to be sorted). - * - The contents of both buffers within each pair may be altered by the sorting - * operation. - * - Upon completion, the sorting operation will update the "current" indicator - * within each DoubleBuffer wrapper to reference which of the two buffers - * now contains the sorted output sequence (a function of the number of key bits - * specified and the targeted device architecture). - * - When input a contiguous sequence of segments, a single sequence - * \p segment_offsets (of length num_segments+1) can be aliased - * for both the \p d_begin_offsets and \p d_end_offsets parameters (where - * the latter is specified as segment_offsets+1). - * - An optional bit subrange [begin_bit, end_bit) of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. 
- * - \devicestorageP - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates the batched sorting of three segments (with one zero-length segment) of \p int keys - * with associated vector of \p int values. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for sorting data - * int num_items; // e.g., 7 - * int num_segments; // e.g., 3 - * int *d_offsets; // e.g., [0, 3, 3, 7] - * int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] - * int *d_key_alt_buf; // e.g., [-, -, -, -, -, -, -] - * int *d_value_buf; // e.g., [0, 1, 2, 3, 4, 5, 6] - * int *d_value_alt_buf; // e.g., [-, -, -, -, -, -, -] - * ... - * - * // Create a set of DoubleBuffers to wrap pairs of device pointers - * cub::DoubleBuffer d_keys(d_key_buf, d_key_alt_buf); - * cub::DoubleBuffer d_values(d_value_buf, d_value_alt_buf); - * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, d_keys, d_values, - * num_items, num_segments, d_offsets, d_offsets + 1); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run sorting operation - * cub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, d_keys, d_values, - * num_items, num_segments, d_offsets, d_offsets + 1); - * - * // d_keys.Current() <-- [8, 7, 6, 9, 5, 3, 0] - * // d_values.Current() <-- [0, 2, 1, 6, 3, 4, 5] - * - * \endcode - * - * \tparam KeyT [inferred] Key type - * \tparam ValueT [inferred] Value type - */ - template < - typename KeyT, - typename ValueT> - CUB_RUNTIME_FUNCTION - static cudaError_t SortPairsDescending( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - DoubleBuffer &d_keys, ///< [in,out] Reference to the double-buffer of keys whose "current" device-accessible buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys - DoubleBuffer &d_values, ///< [in,out] Double-buffer of values whose "current" device-accessible buffer contains the unsorted input values and, upon return, is updated to point to the sorted output values - int num_items, ///< [in] The total number of items to sort (across all segments) - int num_segments, ///< [in] The number of segments that comprise the sorting data - const int *d_begin_offsets, ///< [in] %Device-accessible pointer to the sequence of beginning offsets of length \p num_segments, such that d_begin_offsets[i] is the first element of the ith data segment in d_keys_* and d_values_* - const int *d_end_offsets, ///< [in] %Device-accessible pointer to the sequence of ending offsets of length \p num_segments, such that d_end_offsets[i]-1 is the last element of the ith data segment in d_keys_* and d_values_*. If d_end_offsets[i]-1 <= d_begin_offsets[i], the ith is considered empty. 
- int begin_bit = 0, ///< [in] [optional] The least-significant bit index (inclusive) needed for key comparison - int end_bit = sizeof(KeyT) * 8, ///< [in] [optional] The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - { - // Signed integer type for global offsets - typedef int OffsetT; - - return DispatchSegmentedRadixSort::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_keys, - d_values, - num_items, - num_segments, - d_begin_offsets, - d_end_offsets, - begin_bit, - end_bit, - true, - stream, - debug_synchronous); - } - - - //@} end member group - /******************************************************************//** - * \name Keys-only - *********************************************************************/ - //@{ - - - /** - * \brief Sorts segments of keys into ascending order. (~2N auxiliary storage required) - * - * \par - * - The contents of the input data are not altered by the sorting operation - * - An optional bit subrange [begin_bit, end_bit) of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. - * - When input a contiguous sequence of segments, a single sequence - * \p segment_offsets (of length num_segments+1) can be aliased - * for both the \p d_begin_offsets and \p d_end_offsets parameters (where - * the latter is specified as segment_offsets+1). - * - \devicestorageNP For sorting using only O(P) temporary storage, see the sorting interface using DoubleBuffer wrappers below. - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates the batched sorting of three segments (with one zero-length segment) of \p int keys. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for sorting data - * int num_items; // e.g., 7 - * int num_segments; // e.g., 3 - * int *d_offsets; // e.g., [0, 3, 3, 7] - * int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] - * int *d_keys_out; // e.g., [-, -, -, -, -, -, -] - * ... - * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceSegmentedRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, - * num_items, num_segments, d_offsets, d_offsets + 1); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run sorting operation - * cub::DeviceSegmentedRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, - * num_items, num_segments, d_offsets, d_offsets + 1); - * - * // d_keys_out <-- [6, 7, 8, 0, 3, 5, 9] - * - * \endcode - * - * \tparam KeyT [inferred] Key type - */ - template - CUB_RUNTIME_FUNCTION - static cudaError_t SortKeys( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
- size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - const KeyT *d_keys_in, ///< [in] %Device-accessible pointer to the input data of key data to sort - KeyT *d_keys_out, ///< [out] %Device-accessible pointer to the sorted output sequence of key data - int num_items, ///< [in] The total number of items to sort (across all segments) - int num_segments, ///< [in] The number of segments that comprise the sorting data - const int *d_begin_offsets, ///< [in] %Device-accessible pointer to the sequence of beginning offsets of length \p num_segments, such that d_begin_offsets[i] is the first element of the ith data segment in d_keys_* and d_values_* - const int *d_end_offsets, ///< [in] %Device-accessible pointer to the sequence of ending offsets of length \p num_segments, such that d_end_offsets[i]-1 is the last element of the ith data segment in d_keys_* and d_values_*. If d_end_offsets[i]-1 <= d_begin_offsets[i], the ith is considered empty. - int begin_bit = 0, ///< [in] [optional] The least-significant bit index (inclusive) needed for key comparison - int end_bit = sizeof(KeyT) * 8, ///< [in] [optional] The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - { - // Signed integer type for global offsets - typedef int OffsetT; - - // Null value type - DoubleBuffer d_keys(const_cast(d_keys_in), d_keys_out); - DoubleBuffer d_values; - - return DispatchSegmentedRadixSort::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_keys, - d_values, - num_items, - num_segments, - d_begin_offsets, - d_end_offsets, - begin_bit, - end_bit, - false, - stream, - debug_synchronous); - } - - - /** - * \brief Sorts segments of keys into ascending order. (~N auxiliary storage required). - * - * \par - * - The sorting operation is given a pair of key buffers managed by a - * DoubleBuffer structure that indicates which of the two buffers is - * "current" (and thus contains the input data to be sorted). - * - The contents of both buffers may be altered by the sorting operation. - * - Upon completion, the sorting operation will update the "current" indicator - * within the DoubleBuffer wrapper to reference which of the two buffers - * now contains the sorted output sequence (a function of the number of key bits - * specified and the targeted device architecture). - * - When input a contiguous sequence of segments, a single sequence - * \p segment_offsets (of length num_segments+1) can be aliased - * for both the \p d_begin_offsets and \p d_end_offsets parameters (where - * the latter is specified as segment_offsets+1). - * - An optional bit subrange [begin_bit, end_bit) of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. - * - \devicestorageP - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates the batched sorting of three segments (with one zero-length segment) of \p int keys. 
- * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for sorting data - * int num_items; // e.g., 7 - * int num_segments; // e.g., 3 - * int *d_offsets; // e.g., [0, 3, 3, 7] - * int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] - * int *d_key_alt_buf; // e.g., [-, -, -, -, -, -, -] - * ... - * - * // Create a DoubleBuffer to wrap the pair of device pointers - * cub::DoubleBuffer d_keys(d_key_buf, d_key_alt_buf); - * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceSegmentedRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys, - * num_items, num_segments, d_offsets, d_offsets + 1); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run sorting operation - * cub::DeviceSegmentedRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys, - * num_items, num_segments, d_offsets, d_offsets + 1); - * - * // d_keys.Current() <-- [6, 7, 8, 0, 3, 5, 9] - * - * \endcode - * - * \tparam KeyT [inferred] Key type - */ - template - CUB_RUNTIME_FUNCTION - static cudaError_t SortKeys( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - DoubleBuffer &d_keys, ///< [in,out] Reference to the double-buffer of keys whose "current" device-accessible buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys - int num_items, ///< [in] The total number of items to sort (across all segments) - int num_segments, ///< [in] The number of segments that comprise the sorting data - const int *d_begin_offsets, ///< [in] %Device-accessible pointer to the sequence of beginning offsets of length \p num_segments, such that d_begin_offsets[i] is the first element of the ith data segment in d_keys_* and d_values_* - const int *d_end_offsets, ///< [in] %Device-accessible pointer to the sequence of ending offsets of length \p num_segments, such that d_end_offsets[i]-1 is the last element of the ith data segment in d_keys_* and d_values_*. If d_end_offsets[i]-1 <= d_begin_offsets[i], the ith is considered empty. - int begin_bit = 0, ///< [in] [optional] The least-significant bit index (inclusive) needed for key comparison - int end_bit = sizeof(KeyT) * 8, ///< [in] [optional] The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - { - // Signed integer type for global offsets - typedef int OffsetT; - - // Null value type - DoubleBuffer d_values; - - return DispatchSegmentedRadixSort::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_keys, - d_values, - num_items, - num_segments, - d_begin_offsets, - d_end_offsets, - begin_bit, - end_bit, - true, - stream, - debug_synchronous); - } - - /** - * \brief Sorts segments of keys into descending order. (~2N auxiliary storage required). 
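As a complement to the keys-only snippets above, here is a short host-side sketch of the pointer-based SortKeys overload exercising the optional begin_bit/end_bit subrange the documentation mentions. This is an editorial illustration, not code from the original file; the wrapper function and its parameters are hypothetical names.

    // Sketch: segmented keys-only sort comparing only the low 4 bits of each key.
    #include <cub/cub.cuh>

    void sort_keys_low_bits(const int *d_keys_in, int *d_keys_out,
                            int num_items, int num_segments,
                            const int *d_offsets, cudaStream_t stream)
    {
        void *d_temp_storage = NULL;
        size_t temp_storage_bytes = 0;

        // Query pass: with d_temp_storage == NULL, only temp_storage_bytes is written.
        cub::DeviceSegmentedRadixSort::SortKeys(d_temp_storage, temp_storage_bytes,
            d_keys_in, d_keys_out, num_items, num_segments,
            d_offsets, d_offsets + 1, 0, 4, stream);

        cudaMalloc(&d_temp_storage, temp_storage_bytes);

        // Sort pass: only bits [0, 4) of each key participate in the ordering,
        // which trims radix-sort passes as the documentation above notes.
        cub::DeviceSegmentedRadixSort::SortKeys(d_temp_storage, temp_storage_bytes,
            d_keys_in, d_keys_out, num_items, num_segments,
            d_offsets, d_offsets + 1, 0, 4, stream);

        cudaFree(d_temp_storage);
    }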
- * - * \par - * - The contents of the input data are not altered by the sorting operation - * - When input a contiguous sequence of segments, a single sequence - * \p segment_offsets (of length num_segments+1) can be aliased - * for both the \p d_begin_offsets and \p d_end_offsets parameters (where - * the latter is specified as segment_offsets+1). - * - An optional bit subrange [begin_bit, end_bit) of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. - * - \devicestorageNP For sorting using only O(P) temporary storage, see the sorting interface using DoubleBuffer wrappers below. - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates the batched sorting of three segments (with one zero-length segment) of \p int keys. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for sorting data - * int num_items; // e.g., 7 - * int num_segments; // e.g., 3 - * int *d_offsets; // e.g., [0, 3, 3, 7] - * int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] - * int *d_keys_out; // e.g., [-, -, -, -, -, -, -] - * ... - * - * // Create a DoubleBuffer to wrap the pair of device pointers - * cub::DoubleBuffer d_keys(d_key_buf, d_key_alt_buf); - * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceSegmentedRadixSort::SortKeysDescending(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, - * num_items, num_segments, d_offsets, d_offsets + 1); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run sorting operation - * cub::DeviceSegmentedRadixSort::SortKeysDescending(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, - * num_items, num_segments, d_offsets, d_offsets + 1); - * - * // d_keys_out <-- [8, 7, 6, 9, 5, 3, 0] - * - * \endcode - * - * \tparam KeyT [inferred] Key type - */ - template - CUB_RUNTIME_FUNCTION - static cudaError_t SortKeysDescending( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - const KeyT *d_keys_in, ///< [in] %Device-accessible pointer to the input data of key data to sort - KeyT *d_keys_out, ///< [out] %Device-accessible pointer to the sorted output sequence of key data - int num_items, ///< [in] The total number of items to sort (across all segments) - int num_segments, ///< [in] The number of segments that comprise the sorting data - const int *d_begin_offsets, ///< [in] %Device-accessible pointer to the sequence of beginning offsets of length \p num_segments, such that d_begin_offsets[i] is the first element of the ith data segment in d_keys_* and d_values_* - const int *d_end_offsets, ///< [in] %Device-accessible pointer to the sequence of ending offsets of length \p num_segments, such that d_end_offsets[i]-1 is the last element of the ith data segment in d_keys_* and d_values_*. If d_end_offsets[i]-1 <= d_begin_offsets[i], the ith is considered empty. 
- int begin_bit = 0, ///< [in] [optional] The least-significant bit index (inclusive) needed for key comparison - int end_bit = sizeof(KeyT) * 8, ///< [in] [optional] The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - { - // Signed integer type for global offsets - typedef int OffsetT; - - DoubleBuffer d_keys(const_cast(d_keys_in), d_keys_out); - DoubleBuffer d_values; - - return DispatchSegmentedRadixSort::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_keys, - d_values, - num_items, - num_segments, - d_begin_offsets, - d_end_offsets, - begin_bit, - end_bit, - false, - stream, - debug_synchronous); - } - - - /** - * \brief Sorts segments of keys into descending order. (~N auxiliary storage required). - * - * \par - * - The sorting operation is given a pair of key buffers managed by a - * DoubleBuffer structure that indicates which of the two buffers is - * "current" (and thus contains the input data to be sorted). - * - The contents of both buffers may be altered by the sorting operation. - * - Upon completion, the sorting operation will update the "current" indicator - * within the DoubleBuffer wrapper to reference which of the two buffers - * now contains the sorted output sequence (a function of the number of key bits - * specified and the targeted device architecture). - * - When input a contiguous sequence of segments, a single sequence - * \p segment_offsets (of length num_segments+1) can be aliased - * for both the \p d_begin_offsets and \p d_end_offsets parameters (where - * the latter is specified as segment_offsets+1). - * - An optional bit subrange [begin_bit, end_bit) of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. - * - \devicestorageP - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates the batched sorting of three segments (with one zero-length segment) of \p int keys. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for sorting data - * int num_items; // e.g., 7 - * int num_segments; // e.g., 3 - * int *d_offsets; // e.g., [0, 3, 3, 7] - * int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] - * int *d_key_alt_buf; // e.g., [-, -, -, -, -, -, -] - * ... 
- * - * // Create a DoubleBuffer to wrap the pair of device pointers - * cub::DoubleBuffer d_keys(d_key_buf, d_key_alt_buf); - * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceSegmentedRadixSort::SortKeysDescending(d_temp_storage, temp_storage_bytes, d_keys, - * num_items, num_segments, d_offsets, d_offsets + 1); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run sorting operation - * cub::DeviceSegmentedRadixSort::SortKeysDescending(d_temp_storage, temp_storage_bytes, d_keys, - * num_items, num_segments, d_offsets, d_offsets + 1); - * - * // d_keys.Current() <-- [8, 7, 6, 9, 5, 3, 0] - * - * \endcode - * - * \tparam KeyT [inferred] Key type - */ - template - CUB_RUNTIME_FUNCTION - static cudaError_t SortKeysDescending( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - DoubleBuffer &d_keys, ///< [in,out] Reference to the double-buffer of keys whose "current" device-accessible buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys - int num_items, ///< [in] The total number of items to sort (across all segments) - int num_segments, ///< [in] The number of segments that comprise the sorting data - const int *d_begin_offsets, ///< [in] %Device-accessible pointer to the sequence of beginning offsets of length \p num_segments, such that d_begin_offsets[i] is the first element of the ith data segment in d_keys_* and d_values_* - const int *d_end_offsets, ///< [in] %Device-accessible pointer to the sequence of ending offsets of length \p num_segments, such that d_end_offsets[i]-1 is the last element of the ith data segment in d_keys_* and d_values_*. If d_end_offsets[i]-1 <= d_begin_offsets[i], the ith is considered empty. - int begin_bit = 0, ///< [in] [optional] The least-significant bit index (inclusive) needed for key comparison - int end_bit = sizeof(KeyT) * 8, ///< [in] [optional] The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - { - // Signed integer type for global offsets - typedef int OffsetT; - - // Null value type - DoubleBuffer d_values; - - return DispatchSegmentedRadixSort::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_keys, - d_values, - num_items, - num_segments, - d_begin_offsets, - d_end_offsets, - begin_bit, - end_bit, - true, - stream, - debug_synchronous); - } - - - //@} end member group - - -}; - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - - diff --git a/ml-xgboost/cub/cub/device/device_segmented_reduce.cuh b/ml-xgboost/cub/cub/device/device_segmented_reduce.cuh deleted file mode 100644 index 66f1cdf..0000000 --- a/ml-xgboost/cub/cub/device/device_segmented_reduce.cuh +++ /dev/null @@ -1,607 +0,0 @@ - -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. 
All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::DeviceSegmentedReduce provides device-wide, parallel operations for computing a batched reduction across multiple sequences of data items residing within device-accessible memory. - */ - -#pragma once - -#include -#include - -#include "../iterator/arg_index_input_iterator.cuh" -#include "dispatch/dispatch_reduce.cuh" -#include "dispatch/dispatch_reduce_by_key.cuh" -#include "../util_type.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \brief DeviceSegmentedReduce provides device-wide, parallel operations for computing a reduction across multiple sequences of data items residing within device-accessible memory. ![](reduce_logo.png) - * \ingroup SegmentedModule - * - * \par Overview - * A reduction (or fold) - * uses a binary combining operator to compute a single aggregate from a sequence of input elements. - * - * \par Usage Considerations - * \cdp_class{DeviceSegmentedReduce} - * - */ -struct DeviceSegmentedReduce -{ - /** - * \brief Computes a device-wide segmented reduction using the specified binary \p reduction_op functor. - * - * \par - * - Does not support binary reduction operators that are non-commutative. - * - When input a contiguous sequence of segments, a single sequence - * \p segment_offsets (of length num_segments+1) can be aliased - * for both the \p d_begin_offsets and \p d_end_offsets parameters (where - * the latter is specified as segment_offsets+1). - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates a custom min-reduction of a device vector of \p int data elements. 
- * \par - * \code - * #include // or equivalently - * - * // CustomMin functor - * struct CustomMin - * { - * template - * CUB_RUNTIME_FUNCTION __forceinline__ - * T operator()(const T &a, const T &b) const { - * return (b < a) ? b : a; - * } - * }; - * - * // Declare, allocate, and initialize device-accessible pointers for input and output - * int num_segments; // e.g., 3 - * int *d_offsets; // e.g., [0, 3, 3, 7] - * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] - * int *d_out; // e.g., [-, -, -] - * CustomMin min_op; - * int initial_value; // e.g., INT_MAX - * ... - * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceSegmentedReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out, - * num_segments, d_offsets, d_offsets + 1, min_op, initial_value); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run reduction - * cub::DeviceSegmentedReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out, - * num_segments, d_offsets, d_offsets + 1, min_op, initial_value); - * - * // d_out <-- [6, INT_MAX, 0] - * - * \endcode - * - * \tparam InputIteratorT [inferred] Random-access input iterator type for reading input items \iterator - * \tparam OutputIteratorT [inferred] Output iterator type for recording the reduced aggregate \iterator - * \tparam ReductionOp [inferred] Binary reduction functor type having member T operator()(const T &a, const T &b) - * \tparam T [inferred] Data element type that is convertible to the \p value type of \p InputIteratorT - */ - template < - typename InputIteratorT, - typename OutputIteratorT, - typename ReductionOp, - typename T> - CUB_RUNTIME_FUNCTION - static cudaError_t Reduce( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items - OutputIteratorT d_out, ///< [out] Pointer to the output aggregate - int num_segments, ///< [in] The number of segments that comprise the sorting data - int *d_begin_offsets, ///< [in] %Device-accessible pointer to the sequence of beginning offsets of length \p num_segments, such that d_begin_offsets[i] is the first element of the ith data segment in d_keys_* and d_values_* - int *d_end_offsets, ///< [in] %Device-accessible pointer to the sequence of ending offsets of length \p num_segments, such that d_end_offsets[i]-1 is the last element of the ith data segment in d_keys_* and d_values_*. If d_end_offsets[i]-1 <= d_begin_offsets[i], the ith is considered empty. - ReductionOp reduction_op, ///< [in] Binary reduction functor - T initial_value, ///< [in] Initial value of the reduction for each segment - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
- { - // Signed integer type for global offsets - typedef int OffsetT; - - return DispatchSegmentedReduce::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_in, - d_out, - num_segments, - d_begin_offsets, - d_end_offsets, - reduction_op, - initial_value, - stream, - debug_synchronous); - } - - - /** - * \brief Computes a device-wide segmented sum using the addition ('+') operator. - * - * \par - * - Uses \p 0 as the initial value of the reduction for each segment. - * - When input a contiguous sequence of segments, a single sequence - * \p segment_offsets (of length num_segments+1) can be aliased - * for both the \p d_begin_offsets and \p d_end_offsets parameters (where - * the latter is specified as segment_offsets+1). - * - Does not support \p + operators that are non-commutative.. - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates the sum reduction of a device vector of \p int data elements. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for input and output - * int num_segments; // e.g., 3 - * int *d_offsets; // e.g., [0, 3, 3, 7] - * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] - * int *d_out; // e.g., [-, -, -] - * ... - * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, - * num_segments, d_offsets, d_offsets + 1); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run sum-reduction - * cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, - * num_segments, d_offsets, d_offsets + 1); - * - * // d_out <-- [21, 0, 17] - * - * \endcode - * - * \tparam InputIteratorT [inferred] Random-access input iterator type for reading input items \iterator - * \tparam OutputIteratorT [inferred] Output iterator type for recording the reduced aggregate \iterator - */ - template < - typename InputIteratorT, - typename OutputIteratorT> - CUB_RUNTIME_FUNCTION - static cudaError_t Sum( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items - OutputIteratorT d_out, ///< [out] Pointer to the output aggregate - int num_segments, ///< [in] The number of segments that comprise the sorting data - int *d_begin_offsets, ///< [in] %Device-accessible pointer to the sequence of beginning offsets of length \p num_segments, such that d_begin_offsets[i] is the first element of the ith data segment in d_keys_* and d_values_* - int *d_end_offsets, ///< [in] %Device-accessible pointer to the sequence of ending offsets of length \p num_segments, such that d_end_offsets[i]-1 is the last element of the ith data segment in d_keys_* and d_values_*. If d_end_offsets[i]-1 <= d_begin_offsets[i], the ith is considered empty. - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
- {
-     // Signed integer type for global offsets
-     typedef int OffsetT;
-
-     // The output value type
-     typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
-         typename std::iterator_traits<InputIteratorT>::value_type,                                         // ... then the input iterator's value type,
-         typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT;                         // ... else the output iterator's value type
-
-     return DispatchSegmentedReduce<InputIteratorT, OutputIteratorT, OffsetT, cub::Sum>::Dispatch(
-         d_temp_storage,
-         temp_storage_bytes,
-         d_in,
-         d_out,
-         num_segments,
-         d_begin_offsets,
-         d_end_offsets,
-         cub::Sum(),
-         OutputT(),            // zero-initialize
-         stream,
-         debug_synchronous);
- }
-
-
- /**
- * \brief Computes a device-wide segmented minimum using the less-than ('<') operator.
- *
- * \par
- * - Uses std::numeric_limits<T>::max() as the initial value of the reduction for each segment.
- * - When input a contiguous sequence of segments, a single sequence
- *   \p segment_offsets (of length num_segments+1) can be aliased
- *   for both the \p d_begin_offsets and \p d_end_offsets parameters (where
- *   the latter is specified as segment_offsets+1).
- * - Does not support \p < operators that are non-commutative.
- * - \devicestorage
- *
- * \par Snippet
- * The code snippet below illustrates the min-reduction of a device vector of \p int data elements.
- * \par
- * \code
- * #include <cub/cub.cuh>   // or equivalently <cub/device/device_segmented_reduce.cuh>
- *
- * // Declare, allocate, and initialize device-accessible pointers for input and output
- * int num_segments;   // e.g., 3
- * int *d_offsets;     // e.g., [0, 3, 3, 7]
- * int *d_in;          // e.g., [8, 6, 7, 5, 3, 0, 9]
- * int *d_out;         // e.g., [-, -, -]
- * ...
- *
- * // Determine temporary device storage requirements
- * void *d_temp_storage = NULL;
- * size_t temp_storage_bytes = 0;
- * cub::DeviceSegmentedReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_out,
- *     num_segments, d_offsets, d_offsets + 1);
- *
- * // Allocate temporary storage
- * cudaMalloc(&d_temp_storage, temp_storage_bytes);
- *
- * // Run min-reduction
- * cub::DeviceSegmentedReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_out,
- *     num_segments, d_offsets, d_offsets + 1);
- *
- * // d_out <-- [6, INT_MAX, 0]
- *
- * \endcode
- *
- * \tparam InputIteratorT   [inferred] Random-access input iterator type for reading input items \iterator
- * \tparam OutputIteratorT  [inferred] Output iterator type for recording the reduced aggregate \iterator
- */
- template <
-     typename InputIteratorT,
-     typename OutputIteratorT>
- CUB_RUNTIME_FUNCTION
- static cudaError_t Min(
-     void *d_temp_storage,           ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
-     size_t &temp_storage_bytes,     ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
-     InputIteratorT d_in,            ///< [in] Pointer to the input sequence of data items
-     OutputIteratorT d_out,          ///< [out] Pointer to the output aggregate
-     int num_segments,               ///< [in] The number of segments that comprise the sorting data
-     int *d_begin_offsets,           ///< [in] %Device-accessible pointer to the sequence of beginning offsets of length \p num_segments, such that d_begin_offsets[i] is the first element of the ith data segment in d_keys_* and d_values_*
-     int *d_end_offsets,             ///< [in] %Device-accessible pointer to the sequence of ending offsets of length \p num_segments, such that d_end_offsets[i]-1 is the last element of the ith data segment in d_keys_* and d_values_*.
If d_end_offsets[i]-1 <= d_begin_offsets[i], the ith is considered empty. - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - { - // Signed integer type for global offsets - typedef int OffsetT; - - // The input value type - typedef typename std::iterator_traits::value_type InputT; - - return DispatchSegmentedReduce::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_in, - d_out, - num_segments, - d_begin_offsets, - d_end_offsets, - cub::Min(), - Traits::Max(), // replace with std::numeric_limits::max() when C++11 support is more prevalent - stream, - debug_synchronous); - } - - - /** - * \brief Finds the first device-wide minimum in each segment using the less-than ('<') operator, also returning the in-segment index of that item. - * - * \par - * - The output value type of \p d_out is cub::KeyValuePair (assuming the value type of \p d_in is \p T) - * - The minimum of the ith segment is written to d_out[i].value and its offset in that segment is written to d_out[i].key. - * - The {1, std::numeric_limits::max()} tuple is produced for zero-length inputs - * - When input a contiguous sequence of segments, a single sequence - * \p segment_offsets (of length num_segments+1) can be aliased - * for both the \p d_begin_offsets and \p d_end_offsets parameters (where - * the latter is specified as segment_offsets+1). - * - Does not support \p < operators that are non-commutative. - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates the argmin-reduction of a device vector of \p int data elements. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for input and output - * int num_segments; // e.g., 3 - * int *d_offsets; // e.g., [0, 3, 3, 7] - * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] - * KeyValuePair *d_out; // e.g., [{-,-}, {-,-}, {-,-}] - * ... - * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceSegmentedReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_out, - * num_segments, d_offsets, d_offsets + 1); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run argmin-reduction - * cub::DeviceSegmentedReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_out, - * num_segments, d_offsets, d_offsets + 1); - * - * // d_out <-- [{1,6}, {1,INT_MAX}, {2,0}] - * - * \endcode - * - * \tparam InputIteratorT [inferred] Random-access input iterator type for reading input items (of some type \p T) \iterator - * \tparam OutputIteratorT [inferred] Output iterator type for recording the reduced aggregate (having value type KeyValuePair) \iterator - */ - template < - typename InputIteratorT, - typename OutputIteratorT> - CUB_RUNTIME_FUNCTION - static cudaError_t ArgMin( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
- size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items - OutputIteratorT d_out, ///< [out] Pointer to the output aggregate - int num_segments, ///< [in] The number of segments that comprise the sorting data - int *d_begin_offsets, ///< [in] %Device-accessible pointer to the sequence of beginning offsets of length \p num_segments, such that d_begin_offsets[i] is the first element of the ith data segment in d_keys_* and d_values_* - int *d_end_offsets, ///< [in] %Device-accessible pointer to the sequence of ending offsets of length \p num_segments, such that d_end_offsets[i]-1 is the last element of the ith data segment in d_keys_* and d_values_*. If d_end_offsets[i]-1 <= d_begin_offsets[i], the ith is considered empty. - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - { - // Signed integer type for global offsets - typedef int OffsetT; - - // The input type - typedef typename std::iterator_traits::value_type InputValueT; - - // The output tuple type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - KeyValuePair, // ... then the key value pair OffsetT + InputValueT - typename std::iterator_traits::value_type>::Type OutputTupleT; // ... else the output iterator's value type - - // The output value type - typedef typename OutputTupleT::Value OutputValueT; - - // Wrapped input iterator to produce index-value tuples - typedef ArgIndexInputIterator ArgIndexInputIteratorT; - ArgIndexInputIteratorT d_indexed_in(d_in); - - // Initial value - OutputTupleT initial_value(1, Traits::Max()); // replace with std::numeric_limits::max() when C++11 support is more prevalent - - return DispatchSegmentedReduce::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_indexed_in, - d_out, - num_segments, - d_begin_offsets, - d_end_offsets, - cub::ArgMin(), - initial_value, - stream, - debug_synchronous); - } - - - /** - * \brief Computes a device-wide segmented maximum using the greater-than ('>') operator. - * - * \par - * - Uses std::numeric_limits::lowest() as the initial value of the reduction. - * - When input a contiguous sequence of segments, a single sequence - * \p segment_offsets (of length num_segments+1) can be aliased - * for both the \p d_begin_offsets and \p d_end_offsets parameters (where - * the latter is specified as segment_offsets+1). - * - Does not support \p > operators that are non-commutative. - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates the max-reduction of a device vector of \p int data elements. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for input and output - * int num_segments; // e.g., 3 - * int *d_offsets; // e.g., [0, 3, 3, 7] - * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] - * int *d_out; // e.g., [-, -, -] - * ... 
- * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceSegmentedReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, - * num_segments, d_offsets, d_offsets + 1); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run max-reduction - * cub::DeviceSegmentedReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, - * num_segments, d_offsets, d_offsets + 1); - * - * // d_out <-- [8, INT_MIN, 9] - * - * \endcode - * - * \tparam InputIteratorT [inferred] Random-access input iterator type for reading input items \iterator - * \tparam OutputIteratorT [inferred] Output iterator type for recording the reduced aggregate \iterator - */ - template < - typename InputIteratorT, - typename OutputIteratorT> - CUB_RUNTIME_FUNCTION - static cudaError_t Max( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items - OutputIteratorT d_out, ///< [out] Pointer to the output aggregate - int num_segments, ///< [in] The number of segments that comprise the sorting data - int *d_begin_offsets, ///< [in] %Device-accessible pointer to the sequence of beginning offsets of length \p num_segments, such that d_begin_offsets[i] is the first element of the ith data segment in d_keys_* and d_values_* - int *d_end_offsets, ///< [in] %Device-accessible pointer to the sequence of ending offsets of length \p num_segments, such that d_end_offsets[i]-1 is the last element of the ith data segment in d_keys_* and d_values_*. If d_end_offsets[i]-1 <= d_begin_offsets[i], the ith is considered empty. - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - { - // Signed integer type for global offsets - typedef int OffsetT; - - // The input value type - typedef typename std::iterator_traits::value_type InputT; - - return DispatchSegmentedReduce::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_in, - d_out, - num_segments, - d_begin_offsets, - d_end_offsets, - cub::Max(), - Traits::Lowest(), // replace with std::numeric_limits::lowest() when C++11 support is more prevalent - stream, - debug_synchronous); - } - - - /** - * \brief Finds the first device-wide maximum in each segment using the greater-than ('>') operator, also returning the in-segment index of that item - * - * \par - * - The output value type of \p d_out is cub::KeyValuePair (assuming the value type of \p d_in is \p T) - * - The maximum of the ith segment is written to d_out[i].value and its offset in that segment is written to d_out[i].key. - * - The {1, std::numeric_limits::lowest()} tuple is produced for zero-length inputs - * - When input a contiguous sequence of segments, a single sequence - * \p segment_offsets (of length num_segments+1) can be aliased - * for both the \p d_begin_offsets and \p d_end_offsets parameters (where - * the latter is specified as segment_offsets+1). - * - Does not support \p > operators that are non-commutative. 
- * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates the argmax-reduction of a device vector of \p int data elements. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for input and output - * int num_segments; // e.g., 3 - * int *d_offsets; // e.g., [0, 3, 3, 7] - * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] - * KeyValuePair *d_out; // e.g., [{-,-}, {-,-}, {-,-}] - * ... - * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceSegmentedReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_out, - * num_segments, d_offsets, d_offsets + 1); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run argmax-reduction - * cub::DeviceSegmentedReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_out, - * num_segments, d_offsets, d_offsets + 1); - * - * // d_out <-- [{0,8}, {1,INT_MIN}, {3,9}] - * - * \endcode - * - * \tparam InputIteratorT [inferred] Random-access input iterator type for reading input items (of some type \p T) \iterator - * \tparam OutputIteratorT [inferred] Output iterator type for recording the reduced aggregate (having value type KeyValuePair) \iterator - */ - template < - typename InputIteratorT, - typename OutputIteratorT> - CUB_RUNTIME_FUNCTION - static cudaError_t ArgMax( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items - OutputIteratorT d_out, ///< [out] Pointer to the output aggregate - int num_segments, ///< [in] The number of segments that comprise the sorting data - int *d_begin_offsets, ///< [in] %Device-accessible pointer to the sequence of beginning offsets of length \p num_segments, such that d_begin_offsets[i] is the first element of the ith data segment in d_keys_* and d_values_* - int *d_end_offsets, ///< [in] %Device-accessible pointer to the sequence of ending offsets of length \p num_segments, such that d_end_offsets[i]-1 is the last element of the ith data segment in d_keys_* and d_values_*. If d_end_offsets[i]-1 <= d_begin_offsets[i], the ith is considered empty. - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - { - // Signed integer type for global offsets - typedef int OffsetT; - - // The input type - typedef typename std::iterator_traits::value_type InputValueT; - - // The output tuple type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - KeyValuePair, // ... then the key value pair OffsetT + InputValueT - typename std::iterator_traits::value_type>::Type OutputTupleT; // ... 
else the output iterator's value type - - // The output value type - typedef typename OutputTupleT::Value OutputValueT; - - // Wrapped input iterator to produce index-value tuples - typedef ArgIndexInputIterator ArgIndexInputIteratorT; - ArgIndexInputIteratorT d_indexed_in(d_in); - - // Initial value - OutputTupleT initial_value(1, Traits::Lowest()); // replace with std::numeric_limits::lowest() when C++11 support is more prevalent - - return DispatchSegmentedReduce::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_indexed_in, - d_out, - num_segments, - d_begin_offsets, - d_end_offsets, - cub::ArgMax(), - initial_value, - stream, - debug_synchronous); - } - -}; - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - - diff --git a/ml-xgboost/cub/cub/device/device_select.cuh b/ml-xgboost/cub/cub/device/device_select.cuh deleted file mode 100644 index 237b595..0000000 --- a/ml-xgboost/cub/cub/device/device_select.cuh +++ /dev/null @@ -1,369 +0,0 @@ - -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::DeviceSelect provides device-wide, parallel operations for compacting selected items from sequences of data items residing within device-accessible memory. - */ - -#pragma once - -#include -#include - -#include "dispatch/dispatch_select_if.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \brief DeviceSelect provides device-wide, parallel operations for compacting selected items from sequences of data items residing within device-accessible memory. ![](select_logo.png) - * \ingroup SingleModule - * - * \par Overview - * These operations apply a selection criterion to selectively copy - * items from a specified input sequence to a compact output sequence. 
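The ArgMin/ArgMax variants documented in the hunks above differ from Min/Max only in their output element type, cub::KeyValuePair (key = offset within the segment, value = the extremum), and in their sentinel tuple for empty segments. A sketch under the same assumptions as the previous example; argmax_demo is an illustrative name, not part of the deleted file:

    #include <cub/cub.cuh>
    #include <cuda_runtime.h>

    // d_in/d_offsets as in the previous sketch: [8, 6, 7, 5, 3, 0, 9], offsets [0, 3, 3, 7].
    void argmax_demo(int *d_in, int *d_offsets, int num_segments)
    {
        cub::KeyValuePair<int, int> *d_out;   // .key = offset within segment, .value = maximum
        cudaMalloc(&d_out, num_segments * sizeof(cub::KeyValuePair<int, int>));

        void  *d_temp_storage     = NULL;
        size_t temp_storage_bytes = 0;
        cub::DeviceSegmentedReduce::ArgMax(d_temp_storage, temp_storage_bytes,
            d_in, d_out, num_segments, d_offsets, d_offsets + 1);
        cudaMalloc(&d_temp_storage, temp_storage_bytes);
        cub::DeviceSegmentedReduce::ArgMax(d_temp_storage, temp_storage_bytes,
            d_in, d_out, num_segments, d_offsets, d_offsets + 1);
        // Per the documentation above: d_out <-- [{0,8}, {1,INT_MIN}, {3,9}];
        // the empty middle segment yields the {1, lowest} sentinel tuple.
    }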
- * - * \par Usage Considerations - * \cdp_class{DeviceSelect} - * - * \par Performance - * \linear_performance{select-flagged, select-if, and select-unique} - * - * \par - * The following chart illustrates DeviceSelect::If - * performance across different CUDA architectures for \p int32 items, - * where 50% of the items are randomly selected. - * - * \image html select_if_int32_50_percent.png - * - * \par - * The following chart illustrates DeviceSelect::Unique - * performance across different CUDA architectures for \p int32 items - * where segments have lengths uniformly sampled from [1,1000]. - * - * \image html select_unique_int32_len_500.png - * - * \par - * \plots_below - * - */ -struct DeviceSelect -{ - /** - * \brief Uses the \p d_flags sequence to selectively copy the corresponding items from \p d_in into \p d_out. The total number of items selected is written to \p d_num_selected_out. ![](select_flags_logo.png) - * - * \par - * - The value type of \p d_flags must be castable to \p bool (e.g., \p bool, \p char, \p int, etc.). - * - Copies of the selected items are compacted into \p d_out and maintain their original relative ordering. - * - \devicestorage - * - * \par Snippet - * The code snippet below illustrates the compaction of items selected from an \p int device vector. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for input, flags, and output - * int num_items; // e.g., 8 - * int *d_in; // e.g., [1, 2, 3, 4, 5, 6, 7, 8] - * char *d_flags; // e.g., [1, 0, 0, 1, 0, 1, 1, 0] - * int *d_out; // e.g., [ , , , , , , , ] - * int *d_num_selected_out; // e.g., [ ] - * ... - * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run selection - * cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items); - * - * // d_out <-- [1, 4, 6, 7] - * // d_num_selected_out <-- [4] - * - * \endcode - * - * \tparam InputIteratorT [inferred] Random-access input iterator type for reading input items \iterator - * \tparam FlagIterator [inferred] Random-access input iterator type for reading selection flags \iterator - * \tparam OutputIteratorT [inferred] Random-access output iterator type for writing selected items \iterator - * \tparam NumSelectedIteratorT [inferred] Output iterator type for recording the number of items selected \iterator - */ - template < - typename InputIteratorT, - typename FlagIterator, - typename OutputIteratorT, - typename NumSelectedIteratorT> - CUB_RUNTIME_FUNCTION __forceinline__ - static cudaError_t Flagged( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
- size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items - FlagIterator d_flags, ///< [in] Pointer to the input sequence of selection flags - OutputIteratorT d_out, ///< [out] Pointer to the output sequence of selected data items - NumSelectedIteratorT d_num_selected_out, ///< [out] Pointer to the output total number of items selected (i.e., length of \p d_out) - int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. - { - typedef int OffsetT; // Signed integer type for global offsets - typedef NullType SelectOp; // Selection op (not used) - typedef NullType EqualityOp; // Equality operator (not used) - - return DispatchSelectIf::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_in, - d_flags, - d_out, - d_num_selected_out, - SelectOp(), - EqualityOp(), - num_items, - stream, - debug_synchronous); - } - - - /** - * \brief Uses the \p select_op functor to selectively copy items from \p d_in into \p d_out. The total number of items selected is written to \p d_num_selected_out. ![](select_logo.png) - * - * \par - * - Copies of the selected items are compacted into \p d_out and maintain their original relative ordering. - * - \devicestorage - * - * \par Performance - * The following charts illustrate saturated select-if performance across different - * CUDA architectures for \p int32 and \p int64 items, respectively. Items are - * selected with 50% probability. - * - * \image html select_if_int32_50_percent.png - * \image html select_if_int64_50_percent.png - * - * \par - * The following charts are similar, but 5% selection probability: - * - * \image html select_if_int32_5_percent.png - * \image html select_if_int64_5_percent.png - * - * \par Snippet - * The code snippet below illustrates the compaction of items selected from an \p int device vector. - * \par - * \code - * #include // or equivalently - * - * // Functor type for selecting values less than some criteria - * struct LessThan - * { - * int compare; - * - * CUB_RUNTIME_FUNCTION __forceinline__ - * LessThan(int compare) : compare(compare) {} - * - * CUB_RUNTIME_FUNCTION __forceinline__ - * bool operator()(const int &a) const { - * return (a < compare); - * } - * }; - * - * // Declare, allocate, and initialize device-accessible pointers for input and output - * int num_items; // e.g., 8 - * int *d_in; // e.g., [0, 2, 3, 9, 5, 2, 81, 8] - * int *d_out; // e.g., [ , , , , , , , ] - * int *d_num_selected_out; // e.g., [ ] - * LessThan select_op(7); - * ... 
- * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceSelect::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run selection - * cub::DeviceSelect::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op); - * - * // d_out <-- [0, 2, 3, 5, 2] - * // d_num_selected_out <-- [5] - * - * \endcode - * - * \tparam InputIteratorT [inferred] Random-access input iterator type for reading input items \iterator - * \tparam OutputIteratorT [inferred] Random-access output iterator type for writing selected items \iterator - * \tparam NumSelectedIteratorT [inferred] Output iterator type for recording the number of items selected \iterator - * \tparam SelectOp [inferred] Selection operator type having member bool operator()(const T &a) - */ - template < - typename InputIteratorT, - typename OutputIteratorT, - typename NumSelectedIteratorT, - typename SelectOp> - CUB_RUNTIME_FUNCTION __forceinline__ - static cudaError_t If( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items - OutputIteratorT d_out, ///< [out] Pointer to the output sequence of selected data items - NumSelectedIteratorT d_num_selected_out, ///< [out] Pointer to the output total number of items selected (i.e., length of \p d_out) - int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) - SelectOp select_op, ///< [in] Unary selection operator - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. - { - typedef int OffsetT; // Signed integer type for global offsets - typedef NullType* FlagIterator; // FlagT iterator type (not used) - typedef NullType EqualityOp; // Equality operator (not used) - - return DispatchSelectIf::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_in, - NULL, - d_out, - d_num_selected_out, - select_op, - EqualityOp(), - num_items, - stream, - debug_synchronous); - } - - - /** - * \brief Given an input sequence \p d_in having runs of consecutive equal-valued keys, only the first key from each run is selectively copied to \p d_out. The total number of items selected is written to \p d_num_selected_out. ![](unique_logo.png) - * - * \par - * - The == equality operator is used to determine whether keys are equivalent - * - Copies of the selected items are compacted into \p d_out and maintain their original relative ordering. - * - \devicestorage - * - * \par Performance - * The following charts illustrate saturated select-unique performance across different - * CUDA architectures for \p int32 and \p int64 items, respectively. Segments have - * lengths uniformly sampled from [1,1000]. 
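Picking up the select-if snippet above, here is a compilable sketch with the selection functor spelled out; because the functor runs on the device, its operator() must be device-callable. Same assumptions as earlier examples; select_if_demo is an illustrative wrapper:

    #include <cub/cub.cuh>
    #include <cuda_runtime.h>

    // Functor type for selecting values less than some threshold.
    struct LessThan
    {
        int compare;

        CUB_RUNTIME_FUNCTION __forceinline__
        LessThan(int compare) : compare(compare) {}

        CUB_RUNTIME_FUNCTION __forceinline__
        bool operator()(const int &a) const
        {
            return a < compare;
        }
    };

    void select_if_demo(int *d_in, int *d_out, int *d_num_selected_out, int num_items)
    {
        LessThan select_op(7);

        void  *d_temp_storage     = NULL;
        size_t temp_storage_bytes = 0;
        cub::DeviceSelect::If(d_temp_storage, temp_storage_bytes,
            d_in, d_out, d_num_selected_out, num_items, select_op);
        cudaMalloc(&d_temp_storage, temp_storage_bytes);
        cub::DeviceSelect::If(d_temp_storage, temp_storage_bytes,
            d_in, d_out, d_num_selected_out, num_items, select_op);
        // For d_in = [0, 2, 3, 9, 5, 2, 81, 8]: d_out <-- [0, 2, 3, 5, 2],
        // d_num_selected_out <-- [5]; selected items keep their relative order.
    }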
- * - * \image html select_unique_int32_len_500.png - * \image html select_unique_int64_len_500.png - * - * \par - * The following charts are similar, but with segment lengths uniformly sampled from [1,10]: - * - * \image html select_unique_int32_len_5.png - * \image html select_unique_int64_len_5.png - * - * \par Snippet - * The code snippet below illustrates the compaction of items selected from an \p int device vector. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for input and output - * int num_items; // e.g., 8 - * int *d_in; // e.g., [0, 2, 2, 9, 5, 5, 5, 8] - * int *d_out; // e.g., [ , , , , , , , ] - * int *d_num_selected_out; // e.g., [ ] - * ... - * - * // Determine temporary device storage requirements - * void *d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceSelect::Unique(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run selection - * cub::DeviceSelect::Unique(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items); - * - * // d_out <-- [0, 2, 9, 5, 8] - * // d_num_selected_out <-- [5] - * - * \endcode - * - * \tparam InputIteratorT [inferred] Random-access input iterator type for reading input items \iterator - * \tparam OutputIteratorT [inferred] Random-access output iterator type for writing selected items \iterator - * \tparam NumSelectedIteratorT [inferred] Output iterator type for recording the number of items selected \iterator - */ - template < - typename InputIteratorT, - typename OutputIteratorT, - typename NumSelectedIteratorT> - CUB_RUNTIME_FUNCTION __forceinline__ - static cudaError_t Unique( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items - OutputIteratorT d_out, ///< [out] Pointer to the output sequence of selected data items - NumSelectedIteratorT d_num_selected_out, ///< [out] Pointer to the output total number of items selected (i.e., length of \p d_out) - int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. 
- { - typedef int OffsetT; // Signed integer type for global offsets - typedef NullType* FlagIterator; // FlagT iterator type (not used) - typedef NullType SelectOp; // Selection op (not used) - typedef Equality EqualityOp; // Default == operator - - return DispatchSelectIf::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_in, - NULL, - d_out, - d_num_selected_out, - SelectOp(), - EqualityOp(), - num_items, - stream, - debug_synchronous); - } - -}; - -/** - * \example example_device_select_flagged.cu - * \example example_device_select_if.cu - * \example example_device_select_unique.cu - */ - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - - diff --git a/ml-xgboost/cub/cub/device/device_spmv.cuh b/ml-xgboost/cub/cub/device/device_spmv.cuh deleted file mode 100644 index b0d0e02..0000000 --- a/ml-xgboost/cub/cub/device/device_spmv.cuh +++ /dev/null @@ -1,174 +0,0 @@ - -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::DeviceSpmv provides device-wide parallel operations for performing sparse-matrix * vector multiplication (SpMV). - */ - -#pragma once - -#include -#include -#include - -#include "dispatch/dispatch_spmv_orig.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \brief DeviceSpmv provides device-wide parallel operations for performing sparse-matrix * dense-vector multiplication (SpMV). 
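Rounding out the deleted DeviceSelect interface, Unique performs run-length deduplication: it keeps the first key of each run of equal consecutive keys. A sketch under the same assumptions; unique_demo is an illustrative name:

    #include <cub/cub.cuh>
    #include <cuda_runtime.h>

    void unique_demo(int *d_in, int *d_out, int *d_num_selected_out, int num_items)
    {
        void  *d_temp_storage     = NULL;
        size_t temp_storage_bytes = 0;
        cub::DeviceSelect::Unique(d_temp_storage, temp_storage_bytes,
            d_in, d_out, d_num_selected_out, num_items);
        cudaMalloc(&d_temp_storage, temp_storage_bytes);
        cub::DeviceSelect::Unique(d_temp_storage, temp_storage_bytes,
            d_in, d_out, d_num_selected_out, num_items);
        // For d_in = [0, 2, 2, 9, 5, 5, 5, 8]: d_out <-- [0, 2, 9, 5, 8],
        // d_num_selected_out <-- [5]. Only adjacent duplicates collapse, so a
        // value can reappear later in d_out if its runs are not contiguous.
    }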
- * \ingroup SingleModule - * - * \par Overview - * The [SpMV computation](http://en.wikipedia.org/wiki/Sparse_matrix-vector_multiplication) - * performs the matrix-vector operation - * y = alpha*A*x + beta*y, - * where: - * - A is an mxn sparse matrix whose non-zero structure is specified in - * [compressed-storage-row (CSR) format](http://en.wikipedia.org/wiki/Sparse_matrix#Compressed_row_Storage_.28CRS_or_CSR.29) - * (i.e., three arrays: values, row_offsets, and column_indices) - * - x and y are dense vectors - * - alpha and beta are scalar multiplicands - * - * \par Usage Considerations - * \cdp_class{DeviceSpmv} - * - */ -struct DeviceSpmv -{ - /******************************************************************//** - * \name CSR matrix operations - *********************************************************************/ - //@{ - - /** - * \brief This function performs the matrix-vector operation y = A*x. - * - * \par Snippet - * The code snippet below illustrates SpMV upon a 9x9 CSR matrix A - * representing a 3x3 lattice (24 non-zeros). - * - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize device-accessible pointers for input matrix A, input vector x, - * // and output vector y - * int num_rows = 9; - * int num_cols = 9; - * int num_nonzeros = 24; - * - * float* d_values; // e.g., [1, 1, 1, 1, 1, 1, 1, 1, - * // 1, 1, 1, 1, 1, 1, 1, 1, - * // 1, 1, 1, 1, 1, 1, 1, 1] - * - * int* d_column_indices; // e.g., [1, 3, 0, 2, 4, 1, 5, 0, - * // 4, 6, 1, 3, 5, 7, 2, 4, - * // 8, 3, 7, 4, 6, 8, 5, 7] - * - * int* d_row_offsets; // e.g., [0, 2, 5, 7, 10, 14, 17, 19, 22, 24] - * - * float* d_vector_x; // e.g., [1, 1, 1, 1, 1, 1, 1, 1, 1] - * float* d_vector_y; // e.g., [ , , , , , , , , ] - * ... - * - * // Determine temporary device storage requirements - * void* d_temp_storage = NULL; - * size_t temp_storage_bytes = 0; - * cub::DeviceSpmv::CsrMV(d_temp_storage, temp_storage_bytes, d_values, - * d_row_offsets, d_column_indices, d_vector_x, d_vector_y, - * num_rows, num_cols, num_nonzeros, alpha, beta); - * - * // Allocate temporary storage - * cudaMalloc(&d_temp_storage, temp_storage_bytes); - * - * // Run SpMV - * cub::DeviceSpmv::CsrMV(d_temp_storage, temp_storage_bytes, d_values, - * d_row_offsets, d_column_indices, d_vector_x, d_vector_y, - * num_rows, num_cols, num_nonzeros, alpha, beta); - * - * // d_vector_y <-- [2, 3, 2, 3, 4, 3, 2, 3, 2] - * - * \endcode - * - * \tparam ValueT [inferred] Matrix and vector value type (e.g., /p float, /p double, etc.) - */ - template < - typename ValueT> - CUB_RUNTIME_FUNCTION - static cudaError_t CsrMV( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - ValueT* d_values, ///< [in] Pointer to the array of \p num_nonzeros values of the corresponding nonzero elements of matrix A. - int* d_row_offsets, ///< [in] Pointer to the array of \p m + 1 offsets demarcating the start of every row in \p d_column_indices and \p d_values (with the final entry being equal to \p num_nonzeros) - int* d_column_indices, ///< [in] Pointer to the array of \p num_nonzeros column-indices of the corresponding nonzero elements of matrix A. (Indices are zero-valued.) 
- ValueT* d_vector_x, ///< [in] Pointer to the array of \p num_cols values corresponding to the dense input vector x - ValueT* d_vector_y, ///< [out] Pointer to the array of \p num_rows values corresponding to the dense output vector y - int num_rows, ///< [in] number of rows of matrix A. - int num_cols, ///< [in] number of columns of matrix A. - int num_nonzeros, ///< [in] number of nonzero elements of matrix A. - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. - { - SpmvParams spmv_params; - spmv_params.d_values = d_values; - spmv_params.d_row_end_offsets = d_row_offsets + 1; - spmv_params.d_column_indices = d_column_indices; - spmv_params.d_vector_x = d_vector_x; - spmv_params.d_vector_y = d_vector_y; - spmv_params.num_rows = num_rows; - spmv_params.num_cols = num_cols; - spmv_params.num_nonzeros = num_nonzeros; - spmv_params.alpha = 1.0; - spmv_params.beta = 0.0; - - return DispatchSpmv::Dispatch( - d_temp_storage, - temp_storage_bytes, - spmv_params, - stream, - debug_synchronous); - } - - //@} end member group -}; - - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - - diff --git a/ml-xgboost/cub/cub/device/dispatch/dispatch_histogram.cuh b/ml-xgboost/cub/cub/device/dispatch/dispatch_histogram.cuh deleted file mode 100644 index 52504da..0000000 --- a/ml-xgboost/cub/cub/device/dispatch/dispatch_histogram.cuh +++ /dev/null @@ -1,1085 +0,0 @@ - -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
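One caution on the CsrMV hunk above: the function body fixes alpha = 1.0 and beta = 0.0, and its parameter list carries no alpha or beta, so the deleted interface computes y = A*x only, even though the snippet's final call passes trailing alpha, beta arguments. A sketch of the call as the signature actually defines it, for the 9x9 lattice example; spmv_demo is an illustrative wrapper:

    #include <cub/cub.cuh>
    #include <cuda_runtime.h>

    void spmv_demo(float *d_values, int *d_row_offsets, int *d_column_indices,
                   float *d_vector_x, float *d_vector_y,
                   int num_rows, int num_cols, int num_nonzeros)
    {
        void  *d_temp_storage     = NULL;
        size_t temp_storage_bytes = 0;
        cub::DeviceSpmv::CsrMV(d_temp_storage, temp_storage_bytes,
            d_values, d_row_offsets, d_column_indices, d_vector_x, d_vector_y,
            num_rows, num_cols, num_nonzeros);
        cudaMalloc(&d_temp_storage, temp_storage_bytes);
        cub::DeviceSpmv::CsrMV(d_temp_storage, temp_storage_bytes,
            d_values, d_row_offsets, d_column_indices, d_vector_x, d_vector_y,
            num_rows, num_cols, num_nonzeros);
        // For the all-ones 3x3-lattice matrix and x = [1, ..., 1]:
        // d_vector_y <-- [2, 3, 2, 3, 4, 3, 2, 3, 2] (each entry = row degree).
    }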
- * - ******************************************************************************/ - -/** - * \file - * cub::DeviceHistogram provides device-wide parallel operations for constructing histogram(s) from a sequence of samples data residing within device-accessible memory. - */ - -#pragma once - -#include -#include -#include - -#include "../../agent/agent_histogram.cuh" -#include "../../util_debug.cuh" -#include "../../util_device.cuh" -#include "../../thread/thread_search.cuh" -#include "../../grid/grid_queue.cuh" -#include "../../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - - -/****************************************************************************** - * Histogram kernel entry points - *****************************************************************************/ - -/** - * Histogram initialization kernel entry point - */ -template < - int NUM_ACTIVE_CHANNELS, ///< Number of channels actively being histogrammed - typename CounterT, ///< Integer type for counting sample occurrences per histogram bin - typename OffsetT> ///< Signed integer type for global offsets -__global__ void DeviceHistogramInitKernel( - ArrayWrapper num_output_bins_wrapper, ///< Number of output histogram bins per channel - ArrayWrapper d_output_histograms_wrapper, ///< Histogram counter data having logical dimensions CounterT[NUM_ACTIVE_CHANNELS][num_bins.array[CHANNEL]] - GridQueue tile_queue) ///< Drain queue descriptor for dynamically mapping tile data onto thread blocks -{ - if ((threadIdx.x == 0) && (blockIdx.x == 0)) - tile_queue.ResetDrain(); - - int output_bin = (blockIdx.x * blockDim.x) + threadIdx.x; - - #pragma unroll - for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) - { - if (output_bin < num_output_bins_wrapper.array[CHANNEL]) - d_output_histograms_wrapper.array[CHANNEL][output_bin] = 0; - } -} - - -/** - * Histogram privatized sweep kernel entry point (multi-block). Computes privatized histograms, one per thread block. - */ -template < - typename AgentHistogramPolicyT, ///< Parameterized AgentHistogramPolicy tuning policy type - int PRIVATIZED_SMEM_BINS, ///< Maximum number of histogram bins per channel (e.g., up to 256) - int NUM_CHANNELS, ///< Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed) - int NUM_ACTIVE_CHANNELS, ///< Number of channels actively being histogrammed - typename SampleIteratorT, ///< The input iterator type. \iterator. 
- typename CounterT, ///< Integer type for counting sample occurrences per histogram bin - typename PrivatizedDecodeOpT, ///< The transform operator type for determining privatized counter indices from samples, one for each channel - typename OutputDecodeOpT, ///< The transform operator type for determining output bin-ids from privatized counter indices, one for each channel - typename OffsetT> ///< Signed integer type for global offsets -__launch_bounds__ (int(AgentHistogramPolicyT::BLOCK_THREADS)) -__global__ void DeviceHistogramSweepKernel( - SampleIteratorT d_samples, ///< Input data to reduce - ArrayWrapper num_output_bins_wrapper, ///< The number bins per final output histogram - ArrayWrapper num_privatized_bins_wrapper, ///< The number bins per privatized histogram - ArrayWrapper d_output_histograms_wrapper, ///< Reference to final output histograms - ArrayWrapper d_privatized_histograms_wrapper, ///< Reference to privatized histograms - ArrayWrapper output_decode_op_wrapper, ///< The transform operator for determining output bin-ids from privatized counter indices, one for each channel - ArrayWrapper privatized_decode_op_wrapper, ///< The transform operator for determining privatized counter indices from samples, one for each channel - OffsetT num_row_pixels, ///< The number of multi-channel pixels per row in the region of interest - OffsetT num_rows, ///< The number of rows in the region of interest - OffsetT row_stride_samples, ///< The number of samples between starts of consecutive rows in the region of interest - int tiles_per_row, ///< Number of image tiles per row - GridQueue tile_queue) ///< Drain queue descriptor for dynamically mapping tile data onto thread blocks -{ - // Thread block type for compositing input tiles - typedef AgentHistogram< - AgentHistogramPolicyT, - PRIVATIZED_SMEM_BINS, - NUM_CHANNELS, - NUM_ACTIVE_CHANNELS, - SampleIteratorT, - CounterT, - PrivatizedDecodeOpT, - OutputDecodeOpT, - OffsetT> - AgentHistogramT; - - // Shared memory for AgentHistogram - __shared__ typename AgentHistogramT::TempStorage temp_storage; - - AgentHistogramT agent( - temp_storage, - d_samples, - num_output_bins_wrapper.array, - num_privatized_bins_wrapper.array, - d_output_histograms_wrapper.array, - d_privatized_histograms_wrapper.array, - output_decode_op_wrapper.array, - privatized_decode_op_wrapper.array); - - // Initialize counters - agent.InitBinCounters(); - - // Consume input tiles - agent.ConsumeTiles( - num_row_pixels, - num_rows, - row_stride_samples, - tiles_per_row, - tile_queue); - - // Store output to global (if necessary) - agent.StoreOutput(); - -} - - - - - - -/****************************************************************************** - * Dispatch - ******************************************************************************/ - -/** - * Utility class for dispatching the appropriately-tuned kernels for DeviceHistogram - */ -template < - int NUM_CHANNELS, ///< Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed) - int NUM_ACTIVE_CHANNELS, ///< Number of channels actively being histogrammed - typename SampleIteratorT, ///< Random-access input iterator type for reading input items \iterator - typename CounterT, ///< Integer type for counting sample occurrences per histogram bin - typename LevelT, ///< Type for specifying bin level boundaries - typename OffsetT> ///< Signed integer type for global offsets -struct DipatchHistogram -{ - 
//--------------------------------------------------------------------- - // Types and constants - //--------------------------------------------------------------------- - - /// The sample value type of the input iterator - typedef typename std::iterator_traits::value_type SampleT; - - enum - { - // Maximum number of bins per channel for which we will use a privatized smem strategy - MAX_PRIVATIZED_SMEM_BINS = 256 - }; - - - //--------------------------------------------------------------------- - // Transform functors for converting samples to bin-ids - //--------------------------------------------------------------------- - - // Searches for bin given a list of bin-boundary levels - template - struct SearchTransform - { - LevelIteratorT d_levels; // Pointer to levels array - int num_output_levels; // Number of levels in array - - // Initializer - __host__ __device__ __forceinline__ void Init( - LevelIteratorT d_levels, // Pointer to levels array - int num_output_levels) // Number of levels in array - { - this->d_levels = d_levels; - this->num_output_levels = num_output_levels; - } - - // Method for converting samples to bin-ids - template - __host__ __device__ __forceinline__ void BinSelect(_SampleT sample, int &bin, bool valid) - { - /// Level iterator wrapper type - typedef typename If::VALUE, - CacheModifiedInputIterator, // Wrap the native input pointer with CacheModifiedInputIterator - LevelIteratorT>::Type // Directly use the supplied input iterator type - WrappedLevelIteratorT; - - WrappedLevelIteratorT wrapped_levels(d_levels); - - int num_bins = num_output_levels - 1; - if (valid) - { - bin = UpperBound(wrapped_levels, num_output_levels, (LevelT) sample) - 1; - if (bin >= num_bins) - bin = -1; - } - } - }; - - - // Scales samples to evenly-spaced bins - struct ScaleTransform - { - int num_bins; // Number of levels in array - LevelT max; // Max sample level (exclusive) - LevelT min; // Min sample level (inclusive) - LevelT scale; // Bin scaling factor - - // Initializer - template - __host__ __device__ __forceinline__ void Init( - int num_output_levels, // Number of levels in array - _LevelT max, // Max sample level (exclusive) - _LevelT min, // Min sample level (inclusive) - _LevelT scale) // Bin scaling factor - { - this->num_bins = num_output_levels - 1; - this->max = max; - this->min = min; - this->scale = scale; - } - - // Initializer (float specialization) - __host__ __device__ __forceinline__ void Init( - int num_output_levels, // Number of levels in array - float max, // Max sample level (exclusive) - float min, // Min sample level (inclusive) - float scale) // Bin scaling factor - { - this->num_bins = num_output_levels - 1; - this->max = max; - this->min = min; - this->scale = float(1.0) / scale; - } - - // Initializer (double specialization) - __host__ __device__ __forceinline__ void Init( - int num_output_levels, // Number of levels in array - double max, // Max sample level (exclusive) - double min, // Min sample level (inclusive) - double scale) // Bin scaling factor - { - this->num_bins = num_output_levels - 1; - this->max = max; - this->min = min; - this->scale = double(1.0) / scale; - } - - // Method for converting samples to bin-ids - template - __host__ __device__ __forceinline__ void BinSelect(_SampleT sample, int &bin, bool valid) - { - LevelT level_sample = (LevelT) sample; - - if (valid && (level_sample >= min) && (level_sample < max)) - bin = (int) ((level_sample - min) / scale); - } - - // Method for converting samples to bin-ids (float specialization) - 
template - __host__ __device__ __forceinline__ void BinSelect(float sample, int &bin, bool valid) - { - LevelT level_sample = (LevelT) sample; - - if (valid && (level_sample >= min) && (level_sample < max)) - bin = (int) ((level_sample - min) * scale); - } - - // Method for converting samples to bin-ids (double specialization) - template - __host__ __device__ __forceinline__ void BinSelect(double sample, int &bin, bool valid) - { - LevelT level_sample = (LevelT) sample; - - if (valid && (level_sample >= min) && (level_sample < max)) - bin = (int) ((level_sample - min) * scale); - } - }; - - - // Pass-through bin transform operator - struct PassThruTransform - { - // Method for converting samples to bin-ids - template - __host__ __device__ __forceinline__ void BinSelect(_SampleT sample, int &bin, bool valid) - { - if (valid) - bin = (int) sample; - } - }; - - - - //--------------------------------------------------------------------- - // Tuning policies - //--------------------------------------------------------------------- - - /// SM11 - struct Policy110 - { - // HistogramSweepPolicy - typedef AgentHistogramPolicy< - 512, - (NUM_CHANNELS == 1) ? 8 : 2, - BLOCK_LOAD_DIRECT, - LOAD_DEFAULT, - true, - GMEM, - false> - HistogramSweepPolicy; - }; - - /// SM20 - struct Policy200 - { - // HistogramSweepPolicy - typedef AgentHistogramPolicy< - (NUM_CHANNELS == 1) ? 256 : 128, - (NUM_CHANNELS == 1) ? 8 : 3, - (NUM_CHANNELS == 1) ? BLOCK_LOAD_DIRECT : BLOCK_LOAD_WARP_TRANSPOSE, - LOAD_DEFAULT, - true, - SMEM, - false> - HistogramSweepPolicy; - }; - - /// SM30 - struct Policy300 - { - // HistogramSweepPolicy - typedef AgentHistogramPolicy< - 512, - (NUM_CHANNELS == 1) ? 8 : 2, - BLOCK_LOAD_DIRECT, - LOAD_DEFAULT, - true, - GMEM, - false> - HistogramSweepPolicy; - }; - - /// SM35 - struct Policy350 - { - // HistogramSweepPolicy - typedef AgentHistogramPolicy< - 128, - (NUM_CHANNELS == 1) ? 
8 : 7, - BLOCK_LOAD_DIRECT, - LOAD_LDG, - true, - BLEND, - true> - HistogramSweepPolicy; - }; - - /// SM50 - struct Policy500 - { - // HistogramSweepPolicy - typedef AgentHistogramPolicy< - 256, - 8, - BLOCK_LOAD_DIRECT, - LOAD_LDG, - true, - SMEM, - true> - HistogramSweepPolicy; - }; - - - - //--------------------------------------------------------------------- - // Tuning policies of current PTX compiler pass - //--------------------------------------------------------------------- - -#if (CUB_PTX_ARCH >= 500) - typedef Policy500 PtxPolicy; - -#elif (CUB_PTX_ARCH >= 350) - typedef Policy350 PtxPolicy; - -#elif (CUB_PTX_ARCH >= 300) - typedef Policy300 PtxPolicy; - -#elif (CUB_PTX_ARCH >= 200) - typedef Policy200 PtxPolicy; - -#else - typedef Policy110 PtxPolicy; - -#endif - - // "Opaque" policies (whose parameterizations aren't reflected in the type signature) - struct PtxHistogramSweepPolicy : PtxPolicy::HistogramSweepPolicy {}; - - - //--------------------------------------------------------------------- - // Utilities - //--------------------------------------------------------------------- - - /** - * Initialize kernel dispatch configurations with the policies corresponding to the PTX assembly we will use - */ - template - CUB_RUNTIME_FUNCTION __forceinline__ - static cudaError_t InitConfigs( - int ptx_version, - KernelConfig &histogram_sweep_config) - { - #if (CUB_PTX_ARCH > 0) - - // We're on the device, so initialize the kernel dispatch configurations with the current PTX policy - return histogram_sweep_config.template Init(); - - #else - - // We're on the host, so lookup and initialize the kernel dispatch configurations with the policies that match the device's PTX version - if (ptx_version >= 500) - { - return histogram_sweep_config.template Init(); - } - else if (ptx_version >= 350) - { - return histogram_sweep_config.template Init(); - } - else if (ptx_version >= 300) - { - return histogram_sweep_config.template Init(); - } - else if (ptx_version >= 200) - { - return histogram_sweep_config.template Init(); - } - else if (ptx_version >= 110) - { - return histogram_sweep_config.template Init(); - } - else - { - // No global atomic support - return cudaErrorNotSupported; - } - - #endif - } - - - /** - * Kernel kernel dispatch configuration - */ - struct KernelConfig - { - int block_threads; - int pixels_per_thread; - - template - CUB_RUNTIME_FUNCTION __forceinline__ - cudaError_t Init() - { - block_threads = BlockPolicy::BLOCK_THREADS; - pixels_per_thread = BlockPolicy::PIXELS_PER_THREAD; - - return cudaSuccess; - } - }; - - - //--------------------------------------------------------------------- - // Dispatch entrypoints - //--------------------------------------------------------------------- - - /** - * Privatization-based dispatch routine - */ - template < - typename PrivatizedDecodeOpT, ///< The transform operator type for determining privatized counter indices from samples, one for each channel - typename OutputDecodeOpT, ///< The transform operator type for determining output bin-ids from privatized counter indices, one for each channel - typename DeviceHistogramInitKernelT, ///< Function type of cub::DeviceHistogramInitKernel - typename DeviceHistogramSweepKernelT> ///< Function type of cub::DeviceHistogramSweepKernel - CUB_RUNTIME_FUNCTION __forceinline__ - static cudaError_t PrivatizedDispatch( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. 
When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - SampleIteratorT d_samples, ///< [in] The pointer to the input sequence of sample items. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples). - CounterT* d_output_histograms[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channeli, the allocation length of d_histograms[i] should be num_output_levels[i] - 1. - int num_privatized_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of bin level boundaries for delineating histogram samples in each active channel. Implies that the number of bins for channeli is num_output_levels[i] - 1. - PrivatizedDecodeOpT privatized_decode_op[NUM_ACTIVE_CHANNELS], ///< [in] Transform operators for determining bin-ids from samples, one for each channel - int num_output_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of bin level boundaries for delineating histogram samples in each active channel. Implies that the number of bins for channeli is num_output_levels[i] - 1. - OutputDecodeOpT output_decode_op[NUM_ACTIVE_CHANNELS], ///< [in] Transform operators for determining bin-ids from samples, one for each channel - int max_num_output_bins, ///< [in] Maximum number of output bins in any channel - OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest - OffsetT num_rows, ///< [in] The number of rows in the region of interest - OffsetT row_stride_samples, ///< [in] The number of samples between starts of consecutive rows in the region of interest - DeviceHistogramInitKernelT histogram_init_kernel, ///< [in] Kernel function pointer to parameterization of cub::DeviceHistogramInitKernel - DeviceHistogramSweepKernelT histogram_sweep_kernel, ///< [in] Kernel function pointer to parameterization of cub::DeviceHistogramSweepKernel - KernelConfig histogram_sweep_config, ///< [in] Dispatch parameters that match the policy that \p histogram_sweep_kernel was compiled for - cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous) ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. 
- { - #ifndef CUB_RUNTIME_ENABLED - - // Kernel launch not supported from this device - return CubDebug(cudaErrorNotSupported); - - #else - - cudaError error = cudaSuccess; - do - { - // Get device ordinal - int device_ordinal; - if (CubDebug(error = cudaGetDevice(&device_ordinal))) break; - - // Get SM count - int sm_count; - if (CubDebug(error = cudaDeviceGetAttribute (&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break; - - // Get SM occupancy for histogram_sweep_kernel - int histogram_sweep_sm_occupancy; - if (CubDebug(error = MaxSmOccupancy( - histogram_sweep_sm_occupancy, - histogram_sweep_kernel, - histogram_sweep_config.block_threads))) break; - - // Get device occupancy for histogram_sweep_kernel - int histogram_sweep_occupancy = histogram_sweep_sm_occupancy * sm_count; - - if (num_row_pixels * NUM_CHANNELS == row_stride_samples) - { - // Treat as a single linear array of samples - num_row_pixels *= num_rows; - num_rows = 1; - row_stride_samples = num_row_pixels * NUM_CHANNELS; - } - - // Get grid dimensions, trying to keep total blocks ~histogram_sweep_occupancy - int pixels_per_tile = histogram_sweep_config.block_threads * histogram_sweep_config.pixels_per_thread; - int tiles_per_row = int(num_row_pixels + pixels_per_tile - 1) / pixels_per_tile; - int blocks_per_row = CUB_MIN(histogram_sweep_occupancy, tiles_per_row); - int blocks_per_col = (blocks_per_row > 0) ? - int(CUB_MIN(histogram_sweep_occupancy / blocks_per_row, num_rows)) : - 0; - int num_threadblocks = blocks_per_row * blocks_per_col; - - dim3 sweep_grid_dims; - sweep_grid_dims.x = (unsigned int) blocks_per_row; - sweep_grid_dims.y = (unsigned int) blocks_per_col; - sweep_grid_dims.z = 1; - - // Temporary storage allocation requirements - const int NUM_ALLOCATIONS = NUM_ACTIVE_CHANNELS + 1; - void* allocations[NUM_ALLOCATIONS]; - size_t allocation_sizes[NUM_ALLOCATIONS]; - - for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) - allocation_sizes[CHANNEL] = size_t(num_threadblocks) * (num_privatized_levels[CHANNEL] - 1) * sizeof(CounterT); - - allocation_sizes[NUM_ALLOCATIONS - 1] = GridQueue::AllocationSize(); - - // Alias the temporary allocations from the single storage blob (or compute the necessary size of the blob) - if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break; - if (d_temp_storage == NULL) - { - // Return if the caller is simply requesting the size of the storage allocation - break; - } - - // Construct the grid queue descriptor - GridQueue tile_queue(allocations[NUM_ALLOCATIONS - 1]); - - // Setup array wrapper for histogram channel output (because we can't pass static arrays as kernel parameters) - ArrayWrapper d_output_histograms_wrapper; - for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) - d_output_histograms_wrapper.array[CHANNEL] = d_output_histograms[CHANNEL]; - - // Setup array wrapper for privatized per-block histogram channel output (because we can't pass static arrays as kernel parameters) - ArrayWrapper d_privatized_histograms_wrapper; - for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) - d_privatized_histograms_wrapper.array[CHANNEL] = (CounterT*) allocations[CHANNEL]; - - // Setup array wrapper for sweep bin transforms (because we can't pass static arrays as kernel parameters) - ArrayWrapper privatized_decode_op_wrapper; - for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) - privatized_decode_op_wrapper.array[CHANNEL] = privatized_decode_op[CHANNEL]; - - // Setup 
array wrapper for aggregation bin transforms (because we can't pass static arrays as kernel parameters) - ArrayWrapper<OutputDecodeOpT, NUM_ACTIVE_CHANNELS> output_decode_op_wrapper; - for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) - output_decode_op_wrapper.array[CHANNEL] = output_decode_op[CHANNEL]; - - // Setup array wrapper for num privatized bins (because we can't pass static arrays as kernel parameters) - ArrayWrapper<int, NUM_ACTIVE_CHANNELS> num_privatized_bins_wrapper; - for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) - num_privatized_bins_wrapper.array[CHANNEL] = num_privatized_levels[CHANNEL] - 1; - - // Setup array wrapper for num output bins (because we can't pass static arrays as kernel parameters) - ArrayWrapper<int, NUM_ACTIVE_CHANNELS> num_output_bins_wrapper; - for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) - num_output_bins_wrapper.array[CHANNEL] = num_output_levels[CHANNEL] - 1; - - int histogram_init_block_threads = 256; - int histogram_init_grid_dims = (max_num_output_bins + histogram_init_block_threads - 1) / histogram_init_block_threads; - - // Log DeviceHistogramInitKernel configuration - if (debug_synchronous) _CubLog("Invoking DeviceHistogramInitKernel<<<%d, %d, 0, %lld>>>()\n", - histogram_init_grid_dims, histogram_init_block_threads, (long long) stream); - - // Invoke histogram_init_kernel - histogram_init_kernel<<<histogram_init_grid_dims, histogram_init_block_threads, 0, stream>>>( - num_output_bins_wrapper, - d_output_histograms_wrapper, - tile_queue); - - // Return if empty problem - if ((blocks_per_row == 0) || (blocks_per_col == 0)) - break; - - // Log histogram_sweep_kernel configuration - if (debug_synchronous) _CubLog("Invoking histogram_sweep_kernel<<<{%d, %d, %d}, %d, 0, %lld>>>(), %d pixels per thread, %d SM occupancy\n", - sweep_grid_dims.x, sweep_grid_dims.y, sweep_grid_dims.z, - histogram_sweep_config.block_threads, (long long) stream, histogram_sweep_config.pixels_per_thread, histogram_sweep_sm_occupancy); - - // Invoke histogram_sweep_kernel - histogram_sweep_kernel<<<sweep_grid_dims, histogram_sweep_config.block_threads, 0, stream>>>( - d_samples, - num_output_bins_wrapper, - num_privatized_bins_wrapper, - d_output_histograms_wrapper, - d_privatized_histograms_wrapper, - output_decode_op_wrapper, - privatized_decode_op_wrapper, - num_row_pixels, - num_rows, - row_stride_samples, - tiles_per_row, - tile_queue); - - // Check for failure to launch - if (CubDebug(error = cudaPeekAtLastError())) break; - - // Sync the stream if specified to flush runtime errors - if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; - - } - while (0); - - return error; - - #endif // CUB_RUNTIME_ENABLED - } - - - - /** - * Dispatch routine for HistogramRange, specialized for sample types larger than 8-bit - */ - CUB_RUNTIME_FUNCTION - static cudaError_t DispatchRange( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples). - CounterT* d_output_histograms[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel i, the allocation length of d_histograms[i] should be num_output_levels[i] - 1.
- int num_output_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel i is num_output_levels[i] - 1. - LevelT *d_levels[NUM_ACTIVE_CHANNELS], ///< [in] The pointers to the arrays of boundaries (levels), one for each active channel. Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive. - OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest - OffsetT num_rows, ///< [in] The number of rows in the region of interest - OffsetT row_stride_samples, ///< [in] The number of samples between starts of consecutive rows in the region of interest - cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream 0. - bool debug_synchronous, ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. - Int2Type<false> is_byte_sample) ///< [in] Marker type indicating whether or not SampleT is an 8-bit type - { - cudaError error = cudaSuccess; - do - { - // Get PTX version - int ptx_version; - #if (CUB_PTX_ARCH == 0) - if (CubDebug(error = PtxVersion(ptx_version))) break; - #else - ptx_version = CUB_PTX_ARCH; - #endif - - // Get kernel dispatch configurations - KernelConfig histogram_sweep_config; - if (CubDebug(error = InitConfigs(ptx_version, histogram_sweep_config))) - break; - - // Use the search transform op for converting samples to privatized bins - typedef SearchTransform<LevelT*> PrivatizedDecodeOpT; - - // Use the pass-thru transform op for converting privatized bins to output bins - typedef PassThruTransform OutputDecodeOpT; - - PrivatizedDecodeOpT privatized_decode_op[NUM_ACTIVE_CHANNELS]; - OutputDecodeOpT output_decode_op[NUM_ACTIVE_CHANNELS]; - int max_levels = num_output_levels[0]; - - for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel) - { - privatized_decode_op[channel].Init(d_levels[channel], num_output_levels[channel]); - if (num_output_levels[channel] > max_levels) - max_levels = num_output_levels[channel]; - } - int max_num_output_bins = max_levels - 1; - - // Dispatch - if (max_num_output_bins > MAX_PRIVATIZED_SMEM_BINS) - { - // Too many bins to keep in shared memory.
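The search transform initialized in the loop above resolves each sample to a bin by binary-searching the channel's boundary array, honoring the inclusive-lower / exclusive-upper semantics documented for \p d_levels. A minimal sketch of that mapping rule (a hypothetical stand-alone helper, not CUB's actual SearchTransform members):

    // Range binning: bin i covers [levels[i], levels[i+1]).
    // Returns -1 (below range) or num_levels - 1 (above range) for out-of-range samples.
    template <typename LevelT, typename SampleT>
    __host__ __device__ int RangeBin(const LevelT *levels, int num_levels, SampleT sample)
    {
        int lo = 0, hi = num_levels;       // find the first boundary strictly greater than sample
        while (lo < hi)
        {
            int mid = lo + (hi - lo) / 2;
            if ((LevelT) sample < levels[mid])
                hi = mid;
            else
                lo = mid + 1;
        }
        return lo - 1;                     // e.g. levels = {0, 10, 20}: sample 15 -> bin 1
    }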
- const int PRIVATIZED_SMEM_BINS = 0; - - if (CubDebug(error = PrivatizedDispatch( - d_temp_storage, - temp_storage_bytes, - d_samples, - d_output_histograms, - num_output_levels, - privatized_decode_op, - num_output_levels, - output_decode_op, - max_num_output_bins, - num_row_pixels, - num_rows, - row_stride_samples, - DeviceHistogramInitKernel, - DeviceHistogramSweepKernel, - histogram_sweep_config, - stream, - debug_synchronous))) break; - } - else - { - // Dispatch shared-privatized approach - const int PRIVATIZED_SMEM_BINS = MAX_PRIVATIZED_SMEM_BINS; - - if (CubDebug(error = PrivatizedDispatch( - d_temp_storage, - temp_storage_bytes, - d_samples, - d_output_histograms, - num_output_levels, - privatized_decode_op, - num_output_levels, - output_decode_op, - max_num_output_bins, - num_row_pixels, - num_rows, - row_stride_samples, - DeviceHistogramInitKernel, - DeviceHistogramSweepKernel, - histogram_sweep_config, - stream, - debug_synchronous))) break; - } - - } while (0); - - return error; - } - - - /** - * Dispatch routine for HistogramRange, specialized for 8-bit sample types (computes 256-bin privatized histograms and then reduces to user-specified levels) - */ - CUB_RUNTIME_FUNCTION - static cudaError_t DispatchRange( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples). - CounterT* d_output_histograms[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channeli, the allocation length of d_histograms[i] should be num_output_levels[i] - 1. - int num_output_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channeli is num_output_levels[i] - 1. - LevelT *d_levels[NUM_ACTIVE_CHANNELS], ///< [in] The pointers to the arrays of boundaries (levels), one for each active channel. Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive. - OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest - OffsetT num_rows, ///< [in] The number of rows in the region of interest - OffsetT row_stride_samples, ///< [in] The number of samples between starts of consecutive rows in the region of interest - cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous, ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. 
- Int2Type is_byte_sample) ///< [in] Marker type indicating whether or not SampleT is a 8b type - { - cudaError error = cudaSuccess; - do - { - // Get PTX version - int ptx_version; - #if (CUB_PTX_ARCH == 0) - if (CubDebug(error = PtxVersion(ptx_version))) break; - #else - ptx_version = CUB_PTX_ARCH; - #endif - - // Get kernel dispatch configurations - KernelConfig histogram_sweep_config; - if (CubDebug(error = InitConfigs(ptx_version, histogram_sweep_config))) - break; - - // Use the pass-thru transform op for converting samples to privatized bins - typedef PassThruTransform PrivatizedDecodeOpT; - - // Use the search transform op for converting privatized bins to output bins - typedef SearchTransform OutputDecodeOpT; - - int num_privatized_levels[NUM_ACTIVE_CHANNELS]; - PrivatizedDecodeOpT privatized_decode_op[NUM_ACTIVE_CHANNELS]; - OutputDecodeOpT output_decode_op[NUM_ACTIVE_CHANNELS]; - int max_levels = num_output_levels[0]; // Maximum number of levels in any channel - - for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel) - { - num_privatized_levels[channel] = 257; - output_decode_op[channel].Init(d_levels[channel], num_output_levels[channel]); - - if (num_output_levels[channel] > max_levels) - max_levels = num_output_levels[channel]; - } - int max_num_output_bins = max_levels - 1; - - const int PRIVATIZED_SMEM_BINS = 256; - - if (CubDebug(error = PrivatizedDispatch( - d_temp_storage, - temp_storage_bytes, - d_samples, - d_output_histograms, - num_privatized_levels, - privatized_decode_op, - num_output_levels, - output_decode_op, - max_num_output_bins, - num_row_pixels, - num_rows, - row_stride_samples, - DeviceHistogramInitKernel, - DeviceHistogramSweepKernel, - histogram_sweep_config, - stream, - debug_synchronous))) break; - - } while (0); - - return error; - } - - - /** - * Dispatch routine for HistogramEven, specialized for sample types larger than 8-bit - */ - CUB_RUNTIME_FUNCTION __forceinline__ - static cudaError_t DispatchEven( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - SampleIteratorT d_samples, ///< [in] The pointer to the input sequence of sample items. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples). - CounterT* d_output_histograms[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channeli, the allocation length of d_histograms[i] should be num_output_levels[i] - 1. - int num_output_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of bin level boundaries for delineating histogram samples in each active channel. Implies that the number of bins for channeli is num_output_levels[i] - 1. - LevelT lower_level[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel. - LevelT upper_level[NUM_ACTIVE_CHANNELS], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel. 
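With evenly spaced boundaries, no search is needed: \p lower_level, \p upper_level, and the bin count determine a scale factor, and binning reduces to one subtraction and one division, which is exactly what the scale transform's Init call in the routine below precomputes. A small worked sketch of that arithmetic (a hypothetical helper mirroring the Init parameters, not CUB's exact ScaleTransform code):

    // Even binning: scale = (upper - lower) / num_bins; bin = (sample - lower) / scale.
    template <typename LevelT, typename SampleT>
    __host__ __device__ int EvenBin(int num_levels, LevelT lower, LevelT upper, SampleT sample)
    {
        int    num_bins = num_levels - 1;
        LevelT scale    = (upper - lower) / num_bins;
        // e.g. num_levels = 257, lower = 0, upper = 256: scale = 1, so sample 37 lands in bin 37
        return (int) (((LevelT) sample - lower) / scale);
    }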
- OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest - OffsetT num_rows, ///< [in] The number of rows in the region of interest - OffsetT row_stride_samples, ///< [in] The number of samples between starts of consecutive rows in the region of interest - cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous, ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. - Int2Type is_byte_sample) ///< [in] Marker type indicating whether or not SampleT is a 8b type - { - cudaError error = cudaSuccess; - do - { - // Get PTX version - int ptx_version; - #if (CUB_PTX_ARCH == 0) - if (CubDebug(error = PtxVersion(ptx_version))) break; - #else - ptx_version = CUB_PTX_ARCH; - #endif - - // Get kernel dispatch configurations - KernelConfig histogram_sweep_config; - if (CubDebug(error = InitConfigs(ptx_version, histogram_sweep_config))) - break; - - // Use the scale transform op for converting samples to privatized bins - typedef ScaleTransform PrivatizedDecodeOpT; - - // Use the pass-thru transform op for converting privatized bins to output bins - typedef PassThruTransform OutputDecodeOpT; - - PrivatizedDecodeOpT privatized_decode_op[NUM_ACTIVE_CHANNELS]; - OutputDecodeOpT output_decode_op[NUM_ACTIVE_CHANNELS]; - int max_levels = num_output_levels[0]; - - for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel) - { - int bins = num_output_levels[channel] - 1; - LevelT scale = (upper_level[channel] - lower_level[channel]) / bins; - - privatized_decode_op[channel].Init(num_output_levels[channel], upper_level[channel], lower_level[channel], scale); - - if (num_output_levels[channel] > max_levels) - max_levels = num_output_levels[channel]; - } - int max_num_output_bins = max_levels - 1; - - if (max_num_output_bins > MAX_PRIVATIZED_SMEM_BINS) - { - // Dispatch shared-privatized approach - const int PRIVATIZED_SMEM_BINS = 0; - - if (CubDebug(error = PrivatizedDispatch( - d_temp_storage, - temp_storage_bytes, - d_samples, - d_output_histograms, - num_output_levels, - privatized_decode_op, - num_output_levels, - output_decode_op, - max_num_output_bins, - num_row_pixels, - num_rows, - row_stride_samples, - DeviceHistogramInitKernel, - DeviceHistogramSweepKernel, - histogram_sweep_config, - stream, - debug_synchronous))) break; - } - else - { - // Dispatch shared-privatized approach - const int PRIVATIZED_SMEM_BINS = MAX_PRIVATIZED_SMEM_BINS; - - if (CubDebug(error = PrivatizedDispatch( - d_temp_storage, - temp_storage_bytes, - d_samples, - d_output_histograms, - num_output_levels, - privatized_decode_op, - num_output_levels, - output_decode_op, - max_num_output_bins, - num_row_pixels, - num_rows, - row_stride_samples, - DeviceHistogramInitKernel, - DeviceHistogramSweepKernel, - histogram_sweep_config, - stream, - debug_synchronous))) break; - } - } - while (0); - - return error; - } - - - /** - * Dispatch routine for HistogramEven, specialized for 8-bit sample types (computes 256-bin privatized histograms and then reduces to user-specified levels) - */ - CUB_RUNTIME_FUNCTION __forceinline__ - static cudaError_t DispatchEven( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
- size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - SampleIteratorT d_samples, ///< [in] The pointer to the input sequence of sample items. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples). - CounterT* d_output_histograms[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channeli, the allocation length of d_histograms[i] should be num_output_levels[i] - 1. - int num_output_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of bin level boundaries for delineating histogram samples in each active channel. Implies that the number of bins for channeli is num_output_levels[i] - 1. - LevelT lower_level[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel. - LevelT upper_level[NUM_ACTIVE_CHANNELS], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel. - OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest - OffsetT num_rows, ///< [in] The number of rows in the region of interest - OffsetT row_stride_samples, ///< [in] The number of samples between starts of consecutive rows in the region of interest - cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous, ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. - Int2Type is_byte_sample) ///< [in] Marker type indicating whether or not SampleT is a 8b type - { - cudaError error = cudaSuccess; - do - { - // Get PTX version - int ptx_version; - #if (CUB_PTX_ARCH == 0) - if (CubDebug(error = PtxVersion(ptx_version))) break; - #else - ptx_version = CUB_PTX_ARCH; - #endif - - // Get kernel dispatch configurations - KernelConfig histogram_sweep_config; - if (CubDebug(error = InitConfigs(ptx_version, histogram_sweep_config))) - break; - - // Use the pass-thru transform op for converting samples to privatized bins - typedef PassThruTransform PrivatizedDecodeOpT; - - // Use the scale transform op for converting privatized bins to output bins - typedef ScaleTransform OutputDecodeOpT; - - int num_privatized_levels[NUM_ACTIVE_CHANNELS]; - PrivatizedDecodeOpT privatized_decode_op[NUM_ACTIVE_CHANNELS]; - OutputDecodeOpT output_decode_op[NUM_ACTIVE_CHANNELS]; - int max_levels = num_output_levels[0]; - - for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel) - { - num_privatized_levels[channel] = 257; - - int bins = num_output_levels[channel] - 1; - LevelT scale = (upper_level[channel] - lower_level[channel]) / bins; - output_decode_op[channel].Init(num_output_levels[channel], upper_level[channel], lower_level[channel], scale); - - if (num_output_levels[channel] > max_levels) - max_levels = num_output_levels[channel]; - } - int max_num_output_bins = max_levels - 1; - - const int PRIVATIZED_SMEM_BINS = 256; - - if (CubDebug(error = PrivatizedDispatch( - d_temp_storage, - temp_storage_bytes, - d_samples, - d_output_histograms, - num_privatized_levels, - privatized_decode_op, - num_output_levels, - output_decode_op, - max_num_output_bins, - num_row_pixels, - num_rows, - row_stride_samples, - DeviceHistogramInitKernel, - DeviceHistogramSweepKernel, - histogram_sweep_config, - stream, - debug_synchronous))) break; - - 
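Both 8-bit specializations above lean on the same observation: an unsigned 8-bit sample can take only 256 distinct values, so each thread block can privatize a full 257-level (256-bin) histogram in shared memory using a pass-through decode, and the user's levels are applied only when the 256 privatized counts are folded into the output bins, which is what the doc comments above mean by "reduces to user-specified levels". A sketch of why the pass-through decode is exact for byte data:

    // For 8-bit samples the privatized bin is the sample value itself:
    // 257 levels => 256 bins, one per possible unsigned char value.
    __host__ __device__ inline int ByteBin(unsigned char sample)
    {
        return (int) sample;   // always in [0, 255]; no search or scaling required
    }

The output decode op (search or scale) then runs per privatized bin rather than per sample, which is what makes this path profitable for large inputs.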
} - while (0); - - return error; - } - -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - - diff --git a/ml-xgboost/cub/cub/device/dispatch/dispatch_radix_sort.cuh b/ml-xgboost/cub/cub/device/dispatch/dispatch_radix_sort.cuh deleted file mode 100644 index d2483ad..0000000 --- a/ml-xgboost/cub/cub/device/dispatch/dispatch_radix_sort.cuh +++ /dev/null @@ -1,1572 +0,0 @@ - -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::DeviceRadixSort provides device-wide, parallel operations for computing a radix sort across a sequence of data items residing within device-accessible memory. - */ - -#pragma once - -#include -#include - -#include "../../agent/agent_radix_sort_upsweep.cuh" -#include "../../agent/agent_radix_sort_downsweep.cuh" -#include "../../agent/agent_scan.cuh" -#include "../../block/block_radix_sort.cuh" -#include "../../grid/grid_even_share.cuh" -#include "../../util_type.cuh" -#include "../../util_debug.cuh" -#include "../../util_device.cuh" -#include "../../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/****************************************************************************** - * Kernel entry points - *****************************************************************************/ - -/** - * Upsweep digit-counting kernel entry point (multi-block). Computes privatized digit histograms, one per block. - */ -template < - typename ChainedPolicyT, ///< Chained tuning policy - bool ALT_DIGIT_BITS, ///< Whether or not to use the alternate (lower-bits) policy - bool IS_DESCENDING, ///< Whether or not the sorted-order is high-to-low - typename KeyT, ///< Key type - typename OffsetT> ///< Signed integer type for global offsets -__launch_bounds__ (int((ALT_DIGIT_BITS) ? 
- ChainedPolicyT::ActivePolicy::AltUpsweepPolicy::BLOCK_THREADS : - ChainedPolicyT::ActivePolicy::UpsweepPolicy::BLOCK_THREADS)) -__global__ void DeviceRadixSortUpsweepKernel( - const KeyT *d_keys, ///< [in] Input keys buffer - OffsetT *d_spine, ///< [out] Privatized (per block) digit histograms (striped, i.e., 0s counts from each block, then 1s counts from each block, etc.) - OffsetT /*num_items*/, ///< [in] Total number of input data items - int current_bit, ///< [in] Bit position of current radix digit - int num_bits, ///< [in] Number of bits of current radix digit - GridEvenShare even_share) ///< [in] Even-share descriptor for mapan equal number of tiles onto each thread block -{ - // Parameterize AgentRadixSortUpsweep type for the current configuration - typedef AgentRadixSortUpsweep< - typename If<(ALT_DIGIT_BITS), - typename ChainedPolicyT::ActivePolicy::AltUpsweepPolicy, - typename ChainedPolicyT::ActivePolicy::UpsweepPolicy>::Type, - KeyT, - OffsetT> - AgentRadixSortUpsweepT; - - // Shared memory storage - __shared__ typename AgentRadixSortUpsweepT::TempStorage temp_storage; - - // Initialize even-share descriptor for this thread block - even_share.BlockInit(); - - OffsetT bin_count; - AgentRadixSortUpsweepT(temp_storage, d_keys, current_bit, num_bits).ProcessRegion( - even_share.block_offset, - even_share.block_end, - bin_count); - - // Write out digit counts (striped) - if (threadIdx.x < AgentRadixSortUpsweepT::RADIX_DIGITS) - { - int bin_idx = (IS_DESCENDING) ? - AgentRadixSortUpsweepT::RADIX_DIGITS - threadIdx.x - 1 : - threadIdx.x; - - d_spine[(gridDim.x * bin_idx) + blockIdx.x] = bin_count; - } -} - - -/** - * Spine scan kernel entry point (single-block). Computes an exclusive prefix sum over the privatized digit histograms - */ -template < - typename ChainedPolicyT, ///< Chained tuning policy - typename OffsetT> ///< Signed integer type for global offsets -__launch_bounds__ (int(ChainedPolicyT::ActivePolicy::ScanPolicy::BLOCK_THREADS), 1) -__global__ void RadixSortScanBinsKernel( - OffsetT *d_spine, ///< [in,out] Privatized (per block) digit histograms (striped, i.e., 0s counts from each block, then 1s counts from each block, etc.) - int num_counts) ///< [in] Total number of bin-counts -{ - // Parameterize the AgentScan type for the current configuration - typedef AgentScan< - typename ChainedPolicyT::ActivePolicy::ScanPolicy, - OffsetT*, - OffsetT*, - cub::Sum, - OffsetT, - OffsetT> - AgentScanT; - - // Shared memory storage - __shared__ typename AgentScanT::TempStorage temp_storage; - - // Block scan instance - AgentScanT block_scan(temp_storage, d_spine, d_spine, cub::Sum(), OffsetT(0)) ; - - // Process full input tiles - int block_offset = 0; - BlockScanRunningPrefixOp prefix_op(0, Sum()); - while (block_offset + AgentScanT::TILE_ITEMS <= num_counts) - { - block_scan.template ConsumeTile(block_offset, prefix_op); - block_offset += AgentScanT::TILE_ITEMS; - } -} - - -/** - * Downsweep pass kernel entry point (multi-block). Scatters keys (and values) into corresponding bins for the current digit place. - */ -template < - typename ChainedPolicyT, ///< Chained tuning policy - bool ALT_DIGIT_BITS, ///< Whether or not to use the alternate (lower-bits) policy - bool IS_DESCENDING, ///< Whether or not the sorted-order is high-to-low - typename KeyT, ///< Key type - typename ValueT, ///< Value type - typename OffsetT> ///< Signed integer type for global offsets -__launch_bounds__ (int((ALT_DIGIT_BITS) ? 
- ChainedPolicyT::ActivePolicy::AltDownsweepPolicy::BLOCK_THREADS : - ChainedPolicyT::ActivePolicy::DownsweepPolicy::BLOCK_THREADS)) -__global__ void DeviceRadixSortDownsweepKernel( - const KeyT *d_keys_in, ///< [in] Input keys buffer - KeyT *d_keys_out, ///< [in] Output keys buffer - const ValueT *d_values_in, ///< [in] Input values buffer - ValueT *d_values_out, ///< [in] Output values buffer - OffsetT *d_spine, ///< [in] Scan of privatized (per block) digit histograms (striped, i.e., 0s counts from each block, then 1s counts from each block, etc.) - OffsetT num_items, ///< [in] Total number of input data items - int current_bit, ///< [in] Bit position of current radix digit - int num_bits, ///< [in] Number of bits of current radix digit - GridEvenShare even_share) ///< [in] Even-share descriptor for mapan equal number of tiles onto each thread block -{ - // Parameterize AgentRadixSortDownsweep type for the current configuration - typedef AgentRadixSortDownsweep< - typename If<(ALT_DIGIT_BITS), - typename ChainedPolicyT::ActivePolicy::AltDownsweepPolicy, - typename ChainedPolicyT::ActivePolicy::DownsweepPolicy>::Type, - IS_DESCENDING, - KeyT, - ValueT, - OffsetT> - AgentRadixSortDownsweepT; - - // Shared memory storage - __shared__ typename AgentRadixSortDownsweepT::TempStorage temp_storage; - - // Initialize even-share descriptor for this thread block - even_share.BlockInit(); - - // Process input tiles - AgentRadixSortDownsweepT(temp_storage, num_items, d_spine, d_keys_in, d_keys_out, d_values_in, d_values_out, current_bit, num_bits).ProcessRegion( - even_share.block_offset, - even_share.block_end); -} - - -/** - * Single pass kernel entry point (single-block). Fully sorts a tile of input. - */ -template < - typename ChainedPolicyT, ///< Chained tuning policy - bool IS_DESCENDING, ///< Whether or not the sorted-order is high-to-low - typename KeyT, ///< Key type - typename ValueT, ///< Value type - typename OffsetT> ///< Signed integer type for global offsets -__launch_bounds__ (int(ChainedPolicyT::ActivePolicy::SingleTilePolicy::BLOCK_THREADS), 1) -__global__ void DeviceRadixSortSingleTileKernel( - const KeyT *d_keys_in, ///< [in] Input keys buffer - KeyT *d_keys_out, ///< [in] Output keys buffer - const ValueT *d_values_in, ///< [in] Input values buffer - ValueT *d_values_out, ///< [in] Output values buffer - OffsetT num_items, ///< [in] Total number of input data items - int current_bit, ///< [in] Bit position of current radix digit - int end_bit) ///< [in] The past-the-end (most-significant) bit index needed for key comparison -{ - // Constants - enum - { - BLOCK_THREADS = ChainedPolicyT::ActivePolicy::SingleTilePolicy::BLOCK_THREADS, - ITEMS_PER_THREAD = ChainedPolicyT::ActivePolicy::SingleTilePolicy::ITEMS_PER_THREAD, - KEYS_ONLY = Equals::VALUE, - }; - - // BlockRadixSort type - typedef BlockRadixSort< - KeyT, - BLOCK_THREADS, - ITEMS_PER_THREAD, - ValueT, - ChainedPolicyT::ActivePolicy::SingleTilePolicy::RADIX_BITS, - ChainedPolicyT::ActivePolicy::SingleTilePolicy::MEMOIZE_OUTER_SCAN, - ChainedPolicyT::ActivePolicy::SingleTilePolicy::INNER_SCAN_ALGORITHM> - BlockRadixSortT; - - // BlockLoad type (keys) - typedef BlockLoad< - KeyT, - BLOCK_THREADS, - ITEMS_PER_THREAD, - ChainedPolicyT::ActivePolicy::SingleTilePolicy::LOAD_ALGORITHM> BlockLoadKeys; - - // BlockLoad type (values) - typedef BlockLoad< - ValueT, - BLOCK_THREADS, - ITEMS_PER_THREAD, - ChainedPolicyT::ActivePolicy::SingleTilePolicy::LOAD_ALGORITHM> BlockLoadValues; - - // Unsigned word for key bits - typedef typename 
Traits<KeyT>::UnsignedBits UnsignedBitsT; - - // Shared memory storage - __shared__ union - { - typename BlockRadixSortT::TempStorage sort; - typename BlockLoadKeys::TempStorage load_keys; - typename BlockLoadValues::TempStorage load_values; - - } temp_storage; - - // Keys and values for the block - KeyT keys[ITEMS_PER_THREAD]; - ValueT values[ITEMS_PER_THREAD]; - - // Get default (min/max) value for out-of-bounds keys - UnsignedBitsT default_key_bits = (IS_DESCENDING) ? Traits<KeyT>::LOWEST_KEY : Traits<KeyT>::MAX_KEY; - KeyT default_key = reinterpret_cast<KeyT&>(default_key_bits); - - // Load keys - BlockLoadKeys(temp_storage.load_keys).Load(d_keys_in, keys, num_items, default_key); - - CTA_SYNC(); - - // Load values - if (!KEYS_ONLY) - { - BlockLoadValues(temp_storage.load_values).Load(d_values_in, values, num_items); - - CTA_SYNC(); - } - - // Sort tile - BlockRadixSortT(temp_storage.sort).SortBlockedToStriped( - keys, - values, - current_bit, - end_bit, - Int2Type<IS_DESCENDING>(), - Int2Type<KEYS_ONLY>()); - - // Store keys and values - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - int item_offset = ITEM * BLOCK_THREADS + threadIdx.x; - if (item_offset < num_items) - { - d_keys_out[item_offset] = keys[ITEM]; - if (!KEYS_ONLY) - d_values_out[item_offset] = values[ITEM]; - } - } -} - - -/** - * Segmented radix sorting pass (one block per segment) - */ -template < - typename ChainedPolicyT, ///< Chained tuning policy - bool ALT_DIGIT_BITS, ///< Whether or not to use the alternate (lower-bits) policy - bool IS_DESCENDING, ///< Whether or not the sorted-order is high-to-low - typename KeyT, ///< Key type - typename ValueT, ///< Value type - typename OffsetT> ///< Signed integer type for global offsets -__launch_bounds__ (int((ALT_DIGIT_BITS) ? - ChainedPolicyT::ActivePolicy::AltSegmentedPolicy::BLOCK_THREADS : - ChainedPolicyT::ActivePolicy::SegmentedPolicy::BLOCK_THREADS)) -__global__ void DeviceSegmentedRadixSortKernel( - const KeyT *d_keys_in, ///< [in] Input keys buffer - KeyT *d_keys_out, ///< [in] Output keys buffer - const ValueT *d_values_in, ///< [in] Input values buffer - ValueT *d_values_out, ///< [in] Output values buffer - const int *d_begin_offsets, ///< [in] %Device-accessible pointer to the sequence of beginning offsets of length \p num_segments, such that d_begin_offsets[i] is the first element of the i-th data segment in d_keys_* and d_values_* - const int *d_end_offsets, ///< [in] %Device-accessible pointer to the sequence of ending offsets of length \p num_segments, such that d_end_offsets[i]-1 is the last element of the i-th data segment in d_keys_* and d_values_*. If d_end_offsets[i]-1 <= d_begin_offsets[i], the i-th segment is considered empty.
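In normal use this per-segment kernel is reached through the public cub::DeviceSegmentedRadixSort front-end rather than invoked directly. A minimal host-side usage sketch (device buffers assumed allocated; the same two-phase temp-storage protocol applies):

    // Sorting num_segments independent segments in a single launch
    int   num_items    = 1 << 20;
    int   num_segments = 1024;
    int  *d_keys_in, *d_keys_out;             // device key buffers (assumed allocated)
    int  *d_begin_offsets, *d_end_offsets;    // segment i spans [d_begin_offsets[i], d_end_offsets[i])
    void *d_temp_storage = NULL;
    size_t temp_storage_bytes = 0;

    cub::DeviceSegmentedRadixSort::SortKeys(d_temp_storage, temp_storage_bytes,
        d_keys_in, d_keys_out, num_items, num_segments, d_begin_offsets, d_end_offsets);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    cub::DeviceSegmentedRadixSort::SortKeys(d_temp_storage, temp_storage_bytes,
        d_keys_in, d_keys_out, num_items, num_segments, d_begin_offsets, d_end_offsets);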
- int /*num_segments*/, ///< [in] The number of segments that comprise the sorting data - int current_bit, ///< [in] Bit position of current radix digit - int pass_bits) ///< [in] Number of bits of current radix digit -{ - // - // Constants - // - - typedef typename If<(ALT_DIGIT_BITS), - typename ChainedPolicyT::ActivePolicy::AltSegmentedPolicy, - typename ChainedPolicyT::ActivePolicy::SegmentedPolicy>::Type SegmentedPolicyT; - - enum - { - BLOCK_THREADS = SegmentedPolicyT::BLOCK_THREADS, - ITEMS_PER_THREAD = SegmentedPolicyT::ITEMS_PER_THREAD, - RADIX_BITS = SegmentedPolicyT::RADIX_BITS, - TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, - RADIX_DIGITS = 1 << RADIX_BITS, - KEYS_ONLY = Equals::VALUE, - }; - - // Upsweep type - typedef AgentRadixSortUpsweep< - AgentRadixSortUpsweepPolicy, - KeyT, - OffsetT> - BlockUpsweepT; - - // Digit-scan type - typedef BlockScan DigitScanT; - - // Downsweep type - typedef AgentRadixSortDownsweep BlockDownsweepT; - - // - // Process input tiles - // - - // Shared memory storage - __shared__ union - { - typename BlockUpsweepT::TempStorage upsweep; - typename BlockDownsweepT::TempStorage downsweep; - struct - { - volatile OffsetT reverse_counts_in[RADIX_DIGITS]; - volatile OffsetT reverse_counts_out[RADIX_DIGITS]; - typename DigitScanT::TempStorage scan; - }; - - } temp_storage; - - OffsetT segment_begin = d_begin_offsets[blockIdx.x]; - OffsetT segment_end = d_end_offsets[blockIdx.x]; - OffsetT num_items = segment_end - segment_begin; - - // Check if empty segment - if (num_items <= 0) - return; - - // Upsweep - OffsetT bin_count = 0; // The count of each digit value in this pass (valid in the first RADIX_DIGITS threads) - BlockUpsweepT(temp_storage.upsweep, d_keys_in, current_bit, pass_bits).ProcessRegion( - segment_begin, - segment_end, - bin_count); - - CTA_SYNC(); - - if (IS_DESCENDING) - { - // Reverse bin counts - if (threadIdx.x < RADIX_DIGITS) - temp_storage.reverse_counts_in[threadIdx.x] = bin_count; - - CTA_SYNC(); - - if (threadIdx.x < RADIX_DIGITS) - bin_count = temp_storage.reverse_counts_in[RADIX_DIGITS - threadIdx.x - 1]; - } - - // Scan - OffsetT bin_offset; // The global scatter base offset for each digit value in this pass (valid in the first RADIX_DIGITS threads) - DigitScanT(temp_storage.scan).ExclusiveSum(bin_count, bin_offset); - bin_offset += segment_begin; - - if (IS_DESCENDING) - { - // Reverse bin offsets - if (threadIdx.x < RADIX_DIGITS) - temp_storage.reverse_counts_out[threadIdx.x] = bin_offset; - - CTA_SYNC(); - - if (threadIdx.x < RADIX_DIGITS) - bin_offset = temp_storage.reverse_counts_out[RADIX_DIGITS - threadIdx.x - 1]; - } - - CTA_SYNC(); - - // Downsweep - BlockDownsweepT(temp_storage.downsweep, num_items, bin_offset, d_keys_in, d_keys_out, d_values_in, d_values_out, current_bit, pass_bits).ProcessRegion( - segment_begin, segment_end); -} - - - -/****************************************************************************** - * Policy - ******************************************************************************/ - -/** - * Tuning policy for kernel specialization - */ -template < - typename KeyT, ///< Key type - typename ValueT, ///< Value type - typename OffsetT> ///< Signed integer type for global offsets -struct DeviceRadixSortPolicy -{ - //------------------------------------------------------------------------------ - // Constants - //------------------------------------------------------------------------------ - - enum - { - // Whether this is a keys-only (or key-value) sort - KEYS_ONLY = (Equals::VALUE), - - // 
Relative size of KeyT type to a 4-byte word - SCALE_FACTOR_4B = (CUB_MAX(sizeof(KeyT), sizeof(ValueT)) + 3) / 4, - }; - - //------------------------------------------------------------------------------ - // Architecture-specific tuning policies - //------------------------------------------------------------------------------ - - /// SM13 - struct Policy130 : ChainedPolicy<130, Policy130, Policy130> - { - enum { - PRIMARY_RADIX_BITS = 5, - ALT_RADIX_BITS = PRIMARY_RADIX_BITS - 1, - }; - - // Keys-only upsweep policies - typedef AgentRadixSortUpsweepPolicy <128, CUB_MAX(1, 19 / SCALE_FACTOR_4B), LOAD_DEFAULT, PRIMARY_RADIX_BITS> UpsweepPolicyKeys; - typedef AgentRadixSortUpsweepPolicy <128, CUB_MAX(1, 15 / SCALE_FACTOR_4B), LOAD_DEFAULT, ALT_RADIX_BITS> AltUpsweepPolicyKeys; - - // Key-value pairs upsweep policies - typedef AgentRadixSortUpsweepPolicy <128, CUB_MAX(1, 19 / SCALE_FACTOR_4B), LOAD_DEFAULT, PRIMARY_RADIX_BITS> UpsweepPolicyPairs; - typedef AgentRadixSortUpsweepPolicy <128, CUB_MAX(1, 15 / SCALE_FACTOR_4B), LOAD_DEFAULT, ALT_RADIX_BITS> AltUpsweepPolicyPairs; - - // Upsweep policies - typedef typename If::Type UpsweepPolicy; - typedef typename If::Type AltUpsweepPolicy; - - // Scan policy - typedef AgentScanPolicy <256, 4, BLOCK_LOAD_VECTORIZE, LOAD_DEFAULT, BLOCK_STORE_VECTORIZE, BLOCK_SCAN_WARP_SCANS> ScanPolicy; - - // Keys-only downsweep policies - typedef AgentRadixSortDownsweepPolicy <64, CUB_MAX(1, 19 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, PRIMARY_RADIX_BITS> DownsweepPolicyKeys; - typedef AgentRadixSortDownsweepPolicy <128, CUB_MAX(1, 15 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, ALT_RADIX_BITS> AltDownsweepPolicyKeys; - - // Key-value pairs downsweep policies - typedef AgentRadixSortDownsweepPolicy <64, CUB_MAX(1, 19 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, PRIMARY_RADIX_BITS> DownsweepPolicyPairs; - typedef AgentRadixSortDownsweepPolicy <128, CUB_MAX(1, 15 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, ALT_RADIX_BITS> AltDownsweepPolicyPairs; - - // Downsweep policies - typedef typename If::Type DownsweepPolicy; - typedef typename If::Type AltDownsweepPolicy; - - // Single-tile policy - typedef DownsweepPolicy SingleTilePolicy; - - // Segmented policies - typedef DownsweepPolicy SegmentedPolicy; - typedef AltDownsweepPolicy AltSegmentedPolicy; - }; - - /// SM20 - struct Policy200 : ChainedPolicy<200, Policy200, Policy130> - { - enum { - PRIMARY_RADIX_BITS = 5, - ALT_RADIX_BITS = PRIMARY_RADIX_BITS - 1, - }; - - // Keys-only upsweep policies - typedef AgentRadixSortUpsweepPolicy <64, CUB_MAX(1, 18 / SCALE_FACTOR_4B), LOAD_DEFAULT, PRIMARY_RADIX_BITS> UpsweepPolicyKeys; - typedef AgentRadixSortUpsweepPolicy <64, CUB_MAX(1, 18 / SCALE_FACTOR_4B), LOAD_DEFAULT, ALT_RADIX_BITS> AltUpsweepPolicyKeys; - - // Key-value pairs upsweep policies - typedef AgentRadixSortUpsweepPolicy <128, CUB_MAX(1, 13 / SCALE_FACTOR_4B), LOAD_DEFAULT, PRIMARY_RADIX_BITS> UpsweepPolicyPairs; - typedef AgentRadixSortUpsweepPolicy <128, CUB_MAX(1, 13 / SCALE_FACTOR_4B), LOAD_DEFAULT, ALT_RADIX_BITS> AltUpsweepPolicyPairs; - - // Upsweep policies - typedef typename If::Type UpsweepPolicy; - typedef typename If::Type AltUpsweepPolicy; - - // Scan policy - typedef AgentScanPolicy <512, 4, 
BLOCK_LOAD_VECTORIZE, LOAD_DEFAULT, BLOCK_STORE_VECTORIZE, BLOCK_SCAN_RAKING_MEMOIZE> ScanPolicy; - - // Keys-only downsweep policies - typedef AgentRadixSortDownsweepPolicy <64, CUB_MAX(1, 18 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, PRIMARY_RADIX_BITS> DownsweepPolicyKeys; - typedef AgentRadixSortDownsweepPolicy <64, CUB_MAX(1, 18 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, ALT_RADIX_BITS> AltDownsweepPolicyKeys; - - // Key-value pairs downsweep policies - typedef AgentRadixSortDownsweepPolicy <128, CUB_MAX(1, 13 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, PRIMARY_RADIX_BITS> DownsweepPolicyPairs; - typedef AgentRadixSortDownsweepPolicy <128, CUB_MAX(1, 13 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, ALT_RADIX_BITS> AltDownsweepPolicyPairs; - - // Downsweep policies - typedef typename If::Type DownsweepPolicy; - typedef typename If::Type AltDownsweepPolicy; - - // Single-tile policy - typedef DownsweepPolicy SingleTilePolicy; - - // Segmented policies - typedef DownsweepPolicy SegmentedPolicy; - typedef AltDownsweepPolicy AltSegmentedPolicy; - }; - - /// SM30 - struct Policy300 : ChainedPolicy<300, Policy300, Policy200> - { - enum { - PRIMARY_RADIX_BITS = 5, - ALT_RADIX_BITS = PRIMARY_RADIX_BITS - 1, - }; - - // Keys-only upsweep policies - typedef AgentRadixSortUpsweepPolicy <256, CUB_MAX(1, 7 / SCALE_FACTOR_4B), LOAD_DEFAULT, PRIMARY_RADIX_BITS> UpsweepPolicyKeys; - typedef AgentRadixSortUpsweepPolicy <256, CUB_MAX(1, 7 / SCALE_FACTOR_4B), LOAD_DEFAULT, ALT_RADIX_BITS> AltUpsweepPolicyKeys; - - // Key-value pairs upsweep policies - typedef AgentRadixSortUpsweepPolicy <256, CUB_MAX(1, 5 / SCALE_FACTOR_4B), LOAD_DEFAULT, PRIMARY_RADIX_BITS> UpsweepPolicyPairs; - typedef AgentRadixSortUpsweepPolicy <256, CUB_MAX(1, 5 / SCALE_FACTOR_4B), LOAD_DEFAULT, ALT_RADIX_BITS> AltUpsweepPolicyPairs; - - // Upsweep policies - typedef typename If::Type UpsweepPolicy; - typedef typename If::Type AltUpsweepPolicy; - - // Scan policy - typedef AgentScanPolicy <1024, 4, BLOCK_LOAD_VECTORIZE, LOAD_DEFAULT, BLOCK_STORE_VECTORIZE, BLOCK_SCAN_WARP_SCANS> ScanPolicy; - - // Keys-only downsweep policies - typedef AgentRadixSortDownsweepPolicy <128, CUB_MAX(1, 14 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, PRIMARY_RADIX_BITS> DownsweepPolicyKeys; - typedef AgentRadixSortDownsweepPolicy <128, CUB_MAX(1, 14 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, ALT_RADIX_BITS> AltDownsweepPolicyKeys; - - // Key-value pairs downsweep policies - typedef AgentRadixSortDownsweepPolicy <128, CUB_MAX(1, 10 / SCALE_FACTOR_4B), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, false, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, PRIMARY_RADIX_BITS> DownsweepPolicyPairs; - typedef AgentRadixSortDownsweepPolicy <128, CUB_MAX(1, 10 / SCALE_FACTOR_4B), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, false, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, ALT_RADIX_BITS> AltDownsweepPolicyPairs; - - // Downsweep policies - typedef typename If::Type DownsweepPolicy; - typedef typename If::Type AltDownsweepPolicy; - - // Single-tile policy - typedef DownsweepPolicy SingleTilePolicy; - - // Segmented 
policies - typedef DownsweepPolicy SegmentedPolicy; - typedef AltDownsweepPolicy AltSegmentedPolicy; - }; - - - /// SM35 - struct Policy350 : ChainedPolicy<350, Policy350, Policy300> - { - enum { - PRIMARY_RADIX_BITS = 5, // 1.72B 32b keys/s, 1.17B 32b pairs/s, 1.55B 32b segmented keys/s (K40m) - }; - - // Scan policy - typedef AgentScanPolicy <1024, 4, BLOCK_LOAD_VECTORIZE, LOAD_DEFAULT, BLOCK_STORE_VECTORIZE, BLOCK_SCAN_WARP_SCANS> ScanPolicy; - - // Keys-only downsweep policies - typedef AgentRadixSortDownsweepPolicy <64, CUB_MAX(1, 18 / SCALE_FACTOR_4B), BLOCK_LOAD_DIRECT, LOAD_LDG, true, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, PRIMARY_RADIX_BITS> DownsweepPolicyKeys; - typedef AgentRadixSortDownsweepPolicy <128, CUB_MAX(1, 11 / SCALE_FACTOR_4B), BLOCK_LOAD_DIRECT, LOAD_LDG, true, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, PRIMARY_RADIX_BITS - 1> AltDownsweepPolicyKeys; - - // Key-value pairs downsweep policies - typedef AgentRadixSortDownsweepPolicy <128, CUB_MAX(1, 15 / SCALE_FACTOR_4B), BLOCK_LOAD_DIRECT, LOAD_LDG, true, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, PRIMARY_RADIX_BITS> DownsweepPolicyPairs; - typedef AltDownsweepPolicyKeys AltDownsweepPolicyPairs; - - // Downsweep policies - typedef typename If::Type DownsweepPolicy; - typedef typename If::Type AltDownsweepPolicy; - - // Upsweep policies - typedef DownsweepPolicy UpsweepPolicy; - typedef AltDownsweepPolicy AltUpsweepPolicy; - - // Single-tile policy - typedef DownsweepPolicy SingleTilePolicy; - - // Segmented policies - typedef DownsweepPolicy SegmentedPolicy; - typedef AltDownsweepPolicy AltSegmentedPolicy; - }; - - - /// SM50 - struct Policy500 : ChainedPolicy<500, Policy500, Policy350> - { - enum { - PRIMARY_RADIX_BITS = 7, // 3.5B 32b keys/s, 1.92B 32b pairs/s (TitanX) - SINGLE_TILE_RADIX_BITS = 6, - SEGMENTED_RADIX_BITS = 6, // 3.1B 32b segmented keys/s (TitanX) - }; - - // ScanPolicy - typedef AgentScanPolicy <512, 23, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, BLOCK_STORE_WARP_TRANSPOSE, BLOCK_SCAN_RAKING_MEMOIZE> ScanPolicy; - - // Downsweep policies - typedef AgentRadixSortDownsweepPolicy <160, CUB_MAX(1, 39 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, PRIMARY_RADIX_BITS> DownsweepPolicy; - typedef AgentRadixSortDownsweepPolicy <256, CUB_MAX(1, 16 / SCALE_FACTOR_4B), BLOCK_LOAD_DIRECT, LOAD_LDG, true, BLOCK_SCAN_RAKING_MEMOIZE, RADIX_SORT_SCATTER_TWO_PHASE, PRIMARY_RADIX_BITS - 1> AltDownsweepPolicy; - - // Upsweep policies - typedef DownsweepPolicy UpsweepPolicy; - typedef AltDownsweepPolicy AltUpsweepPolicy; - - // Single-tile policy - typedef AgentRadixSortDownsweepPolicy <256, CUB_MAX(1, 19 / SCALE_FACTOR_4B), BLOCK_LOAD_DIRECT, LOAD_LDG, true, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, SINGLE_TILE_RADIX_BITS> SingleTilePolicy; - - // Segmented policies - typedef AgentRadixSortDownsweepPolicy <192, CUB_MAX(1, 31 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, true, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, SEGMENTED_RADIX_BITS> SegmentedPolicy; - typedef AgentRadixSortDownsweepPolicy <256, CUB_MAX(1, 11 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, true, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, SEGMENTED_RADIX_BITS - 1> AltSegmentedPolicy; - }; - - - /// SM60 (GP100) - struct Policy600 : ChainedPolicy<600, Policy600, Policy500> - { - enum { - PRIMARY_RADIX_BITS = 6, - }; - - // ScanPolicy - typedef AgentScanPolicy <512, 23, 
BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, BLOCK_STORE_WARP_TRANSPOSE, BLOCK_SCAN_RAKING_MEMOIZE> ScanPolicy; - - // Downsweep policies - typedef AgentRadixSortDownsweepPolicy <192, CUB_MAX(1, 39 / SCALE_FACTOR_4B), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, true, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, PRIMARY_RADIX_BITS> DownsweepPolicy; - typedef AgentRadixSortDownsweepPolicy <384, CUB_MAX(1, 11 / SCALE_FACTOR_4B), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, true, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, PRIMARY_RADIX_BITS - 1> AltDownsweepPolicy; - - // Upsweep policies - typedef DownsweepPolicy UpsweepPolicy; - typedef AltDownsweepPolicy AltUpsweepPolicy; - - // Single-tile policy - typedef AgentRadixSortDownsweepPolicy <256, CUB_MAX(1, 19 / SCALE_FACTOR_4B), BLOCK_LOAD_DIRECT, LOAD_LDG, true, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, PRIMARY_RADIX_BITS> SingleTilePolicy; - - // Segmented policies - typedef DownsweepPolicy SegmentedPolicy; - typedef AltDownsweepPolicy AltSegmentedPolicy; - }; - - - /// SM61 (GP104) - struct Policy610 : ChainedPolicy<610, Policy610, Policy600> - { - enum { - PRIMARY_RADIX_BITS = 7, // 3.4B 32b keys/s, 1.83B 32b pairs/s (1080) - SINGLE_TILE_RADIX_BITS = 6, - SEGMENTED_RADIX_BITS = 6, // 3.3B 32b segmented keys/s (1080) - }; - - // ScanPolicy - typedef AgentScanPolicy <512, 23, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, BLOCK_STORE_WARP_TRANSPOSE, BLOCK_SCAN_RAKING_MEMOIZE> ScanPolicy; - - // Downsweep policies - typedef AgentRadixSortDownsweepPolicy <160, CUB_MAX(1, 53 / SCALE_FACTOR_4B), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, KEYS_ONLY, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, PRIMARY_RADIX_BITS> DownsweepPolicy; - typedef AgentRadixSortDownsweepPolicy <256, CUB_MAX(1, 35 / SCALE_FACTOR_4B), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, true, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, PRIMARY_RADIX_BITS - 1> AltDownsweepPolicy; - - // Upsweep policies - typedef DownsweepPolicy UpsweepPolicy; - typedef AltDownsweepPolicy AltUpsweepPolicy; - - // Single-tile policy - typedef AgentRadixSortDownsweepPolicy <256, CUB_MAX(1, 19 / SCALE_FACTOR_4B), BLOCK_LOAD_DIRECT, LOAD_LDG, true, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, SINGLE_TILE_RADIX_BITS> SingleTilePolicy; - - // Segmented policies - typedef AgentRadixSortDownsweepPolicy <192, CUB_MAX(1, 39 / SCALE_FACTOR_4B), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, true, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, SEGMENTED_RADIX_BITS> SegmentedPolicy; - typedef AgentRadixSortDownsweepPolicy <384, CUB_MAX(1, 11 / SCALE_FACTOR_4B), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, true, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, SEGMENTED_RADIX_BITS - 1> AltSegmentedPolicy; - }; - - - /// SM62 (Tegra, less RF) - struct Policy620 : ChainedPolicy<620, Policy620, Policy610> - { - enum { - PRIMARY_RADIX_BITS = 5, - ALT_RADIX_BITS = PRIMARY_RADIX_BITS - 1, - }; - - // ScanPolicy - typedef AgentScanPolicy <512, 23, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, BLOCK_STORE_WARP_TRANSPOSE, BLOCK_SCAN_RAKING_MEMOIZE> ScanPolicy; - - // Downsweep policies - typedef AgentRadixSortDownsweepPolicy <256, CUB_MAX(1, 16 / SCALE_FACTOR_4B), BLOCK_LOAD_DIRECT, LOAD_LDG, true, BLOCK_SCAN_RAKING_MEMOIZE, RADIX_SORT_SCATTER_TWO_PHASE, PRIMARY_RADIX_BITS> DownsweepPolicy; - typedef AgentRadixSortDownsweepPolicy <256, CUB_MAX(1, 16 / SCALE_FACTOR_4B), BLOCK_LOAD_DIRECT, LOAD_LDG, true, BLOCK_SCAN_RAKING_MEMOIZE, RADIX_SORT_SCATTER_TWO_PHASE, ALT_RADIX_BITS> AltDownsweepPolicy; - - // Upsweep policies - 
typedef DownsweepPolicy UpsweepPolicy; - typedef AltDownsweepPolicy AltUpsweepPolicy; - - // Single-tile policy - typedef AgentRadixSortDownsweepPolicy <256, CUB_MAX(1, 19 / SCALE_FACTOR_4B), BLOCK_LOAD_DIRECT, LOAD_LDG, true, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, PRIMARY_RADIX_BITS> SingleTilePolicy; - - // Segmented policies - typedef DownsweepPolicy SegmentedPolicy; - typedef AltDownsweepPolicy AltSegmentedPolicy; - }; - - /// MaxPolicy - typedef Policy620 MaxPolicy; -}; - - - -/****************************************************************************** - * Single-problem dispatch - ******************************************************************************/ - -/** - * Utility class for dispatching the appropriately-tuned kernels for device-wide radix sort - */ -template < - bool IS_DESCENDING, ///< Whether or not the sorted-order is high-to-low - typename KeyT, ///< Key type - typename ValueT, ///< Value type - typename OffsetT> ///< Signed integer type for global offsets -struct DispatchRadixSort : - DeviceRadixSortPolicy -{ - //------------------------------------------------------------------------------ - // Constants - //------------------------------------------------------------------------------ - - enum - { - // Whether this is a keys-only (or key-value) sort - KEYS_ONLY = (Equals::VALUE), - }; - - - //------------------------------------------------------------------------------ - // Problem state - //------------------------------------------------------------------------------ - - void *d_temp_storage; ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes; ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - DoubleBuffer &d_keys; ///< [in,out] Double-buffer whose current buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys - DoubleBuffer &d_values; ///< [in,out] Double-buffer whose current buffer contains the unsorted input values and, upon return, is updated to point to the sorted output values - OffsetT num_items; ///< [in] Number of items to sort - int begin_bit; ///< [in] The beginning (least-significant) bit index needed for key comparison - int end_bit; ///< [in] The past-the-end (most-significant) bit index needed for key comparison - cudaStream_t stream; ///< [in] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous; ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
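The DoubleBuffer members above encode CUB's ping-pong convention: d_buffers[selector] is the currently valid buffer, each digit pass flips selector, and callers must therefore read results through Current() once sorting finishes. A minimal sketch of that protocol via cub::DeviceRadixSort::SortKeys (both device buffers assumed allocated):

    // DoubleBuffer ping-pong across radix sort digit passes
    int num_items = 1 << 20;
    unsigned int *d_key_buf, *d_key_alt_buf;              // two device buffers (assumed allocated)
    cub::DoubleBuffer<unsigned int> d_keys(d_key_buf, d_key_alt_buf);

    void *d_temp_storage = NULL;
    size_t temp_storage_bytes = 0;
    cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys, num_items);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys, num_items);

    unsigned int *d_sorted = d_keys.Current();            // selector now designates the sorted buffer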
- int ptx_version; ///< [in] PTX version - bool is_overwrite_okay; ///< [in] Whether is okay to overwrite source buffers - - - //------------------------------------------------------------------------------ - // Constructor - //------------------------------------------------------------------------------ - - /// Constructor - CUB_RUNTIME_FUNCTION __forceinline__ - DispatchRadixSort( - void* d_temp_storage, - size_t &temp_storage_bytes, - DoubleBuffer &d_keys, - DoubleBuffer &d_values, - OffsetT num_items, - int begin_bit, - int end_bit, - bool is_overwrite_okay, - cudaStream_t stream, - bool debug_synchronous, - int ptx_version) - : - d_temp_storage(d_temp_storage), - temp_storage_bytes(temp_storage_bytes), - d_keys(d_keys), - d_values(d_values), - num_items(num_items), - begin_bit(begin_bit), - end_bit(end_bit), - stream(stream), - debug_synchronous(debug_synchronous), - ptx_version(ptx_version), - is_overwrite_okay(is_overwrite_okay) - {} - - - //------------------------------------------------------------------------------ - // Small-problem (single tile) invocation - //------------------------------------------------------------------------------ - - /// Invoke a single block to sort in-core - template < - typename ActivePolicyT, ///< Umbrella policy active for the target device - typename SingleTileKernelT> ///< Function type of cub::DeviceRadixSortSingleTileKernel - CUB_RUNTIME_FUNCTION __forceinline__ - cudaError_t InvokeSingleTile( - SingleTileKernelT single_tile_kernel) ///< [in] Kernel function pointer to parameterization of cub::DeviceRadixSortSingleTileKernel - { -#ifndef CUB_RUNTIME_ENABLED - (void)single_tile_kernel; - // Kernel launch not supported from this device - return CubDebug(cudaErrorNotSupported ); -#else - cudaError error = cudaSuccess; - do - { - // Return if the caller is simply requesting the size of the storage allocation - if (d_temp_storage == NULL) - { - temp_storage_bytes = 1; - break; - } - - // Return if empty problem - if (num_items == 0) - break; - - // Log single_tile_kernel configuration - if (debug_synchronous) - _CubLog("Invoking single_tile_kernel<<<%d, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy, current bit %d, bit_grain %d\n", - 1, ActivePolicyT::SingleTilePolicy::BLOCK_THREADS, (long long) stream, - ActivePolicyT::SingleTilePolicy::ITEMS_PER_THREAD, 1, begin_bit, ActivePolicyT::SingleTilePolicy::RADIX_BITS); - - // Invoke upsweep_kernel with same grid size as downsweep_kernel - single_tile_kernel<<<1, ActivePolicyT::SingleTilePolicy::BLOCK_THREADS, 0, stream>>>( - d_keys.Current(), - d_keys.Alternate(), - d_values.Current(), - d_values.Alternate(), - num_items, - begin_bit, - end_bit); - - // Check for failure to launch - if (CubDebug(error = cudaPeekAtLastError())) break; - - // Sync the stream if specified to flush runtime errors - if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; - - // Update selector - d_keys.selector ^= 1; - d_values.selector ^= 1; - } - while (0); - - return error; - -#endif // CUB_RUNTIME_ENABLED - } - - - //------------------------------------------------------------------------------ - // Normal problem size invocation - //------------------------------------------------------------------------------ - - /** - * Invoke a three-kernel sorting pass at the current bit. 
- */ - template <typename PassConfigT> - CUB_RUNTIME_FUNCTION __forceinline__ - cudaError_t InvokePass( - const KeyT *d_keys_in, - KeyT *d_keys_out, - const ValueT *d_values_in, - ValueT *d_values_out, - OffsetT *d_spine, - int spine_length, - int &current_bit, - PassConfigT &pass_config) - { - cudaError error = cudaSuccess; - do - { - int pass_bits = CUB_MIN(pass_config.radix_bits, (end_bit - current_bit)); - - // Log upsweep_kernel configuration - if (debug_synchronous) - _CubLog("Invoking upsweep_kernel<<<%d, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy, current bit %d, bit_grain %d\n", - pass_config.even_share.grid_size, pass_config.upsweep_config.block_threads, (long long) stream, - pass_config.upsweep_config.items_per_thread, pass_config.upsweep_config.sm_occupancy, current_bit, pass_bits); - - // Invoke upsweep_kernel with same grid size as downsweep_kernel - pass_config.upsweep_kernel<<<pass_config.even_share.grid_size, pass_config.upsweep_config.block_threads, 0, stream>>>( - d_keys_in, - d_spine, - num_items, - current_bit, - pass_bits, - pass_config.even_share); - - // Check for failure to launch - if (CubDebug(error = cudaPeekAtLastError())) break; - - // Sync the stream if specified to flush runtime errors - if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; - - // Log scan_kernel configuration - if (debug_synchronous) _CubLog("Invoking scan_kernel<<<%d, %d, 0, %lld>>>(), %d items per thread\n", - 1, pass_config.scan_config.block_threads, (long long) stream, pass_config.scan_config.items_per_thread); - - // Invoke scan_kernel - pass_config.scan_kernel<<<1, pass_config.scan_config.block_threads, 0, stream>>>( - d_spine, - spine_length); - - // Check for failure to launch - if (CubDebug(error = cudaPeekAtLastError())) break; - - // Sync the stream if specified to flush runtime errors - if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; - - // Log downsweep_kernel configuration - if (debug_synchronous) _CubLog("Invoking downsweep_kernel<<<%d, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n", - pass_config.even_share.grid_size, pass_config.downsweep_config.block_threads, (long long) stream, - pass_config.downsweep_config.items_per_thread, pass_config.downsweep_config.sm_occupancy); - - // Invoke downsweep_kernel - pass_config.downsweep_kernel<<<pass_config.even_share.grid_size, pass_config.downsweep_config.block_threads, 0, stream>>>( - d_keys_in, - d_keys_out, - d_values_in, - d_values_out, - d_spine, - num_items, - current_bit, - pass_bits, - pass_config.even_share); - - // Check for failure to launch - if (CubDebug(error = cudaPeekAtLastError())) break; - - // Sync the stream if specified to flush runtime errors - if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; - - // Update current bit - current_bit += pass_bits; - } - while (0); - - return error; - } - - - - /// Pass configuration structure - template < - typename UpsweepKernelT, - typename ScanKernelT, - typename DownsweepKernelT> - struct PassConfig - { - UpsweepKernelT upsweep_kernel; - KernelConfig upsweep_config; - ScanKernelT scan_kernel; - KernelConfig scan_config; - DownsweepKernelT downsweep_kernel; - KernelConfig downsweep_config; - int radix_bits; - int radix_digits; - int max_downsweep_grid_size; - GridEvenShare<OffsetT> even_share; - - /// Initialize pass configuration - template < - typename UpsweepPolicyT, - typename ScanPolicyT, - typename DownsweepPolicyT> - CUB_RUNTIME_FUNCTION __forceinline__ - cudaError_t InitPassConfig( - UpsweepKernelT upsweep_kernel, - ScanKernelT scan_kernel, - DownsweepKernelT downsweep_kernel, - int ptx_version, - int sm_count, - int num_items) - { - cudaError error = cudaSuccess; - do - { -
-                this->upsweep_kernel   = upsweep_kernel;
-                this->scan_kernel      = scan_kernel;
-                this->downsweep_kernel = downsweep_kernel;
-                radix_bits             = DownsweepPolicyT::RADIX_BITS;
-                radix_digits           = 1 << radix_bits;
-
-                if (CubDebug(error = upsweep_config.Init<UpsweepPolicyT>(upsweep_kernel))) break;
-                if (CubDebug(error = scan_config.Init<ScanPolicyT>(scan_kernel))) break;
-                if (CubDebug(error = downsweep_config.Init<DownsweepPolicyT>(downsweep_kernel))) break;
-
-                max_downsweep_grid_size = (downsweep_config.sm_occupancy * sm_count) * CUB_SUBSCRIPTION_FACTOR(ptx_version);
-
-                even_share = GridEvenShare<OffsetT>(
-                    num_items,
-                    max_downsweep_grid_size,
-                    CUB_MAX(downsweep_config.tile_size, upsweep_config.tile_size));
-            }
-            while (0);
-            return error;
-        }
-    };
-
-
-    /// Invocation (run multiple digit passes)
-    template <
-        typename            ActivePolicyT,          ///< Umbrella policy active for the target device
-        typename            UpsweepKernelT,         ///< Function type of cub::DeviceRadixSortUpsweepKernel
-        typename            ScanKernelT,            ///< Function type of cub::SpineScanKernel
-        typename            DownsweepKernelT>       ///< Function type of cub::DeviceRadixSortDownsweepKernel
-    CUB_RUNTIME_FUNCTION __forceinline__
-    cudaError_t InvokePasses(
-        UpsweepKernelT      upsweep_kernel,         ///< [in] Kernel function pointer to parameterization of cub::DeviceRadixSortUpsweepKernel
-        UpsweepKernelT      alt_upsweep_kernel,     ///< [in] Alternate kernel function pointer to parameterization of cub::DeviceRadixSortUpsweepKernel
-        ScanKernelT         scan_kernel,            ///< [in] Kernel function pointer to parameterization of cub::SpineScanKernel
-        DownsweepKernelT    downsweep_kernel,       ///< [in] Kernel function pointer to parameterization of cub::DeviceRadixSortDownsweepKernel
-        DownsweepKernelT    alt_downsweep_kernel)   ///< [in] Alternate kernel function pointer to parameterization of cub::DeviceRadixSortDownsweepKernel
-    {
-#ifndef CUB_RUNTIME_ENABLED
-        (void)upsweep_kernel;
-        (void)alt_upsweep_kernel;
-        (void)scan_kernel;
-        (void)downsweep_kernel;
-        (void)alt_downsweep_kernel;
-
-        // Kernel launch not supported from this device
-        return CubDebug(cudaErrorNotSupported );
-#else
-
-        cudaError error = cudaSuccess;
-        do
-        {
-            // Get device ordinal
-            int device_ordinal;
-            if (CubDebug(error = cudaGetDevice(&device_ordinal))) break;
-
-            // Get SM count
-            int sm_count;
-            if (CubDebug(error = cudaDeviceGetAttribute (&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break;
-
-            // Init regular and alternate-digit kernel configurations
-            PassConfig<UpsweepKernelT, ScanKernelT, DownsweepKernelT> pass_config, alt_pass_config;
-            if ((error = pass_config.template InitPassConfig<
-                    typename ActivePolicyT::UpsweepPolicy,
-                    typename ActivePolicyT::ScanPolicy,
-                    typename ActivePolicyT::DownsweepPolicy>(
-                upsweep_kernel, scan_kernel, downsweep_kernel, ptx_version, sm_count, num_items))) break;
-
-            if ((error = alt_pass_config.template InitPassConfig<
-                    typename ActivePolicyT::AltUpsweepPolicy,
-                    typename ActivePolicyT::ScanPolicy,
-                    typename ActivePolicyT::AltDownsweepPolicy>(
-                alt_upsweep_kernel, scan_kernel, alt_downsweep_kernel, ptx_version, sm_count, num_items))) break;
-
-            // Get maximum spine length
-            int max_grid_size = CUB_MAX(pass_config.max_downsweep_grid_size, alt_pass_config.max_downsweep_grid_size);
-            int spine_length  = (max_grid_size * pass_config.radix_digits) + pass_config.scan_config.tile_size;
-
-            // Temporary storage allocation requirements
-            void* allocations[3];
-            size_t allocation_sizes[3] =
-            {
-                spine_length * sizeof(OffsetT),                         // bytes needed for privatized block digit histograms
-                (is_overwrite_okay) ? 0 : num_items * sizeof(KeyT),     // bytes needed for 3rd keys buffer
-                (is_overwrite_okay || (KEYS_ONLY)) ? 0 : num_items * sizeof(ValueT),   // bytes needed for 3rd values buffer
-            };
-
-            // Alias the temporary allocations from the single storage blob (or compute the necessary size of the blob)
-            if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break;
-
-            // Return if the caller is simply requesting the size of the storage allocation
-            if (d_temp_storage == NULL)
-                return cudaSuccess;
-
-            // Pass planning. Run passes of the alternate digit-size configuration until we have an even multiple of our preferred digit size
-            int num_bits           = end_bit - begin_bit;
-            int num_passes         = (num_bits + pass_config.radix_bits - 1) / pass_config.radix_bits;
-            bool is_num_passes_odd = num_passes & 1;
-            int max_alt_passes     = (num_passes * pass_config.radix_bits) - num_bits;
-            int alt_end_bit        = CUB_MIN(end_bit, begin_bit + (max_alt_passes * alt_pass_config.radix_bits));
-
-            // Alias the temporary storage allocations
-            OffsetT *d_spine = static_cast<OffsetT*>(allocations[0]);
-
-            DoubleBuffer<KeyT> d_keys_remaining_passes(
-                (is_overwrite_okay || is_num_passes_odd) ? d_keys.Alternate() : static_cast<KeyT*>(allocations[1]),
-                (is_overwrite_okay) ? d_keys.Current() : (is_num_passes_odd) ? static_cast<KeyT*>(allocations[1]) : d_keys.Alternate());
-
-            DoubleBuffer<ValueT> d_values_remaining_passes(
-                (is_overwrite_okay || is_num_passes_odd) ? d_values.Alternate() : static_cast<ValueT*>(allocations[2]),
-                (is_overwrite_okay) ? d_values.Current() : (is_num_passes_odd) ? static_cast<ValueT*>(allocations[2]) : d_values.Alternate());
-
-            // Run first pass, consuming from the input's current buffers
-            int current_bit = begin_bit;
-            if (CubDebug(error = InvokePass(
-                d_keys.Current(), d_keys_remaining_passes.Current(),
-                d_values.Current(), d_values_remaining_passes.Current(),
-                d_spine, spine_length, current_bit,
-                (current_bit < alt_end_bit) ? alt_pass_config : pass_config))) break;
-
-            // Run remaining passes
-            while (current_bit < end_bit)
-            {
-                if (CubDebug(error = InvokePass(
-                    d_keys_remaining_passes.d_buffers[d_keys_remaining_passes.selector], d_keys_remaining_passes.d_buffers[d_keys_remaining_passes.selector ^ 1],
-                    d_values_remaining_passes.d_buffers[d_keys_remaining_passes.selector], d_values_remaining_passes.d_buffers[d_keys_remaining_passes.selector ^ 1],
-                    d_spine, spine_length, current_bit,
-                    (current_bit < alt_end_bit) ? alt_pass_config : pass_config))) break;
-
-                // Invert selectors
-                d_keys_remaining_passes.selector ^= 1;
-                d_values_remaining_passes.selector ^= 1;
-            }
-
-            // Update selector
-            if (!is_overwrite_okay) {
-                num_passes = 1; // Sorted data always ends up in the other vector
-            }
-
-            d_keys.selector = (d_keys.selector + num_passes) & 1;
-            d_values.selector = (d_values.selector + num_passes) & 1;
-        }
-        while (0);
-
-        return error;
-
-#endif // CUB_RUNTIME_ENABLED
-    }
-
-
-    //------------------------------------------------------------------------------
-    // Chained policy invocation
-    //------------------------------------------------------------------------------
-
-    /// Invocation
-    template <typename ActivePolicyT>
-    CUB_RUNTIME_FUNCTION __forceinline__
-    cudaError_t Invoke()
-    {
-        typedef typename DispatchRadixSort::MaxPolicy     MaxPolicyT;
-        typedef typename ActivePolicyT::SingleTilePolicy  SingleTilePolicyT;
-
-        // Force kernel code-generation in all compiler passes
-        if (num_items <= (SingleTilePolicyT::BLOCK_THREADS * SingleTilePolicyT::ITEMS_PER_THREAD))
-        {
-            // Small, single tile size
-            return InvokeSingleTile<ActivePolicyT>(
-                DeviceRadixSortSingleTileKernel<MaxPolicyT, IS_DESCENDING, KeyT, ValueT, OffsetT>);
-        }
-        else
-        {
-            // Regular size
-            return InvokePasses<ActivePolicyT>(
-                DeviceRadixSortUpsweepKernel<   MaxPolicyT, false, IS_DESCENDING, KeyT, OffsetT>,
-                DeviceRadixSortUpsweepKernel<   MaxPolicyT, true,  IS_DESCENDING, KeyT, OffsetT>,
-                RadixSortScanBinsKernel<        MaxPolicyT, OffsetT>,
-                DeviceRadixSortDownsweepKernel< MaxPolicyT, false, IS_DESCENDING, KeyT, ValueT, OffsetT>,
-                DeviceRadixSortDownsweepKernel< MaxPolicyT, true,  IS_DESCENDING, KeyT, ValueT, OffsetT>);
-        }
-    }
-
-
-    //------------------------------------------------------------------------------
-    // Dispatch entrypoints
-    //------------------------------------------------------------------------------
-
-    /**
-     * Internal dispatch routine
-     */
-    CUB_RUNTIME_FUNCTION __forceinline__
-    static cudaError_t Dispatch(
-        void*                d_temp_storage,        ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
-        size_t               &temp_storage_bytes,   ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
-        DoubleBuffer<KeyT>   &d_keys,               ///< [in,out] Double-buffer whose current buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys
-        DoubleBuffer<ValueT> &d_values,             ///< [in,out] Double-buffer whose current buffer contains the unsorted input values and, upon return, is updated to point to the sorted output values
-        OffsetT              num_items,             ///< [in] Number of items to sort
-        int                  begin_bit,             ///< [in] The beginning (least-significant) bit index needed for key comparison
-        int                  end_bit,               ///< [in] The past-the-end (most-significant) bit index needed for key comparison
-        bool                 is_overwrite_okay,     ///< [in] Whether it is okay to overwrite source buffers
-        cudaStream_t         stream,                ///< [in] CUDA stream to launch kernels within. Default is stream0.
-        bool                 debug_synchronous)     ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false.
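// [Editor's note - usage sketch, not part of the original CUB source.
// This internal dispatch layer is normally reached through the
// cub::DeviceRadixSort entry points; the buffer names below are assumed.]
#if 0
DoubleBuffer<unsigned int> keys(d_key_buf, d_key_alt_buf);
DoubleBuffer<NullType>     values;              // keys-only sort
void   *d_temp     = NULL;
size_t  temp_bytes = 0;
// First call with d_temp == NULL only computes temp_bytes
DispatchRadixSort<false, unsigned int, NullType, int>::Dispatch(
    d_temp, temp_bytes, keys, values, num_items, 0, 32,
    true /*is_overwrite_okay*/, 0 /*stream*/, false /*debug_synchronous*/);
cudaMalloc(&d_temp, temp_bytes);
// Second call performs the sort; keys.Current() then points at the sorted keys
DispatchRadixSort<false, unsigned int, NullType, int>::Dispatch(
    d_temp, temp_bytes, keys, values, num_items, 0, 32, true, 0, false);
#endif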
- { - typedef typename DispatchRadixSort::MaxPolicy MaxPolicyT; - - cudaError_t error; - do { - // Get PTX version - int ptx_version; - if (CubDebug(error = PtxVersion(ptx_version))) break; - - // Create dispatch functor - DispatchRadixSort dispatch( - d_temp_storage, temp_storage_bytes, - d_keys, d_values, - num_items, begin_bit, end_bit, is_overwrite_okay, - stream, debug_synchronous, ptx_version); - - // Dispatch to chained policy - if (CubDebug(error = MaxPolicyT::Invoke(ptx_version, dispatch))) break; - - } while (0); - - return error; - } -}; - - - - -/****************************************************************************** - * Segmented dispatch - ******************************************************************************/ - -/** - * Utility class for dispatching the appropriately-tuned kernels for segmented device-wide radix sort - */ -template < - bool IS_DESCENDING, ///< Whether or not the sorted-order is high-to-low - typename KeyT, ///< Key type - typename ValueT, ///< Value type - typename OffsetT> ///< Signed integer type for global offsets -struct DispatchSegmentedRadixSort : - DeviceRadixSortPolicy -{ - //------------------------------------------------------------------------------ - // Constants - //------------------------------------------------------------------------------ - - enum - { - // Whether this is a keys-only (or key-value) sort - KEYS_ONLY = (Equals::VALUE), - }; - - - //------------------------------------------------------------------------------ - // Parameter members - //------------------------------------------------------------------------------ - - void *d_temp_storage; ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes; ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - DoubleBuffer &d_keys; ///< [in,out] Double-buffer whose current buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys - DoubleBuffer &d_values; ///< [in,out] Double-buffer whose current buffer contains the unsorted input values and, upon return, is updated to point to the sorted output values - OffsetT num_items; ///< [in] Number of items to sort - OffsetT num_segments; ///< [in] The number of segments that comprise the sorting data - const OffsetT *d_begin_offsets; ///< [in] %Device-accessible pointer to the sequence of beginning offsets of length \p num_segments, such that d_begin_offsets[i] is the first element of the ith data segment in d_keys_* and d_values_* - const OffsetT *d_end_offsets; ///< [in] %Device-accessible pointer to the sequence of ending offsets of length \p num_segments, such that d_end_offsets[i]-1 is the last element of the ith data segment in d_keys_* and d_values_*. If d_end_offsets[i]-1 <= d_begin_offsets[i], the ith is considered empty. - int begin_bit; ///< [in] The beginning (least-significant) bit index needed for key comparison - int end_bit; ///< [in] The past-the-end (most-significant) bit index needed for key comparison - cudaStream_t stream; ///< [in] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous; ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
-    int                  ptx_version;           ///< [in] PTX version
-    bool                 is_overwrite_okay;     ///< [in] Whether it is okay to overwrite source buffers
-
-
-    //------------------------------------------------------------------------------
-    // Constructors
-    //------------------------------------------------------------------------------
-
-    /// Constructor
-    CUB_RUNTIME_FUNCTION __forceinline__
-    DispatchSegmentedRadixSort(
-        void*                   d_temp_storage,
-        size_t                  &temp_storage_bytes,
-        DoubleBuffer<KeyT>      &d_keys,
-        DoubleBuffer<ValueT>    &d_values,
-        OffsetT                 num_items,
-        OffsetT                 num_segments,
-        const OffsetT           *d_begin_offsets,
-        const OffsetT           *d_end_offsets,
-        int                     begin_bit,
-        int                     end_bit,
-        bool                    is_overwrite_okay,
-        cudaStream_t            stream,
-        bool                    debug_synchronous,
-        int                     ptx_version)
-    :
-        d_temp_storage(d_temp_storage),
-        temp_storage_bytes(temp_storage_bytes),
-        d_keys(d_keys),
-        d_values(d_values),
-        num_items(num_items),
-        num_segments(num_segments),
-        d_begin_offsets(d_begin_offsets),
-        d_end_offsets(d_end_offsets),
-        begin_bit(begin_bit),
-        end_bit(end_bit),
-        is_overwrite_okay(is_overwrite_okay),
-        stream(stream),
-        debug_synchronous(debug_synchronous),
-        ptx_version(ptx_version)
-    {}
-
-
-    //------------------------------------------------------------------------------
-    // Multi-segment invocation
-    //------------------------------------------------------------------------------
-
-    /// Invoke a three-kernel sorting pass at the current bit.
-    template <typename PassConfigT>
-    CUB_RUNTIME_FUNCTION __forceinline__
-    cudaError_t InvokePass(
-        const KeyT      *d_keys_in,
-        KeyT            *d_keys_out,
-        const ValueT    *d_values_in,
-        ValueT          *d_values_out,
-        int             &current_bit,
-        PassConfigT     &pass_config)
-    {
-        cudaError error = cudaSuccess;
-        do
-        {
-            int pass_bits = CUB_MIN(pass_config.radix_bits, (end_bit - current_bit));
-
-            // Log kernel configuration
-            if (debug_synchronous)
-                _CubLog("Invoking segmented_kernels<<<%d, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy, current bit %d, bit_grain %d\n",
-                    num_segments, pass_config.segmented_config.block_threads, (long long) stream,
-                    pass_config.segmented_config.items_per_thread, pass_config.segmented_config.sm_occupancy, current_bit, pass_bits);
-
-            pass_config.segmented_kernel<<<num_segments, pass_config.segmented_config.block_threads, 0, stream>>>(
-                d_keys_in, d_keys_out,
-                d_values_in, d_values_out,
-                d_begin_offsets, d_end_offsets, num_segments,
-                current_bit, pass_bits);
-
-            // Check for failure to launch
-            if (CubDebug(error = cudaPeekAtLastError())) break;
-
-            // Sync the stream if specified to flush runtime errors
-            if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break;
-
-            // Update current bit
-            current_bit += pass_bits;
-        }
-        while (0);
-
-        return error;
-    }
-
-
-    /// PassConfig data structure
-    template <typename SegmentedKernelT>
-    struct PassConfig
-    {
-        SegmentedKernelT    segmented_kernel;
-        KernelConfig        segmented_config;
-        int                 radix_bits;
-        int                 radix_digits;
-
-        /// Initialize pass configuration
-        template <typename SegmentedPolicyT>
-        CUB_RUNTIME_FUNCTION __forceinline__
-        cudaError_t InitPassConfig(SegmentedKernelT segmented_kernel)
-        {
-            this->segmented_kernel  = segmented_kernel;
-            this->radix_bits        = SegmentedPolicyT::RADIX_BITS;
-            this->radix_digits      = 1 << radix_bits;
-
-            return CubDebug(segmented_config.Init<SegmentedPolicyT>(segmented_kernel));
-        }
-    };
-
-
-    /// Invocation (run multiple digit passes)
-    template <
-        typename            ActivePolicyT,          ///< Umbrella policy active for the target device
-        typename            SegmentedKernelT>       ///< Function type of cub::DeviceSegmentedRadixSortKernel
-    CUB_RUNTIME_FUNCTION __forceinline__
-    cudaError_t InvokePasses(
-        SegmentedKernelT    segmented_kernel,       ///< [in] Kernel function pointer to parameterization of cub::DeviceSegmentedRadixSortKernel
-        SegmentedKernelT    alt_segmented_kernel)   ///< [in] Alternate kernel function pointer to parameterization of cub::DeviceSegmentedRadixSortKernel
-    {
-#ifndef CUB_RUNTIME_ENABLED
-        (void)segmented_kernel;
-        (void)alt_segmented_kernel;
-
-        // Kernel launch not supported from this device
-        return CubDebug(cudaErrorNotSupported );
-#else
-
-        cudaError error = cudaSuccess;
-        do
-        {
-            // Init regular and alternate kernel configurations
-            PassConfig<SegmentedKernelT> pass_config, alt_pass_config;
-            if ((error = pass_config.template InitPassConfig<typename ActivePolicyT::SegmentedPolicy>(segmented_kernel))) break;
-            if ((error = alt_pass_config.template InitPassConfig<typename ActivePolicyT::AltSegmentedPolicy>(alt_segmented_kernel))) break;
-
-            // Temporary storage allocation requirements
-            void* allocations[2];
-            size_t allocation_sizes[2] =
-            {
-                (is_overwrite_okay) ? 0 : num_items * sizeof(KeyT),                     // bytes needed for 3rd keys buffer
-                (is_overwrite_okay || (KEYS_ONLY)) ? 0 : num_items * sizeof(ValueT),    // bytes needed for 3rd values buffer
-            };
-
-            // Alias the temporary allocations from the single storage blob (or compute the necessary size of the blob)
-            if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break;
-
-            // Return if the caller is simply requesting the size of the storage allocation
-            if (d_temp_storage == NULL)
-            {
-                if (temp_storage_bytes == 0)
-                    temp_storage_bytes = 1;
-                return cudaSuccess;
-            }
-
-            // Pass planning. Run passes of the alternate digit-size configuration until we have an even multiple of our preferred digit size
-            int radix_bits         = ActivePolicyT::SegmentedPolicy::RADIX_BITS;
-            int alt_radix_bits     = ActivePolicyT::AltSegmentedPolicy::RADIX_BITS;
-            int num_bits           = end_bit - begin_bit;
-            int num_passes         = (num_bits + radix_bits - 1) / radix_bits;
-            bool is_num_passes_odd = num_passes & 1;
-            int max_alt_passes     = (num_passes * radix_bits) - num_bits;
-            int alt_end_bit        = CUB_MIN(end_bit, begin_bit + (max_alt_passes * alt_radix_bits));
-
-            DoubleBuffer<KeyT> d_keys_remaining_passes(
-                (is_overwrite_okay || is_num_passes_odd) ? d_keys.Alternate() : static_cast<KeyT*>(allocations[0]),
-                (is_overwrite_okay) ? d_keys.Current() : (is_num_passes_odd) ? static_cast<KeyT*>(allocations[0]) : d_keys.Alternate());
-
-            DoubleBuffer<ValueT> d_values_remaining_passes(
-                (is_overwrite_okay || is_num_passes_odd) ? d_values.Alternate() : static_cast<ValueT*>(allocations[1]),
-                (is_overwrite_okay) ? d_values.Current() : (is_num_passes_odd) ? static_cast<ValueT*>(allocations[1]) : d_values.Alternate());
-
-            // Run first pass, consuming from the input's current buffers
-            int current_bit = begin_bit;
-
-            if (CubDebug(error = InvokePass(
-                d_keys.Current(), d_keys_remaining_passes.Current(),
-                d_values.Current(), d_values_remaining_passes.Current(),
-                current_bit,
-                (current_bit < alt_end_bit) ? alt_pass_config : pass_config))) break;
-
-            // Run remaining passes
-            while (current_bit < end_bit)
-            {
-                if (CubDebug(error = InvokePass(
-                    d_keys_remaining_passes.d_buffers[d_keys_remaining_passes.selector], d_keys_remaining_passes.d_buffers[d_keys_remaining_passes.selector ^ 1],
-                    d_values_remaining_passes.d_buffers[d_keys_remaining_passes.selector], d_values_remaining_passes.d_buffers[d_keys_remaining_passes.selector ^ 1],
-                    current_bit,
-                    (current_bit < alt_end_bit) ?
alt_pass_config : pass_config))) break; - - // Invert selectors and update current bit - d_keys_remaining_passes.selector ^= 1; - d_values_remaining_passes.selector ^= 1; - } - - // Update selector - if (!is_overwrite_okay) { - num_passes = 1; // Sorted data always ends up in the other vector - } - - d_keys.selector = (d_keys.selector + num_passes) & 1; - d_values.selector = (d_values.selector + num_passes) & 1; - } - while (0); - - return error; - -#endif // CUB_RUNTIME_ENABLED - } - - - //------------------------------------------------------------------------------ - // Chained policy invocation - //------------------------------------------------------------------------------ - - /// Invocation - template - CUB_RUNTIME_FUNCTION __forceinline__ - cudaError_t Invoke() - { - typedef typename DispatchSegmentedRadixSort::MaxPolicy MaxPolicyT; - - // Force kernel code-generation in all compiler passes - return InvokePasses( - DeviceSegmentedRadixSortKernel, - DeviceSegmentedRadixSortKernel); - } - - - //------------------------------------------------------------------------------ - // Dispatch entrypoints - //------------------------------------------------------------------------------ - - - /// Internal dispatch routine - CUB_RUNTIME_FUNCTION __forceinline__ - static cudaError_t Dispatch( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - DoubleBuffer &d_keys, ///< [in,out] Double-buffer whose current buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys - DoubleBuffer &d_values, ///< [in,out] Double-buffer whose current buffer contains the unsorted input values and, upon return, is updated to point to the sorted output values - int num_items, ///< [in] Number of items to sort - int num_segments, ///< [in] The number of segments that comprise the sorting data - const int *d_begin_offsets, ///< [in] %Device-accessible pointer to the sequence of beginning offsets of length \p num_segments, such that d_begin_offsets[i] is the first element of the ith data segment in d_keys_* and d_values_* - const int *d_end_offsets, ///< [in] %Device-accessible pointer to the sequence of ending offsets of length \p num_segments, such that d_end_offsets[i]-1 is the last element of the ith data segment in d_keys_* and d_values_*. If d_end_offsets[i]-1 <= d_begin_offsets[i], the ith is considered empty. - int begin_bit, ///< [in] The beginning (least-significant) bit index needed for key comparison - int end_bit, ///< [in] The past-the-end (most-significant) bit index needed for key comparison - bool is_overwrite_okay, ///< [in] Whether is okay to overwrite source buffers - cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous) ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
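// [Editor's note - usage sketch, not part of the original CUB source; the
// device arrays below (d_keys_a/b, d_vals_a/b, d_offsets) and the prior
// size-query/cudaMalloc step are assumed, as in the unsegmented case.]
#if 0
DoubleBuffer<float> keys(d_keys_a, d_keys_b);
DoubleBuffer<int>   values(d_vals_a, d_vals_b);
// Segment i spans [d_offsets[i], d_offsets[i+1]), so one offsets array of
// length num_segments + 1 can serve as both begin and end sequences:
DispatchSegmentedRadixSort<false, float, int, int>::Dispatch(
    d_temp, temp_bytes, keys, values,
    num_items, num_segments, d_offsets, d_offsets + 1,
    0, int(sizeof(float)) * 8, true /*is_overwrite_okay*/, 0, false);
#endif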
- { - typedef typename DispatchSegmentedRadixSort::MaxPolicy MaxPolicyT; - - cudaError_t error; - do { - // Get PTX version - int ptx_version; - if (CubDebug(error = PtxVersion(ptx_version))) break; - - // Create dispatch functor - DispatchSegmentedRadixSort dispatch( - d_temp_storage, temp_storage_bytes, - d_keys, d_values, - num_items, num_segments, d_begin_offsets, d_end_offsets, - begin_bit, end_bit, is_overwrite_okay, - stream, debug_synchronous, ptx_version); - - // Dispatch to chained policy - if (CubDebug(error = MaxPolicyT::Invoke(ptx_version, dispatch))) break; - - } while (0); - - return error; - } -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - - diff --git a/ml-xgboost/cub/cub/device/dispatch/dispatch_reduce.cuh b/ml-xgboost/cub/cub/device/dispatch/dispatch_reduce.cuh deleted file mode 100644 index a87f905..0000000 --- a/ml-xgboost/cub/cub/device/dispatch/dispatch_reduce.cuh +++ /dev/null @@ -1,928 +0,0 @@ - -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::DeviceReduce provides device-wide, parallel operations for computing a reduction across a sequence of data items residing within device-accessible memory. 
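 *
 * [Editor's note - explanatory addition, not part of the original CUB source]
 * The reduction computed here is the usual fold: for items x0..x(n-1),
 * binary functor op and initial value init, d_out receives
 * op(init, op(...op(x0, x1)..., x(n-1))), evaluated in an unspecified
 * order (op is assumed associative).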
- */ - -#pragma once - -#include -#include - -#include "../../agent/agent_reduce.cuh" -#include "../../iterator/arg_index_input_iterator.cuh" -#include "../../thread/thread_operators.cuh" -#include "../../grid/grid_even_share.cuh" -#include "../../grid/grid_queue.cuh" -#include "../../iterator/arg_index_input_iterator.cuh" -#include "../../util_debug.cuh" -#include "../../util_device.cuh" -#include "../../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/****************************************************************************** - * Kernel entry points - *****************************************************************************/ - -/** - * Reduce region kernel entry point (multi-block). Computes privatized reductions, one per thread block. - */ -template < - typename ChainedPolicyT, ///< Chained tuning policy - typename InputIteratorT, ///< Random-access input iterator type for reading input items \iterator - typename OutputIteratorT, ///< Output iterator type for recording the reduced aggregate \iterator - typename OffsetT, ///< Signed integer type for global offsets - typename ReductionOpT> ///< Binary reduction functor type having member T operator()(const T &a, const T &b) -__launch_bounds__ (int(ChainedPolicyT::ActivePolicy::ReducePolicy::BLOCK_THREADS)) -__global__ void DeviceReduceKernel( - InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items - OutputIteratorT d_out, ///< [out] Pointer to the output aggregate - OffsetT num_items, ///< [in] Total number of input data items - GridEvenShare even_share, ///< [in] Even-share descriptor for mapping an equal number of tiles onto each thread block - GridQueue queue, ///< [in] Drain queue descriptor for dynamically mapping tile data onto thread blocks - ReductionOpT reduction_op) ///< [in] Binary reduction functor -{ - // The output value type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type OutputT; // ... else the output iterator's value type - - // Thread block type for reducing input tiles - typedef AgentReduce< - typename ChainedPolicyT::ActivePolicy::ReducePolicy, - InputIteratorT, - OutputIteratorT, - OffsetT, - ReductionOpT> - AgentReduceT; - - // Shared memory storage - __shared__ typename AgentReduceT::TempStorage temp_storage; - - // Consume input tiles - OutputT block_aggregate = AgentReduceT(temp_storage, d_in, reduction_op).ConsumeTiles( - num_items, - even_share, - queue, - Int2Type()); - - // Output result - if (threadIdx.x == 0) - d_out[blockIdx.x] = block_aggregate; -} - - -/** - * Reduce a single tile kernel entry point (single-block). Can be used to aggregate privatized threadblock reductions from a previous multi-block reduction pass. 
- */ -template < - typename ChainedPolicyT, ///< Chained tuning policy - typename InputIteratorT, ///< Random-access input iterator type for reading input items \iterator - typename OutputIteratorT, ///< Output iterator type for recording the reduced aggregate \iterator - typename OffsetT, ///< Signed integer type for global offsets - typename ReductionOpT, ///< Binary reduction functor type having member T operator()(const T &a, const T &b) - typename OuputT> ///< Data element type that is convertible to the \p value type of \p OutputIteratorT -__launch_bounds__ (int(ChainedPolicyT::ActivePolicy::SingleTilePolicy::BLOCK_THREADS), 1) -__global__ void DeviceReduceSingleTileKernel( - InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items - OutputIteratorT d_out, ///< [out] Pointer to the output aggregate - OffsetT num_items, ///< [in] Total number of input data items - ReductionOpT reduction_op, ///< [in] Binary reduction functor - OuputT init) ///< [in] The initial value of the reduction -{ - // Thread block type for reducing input tiles - typedef AgentReduce< - typename ChainedPolicyT::ActivePolicy::SingleTilePolicy, - InputIteratorT, - OutputIteratorT, - OffsetT, - ReductionOpT> - AgentReduceT; - - // Shared memory storage - __shared__ typename AgentReduceT::TempStorage temp_storage; - - // Check if empty problem - if (num_items == 0) - { - if (threadIdx.x == 0) - *d_out = init; - return; - } - - // Consume input tiles - OuputT block_aggregate = AgentReduceT(temp_storage, d_in, reduction_op).ConsumeRange( - OffsetT(0), - num_items); - - // Output result - if (threadIdx.x == 0) - *d_out = reduction_op(init, block_aggregate); -} - - -/// Normalize input iterator to segment offset -template -__device__ __forceinline__ -void NormalizeReductionOutput( - T &/*val*/, - OffsetT /*base_offset*/, - IteratorT /*itr*/) -{} - - -/// Normalize input iterator to segment offset (specialized for arg-index) -template -__device__ __forceinline__ -void NormalizeReductionOutput( - KeyValuePairT &val, - OffsetT base_offset, - ArgIndexInputIterator /*itr*/) -{ - val.key -= base_offset; -} - - -/** - * Segmented reduction (one block per segment) - */ -template < - typename ChainedPolicyT, ///< Chained tuning policy - typename InputIteratorT, ///< Random-access input iterator type for reading input items \iterator - typename OutputIteratorT, ///< Output iterator type for recording the reduced aggregate \iterator - typename OffsetT, ///< Signed integer type for global offsets - typename ReductionOpT, ///< Binary reduction functor type having member T operator()(const T &a, const T &b) - typename OutputT> ///< Data element type that is convertible to the \p value type of \p OutputIteratorT -__launch_bounds__ (int(ChainedPolicyT::ActivePolicy::ReducePolicy::BLOCK_THREADS)) -__global__ void DeviceSegmentedReduceKernel( - InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items - OutputIteratorT d_out, ///< [out] Pointer to the output aggregate - int *d_begin_offsets, ///< [in] %Device-accessible pointer to the sequence of beginning offsets of length \p num_segments, such that d_begin_offsets[i] is the first element of the ith data segment in d_keys_* and d_values_* - int *d_end_offsets, ///< [in] %Device-accessible pointer to the sequence of ending offsets of length \p num_segments, such that d_end_offsets[i]-1 is the last element of the ith data segment in d_keys_* and d_values_*. If d_end_offsets[i]-1 <= d_begin_offsets[i], the ith is considered empty. 
- int /*num_segments*/, ///< [in] The number of segments that comprise the sorting data - ReductionOpT reduction_op, ///< [in] Binary reduction functor - OutputT init) ///< [in] The initial value of the reduction -{ - // Thread block type for reducing input tiles - typedef AgentReduce< - typename ChainedPolicyT::ActivePolicy::ReducePolicy, - InputIteratorT, - OutputIteratorT, - OffsetT, - ReductionOpT> - AgentReduceT; - - // Shared memory storage - __shared__ typename AgentReduceT::TempStorage temp_storage; - - OffsetT segment_begin = d_begin_offsets[blockIdx.x]; - OffsetT segment_end = d_end_offsets[blockIdx.x]; - - // Check if empty problem - if (segment_begin == segment_end) - { - if (threadIdx.x == 0) - d_out[blockIdx.x] = init; - return; - } - - // Consume input tiles - OutputT block_aggregate = AgentReduceT(temp_storage, d_in, reduction_op).ConsumeRange( - segment_begin, - segment_end); - - // Normalize as needed - NormalizeReductionOutput(block_aggregate, segment_begin, d_in); - - if (threadIdx.x == 0) - d_out[blockIdx.x] = reduction_op(init, block_aggregate);; -} - - - - -/****************************************************************************** - * Policy - ******************************************************************************/ - -template < - typename OuputT, ///< Data type - typename OffsetT, ///< Signed integer type for global offsets - typename ReductionOpT> ///< Binary reduction functor type having member T operator()(const T &a, const T &b) -struct DeviceReducePolicy -{ - //------------------------------------------------------------------------------ - // Architecture-specific tuning policies - //------------------------------------------------------------------------------ - - /// SM13 - struct Policy130 : ChainedPolicy<130, Policy130, Policy130> - { - // ReducePolicy - typedef AgentReducePolicy< - CUB_NOMINAL_CONFIG(128, 8, OuputT), ///< Threads per block, items per thread - 2, ///< Number of items per vectorized load - BLOCK_REDUCE_RAKING, ///< Cooperative block-wide reduction algorithm to use - LOAD_DEFAULT, ///< Cache load modifier - GRID_MAPPING_EVEN_SHARE> ///< How to map tiles of input onto thread blocks - ReducePolicy; - - // SingleTilePolicy - typedef ReducePolicy SingleTilePolicy; - - // SegmentedReducePolicy - typedef ReducePolicy SegmentedReducePolicy; - }; - - - /// SM20 - struct Policy200 : ChainedPolicy<200, Policy200, Policy130> - { - // ReducePolicy (GTX 580: 178.9 GB/s @ 48M 4B items, 158.1 GB/s @ 192M 1B items) - typedef AgentReducePolicy< - CUB_NOMINAL_CONFIG(128, 8, OuputT), ///< Threads per block, items per thread - 4, ///< Number of items per vectorized load - BLOCK_REDUCE_RAKING, ///< Cooperative block-wide reduction algorithm to use - LOAD_DEFAULT, ///< Cache load modifier - GRID_MAPPING_DYNAMIC> ///< How to map tiles of input onto thread blocks - ReducePolicy; - - // SingleTilePolicy - typedef ReducePolicy SingleTilePolicy; - - // SegmentedReducePolicy - typedef ReducePolicy SegmentedReducePolicy; - }; - - - /// SM30 - struct Policy300 : ChainedPolicy<300, Policy300, Policy200> - { - // ReducePolicy (GTX670: 154.0 @ 48M 4B items) - typedef AgentReducePolicy< - CUB_NOMINAL_CONFIG(256, 20, OuputT), ///< Threads per block, items per thread - 2, ///< Number of items per vectorized load - BLOCK_REDUCE_WARP_REDUCTIONS, ///< Cooperative block-wide reduction algorithm to use - LOAD_DEFAULT, ///< Cache load modifier - GRID_MAPPING_EVEN_SHARE> ///< How to map tiles of input onto thread blocks - ReducePolicy; - - // SingleTilePolicy - typedef 
ReducePolicy SingleTilePolicy; - - // SegmentedReducePolicy - typedef ReducePolicy SegmentedReducePolicy; - }; - - - /// SM35 - struct Policy350 : ChainedPolicy<350, Policy350, Policy300> - { - // ReducePolicy (GTX Titan: 255.1 GB/s @ 48M 4B items; 228.7 GB/s @ 192M 1B items) - typedef AgentReducePolicy< - CUB_NOMINAL_CONFIG(256, 20, OuputT), ///< Threads per block, items per thread - 4, ///< Number of items per vectorized load - BLOCK_REDUCE_WARP_REDUCTIONS, ///< Cooperative block-wide reduction algorithm to use - LOAD_LDG, ///< Cache load modifier - GRID_MAPPING_DYNAMIC> ///< How to map tiles of input onto thread blocks - ReducePolicy; - - // SingleTilePolicy - typedef ReducePolicy SingleTilePolicy; - - // SegmentedReducePolicy - typedef ReducePolicy SegmentedReducePolicy; - }; - - /// SM60 - struct Policy600 : ChainedPolicy<600, Policy600, Policy350> - { - // ReducePolicy (P100: 591 GB/s @ 64M 4B items; 583 GB/s @ 256M 1B items) - typedef AgentReducePolicy< - CUB_NOMINAL_CONFIG(256, 16, OuputT), ///< Threads per block, items per thread - 4, ///< Number of items per vectorized load - BLOCK_REDUCE_WARP_REDUCTIONS, ///< Cooperative block-wide reduction algorithm to use - LOAD_LDG, ///< Cache load modifier - GRID_MAPPING_DYNAMIC> ///< How to map tiles of input onto thread blocks - ReducePolicy; - - // SingleTilePolicy - typedef ReducePolicy SingleTilePolicy; - - // SegmentedReducePolicy - typedef ReducePolicy SegmentedReducePolicy; - }; - - - /// MaxPolicy - typedef Policy600 MaxPolicy; - -}; - - - -/****************************************************************************** - * Single-problem dispatch - ******************************************************************************/ - -/** - * Utility class for dispatching the appropriately-tuned kernels for device-wide reduction - */ -template < - typename InputIteratorT, ///< Random-access input iterator type for reading input items \iterator - typename OutputIteratorT, ///< Output iterator type for recording the reduced aggregate \iterator - typename OffsetT, ///< Signed integer type for global offsets - typename ReductionOpT> ///< Binary reduction functor type having member T operator()(const T &a, const T &b) -struct DispatchReduce : - DeviceReducePolicy< - typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type, // ... else the output iterator's value type - OffsetT, - ReductionOpT> -{ - //------------------------------------------------------------------------------ - // Constants - //------------------------------------------------------------------------------ - - // Data type of output iterator - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type OutputT; // ... else the output iterator's value type - - - //------------------------------------------------------------------------------ - // Problem state - //------------------------------------------------------------------------------ - - void *d_temp_storage; ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
- size_t &temp_storage_bytes; ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - InputIteratorT d_in; ///< [in] Pointer to the input sequence of data items - OutputIteratorT d_out; ///< [out] Pointer to the output aggregate - OffsetT num_items; ///< [in] Total number of input items (i.e., length of \p d_in) - ReductionOpT reduction_op; ///< [in] Binary reduction functor - OutputT init; ///< [in] The initial value of the reduction - cudaStream_t stream; ///< [in] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous; ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - int ptx_version; ///< [in] PTX version - - //------------------------------------------------------------------------------ - // Constructor - //------------------------------------------------------------------------------ - - /// Constructor - CUB_RUNTIME_FUNCTION __forceinline__ - DispatchReduce( - void* d_temp_storage, - size_t &temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - OffsetT num_items, - ReductionOpT reduction_op, - OutputT init, - cudaStream_t stream, - bool debug_synchronous, - int ptx_version) - : - d_temp_storage(d_temp_storage), - temp_storage_bytes(temp_storage_bytes), - d_in(d_in), - d_out(d_out), - num_items(num_items), - reduction_op(reduction_op), - init(init), - stream(stream), - debug_synchronous(debug_synchronous), - ptx_version(ptx_version) - {} - - - //------------------------------------------------------------------------------ - // Small-problem (single tile) invocation - //------------------------------------------------------------------------------ - - /// Invoke a single block block to reduce in-core - template < - typename ActivePolicyT, ///< Umbrella policy active for the target device - typename SingleTileKernelT> ///< Function type of cub::DeviceReduceSingleTileKernel - CUB_RUNTIME_FUNCTION __forceinline__ - cudaError_t InvokeSingleTile( - SingleTileKernelT single_tile_kernel) ///< [in] Kernel function pointer to parameterization of cub::DeviceReduceSingleTileKernel - { -#ifndef CUB_RUNTIME_ENABLED - (void)single_tile_kernel; - - // Kernel launch not supported from this device - return CubDebug(cudaErrorNotSupported ); -#else - cudaError error = cudaSuccess; - do - { - // Return if the caller is simply requesting the size of the storage allocation - if (d_temp_storage == NULL) - { - temp_storage_bytes = 1; - break; - } - - // Log single_reduce_sweep_kernel configuration - if (debug_synchronous) _CubLog("Invoking DeviceReduceSingleTileKernel<<<1, %d, 0, %lld>>>(), %d items per thread\n", - ActivePolicyT::SingleTilePolicy::BLOCK_THREADS, - (long long) stream, - ActivePolicyT::SingleTilePolicy::ITEMS_PER_THREAD); - - // Invoke single_reduce_sweep_kernel - single_tile_kernel<<<1, ActivePolicyT::SingleTilePolicy::BLOCK_THREADS, 0, stream>>>( - d_in, - d_out, - num_items, - reduction_op, - init); - - // Check for failure to launch - if (CubDebug(error = cudaPeekAtLastError())) break; - - // Sync the stream if specified to flush runtime errors - if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; - } - while (0); - - return error; - -#endif // CUB_RUNTIME_ENABLED - } - - - //------------------------------------------------------------------------------ - // Normal problem size invocation (two-pass) - 
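// [Editor's note - explanatory addition, not part of the original CUB
// source] Two-pass strategy: the first kernel launches up to max_blocks
// thread blocks, each folding its tiles into one privatized partial; the
// single-tile kernel then reduces those partials with one block. E.g.
// 64M items at 256 threads x 16 items/thread (tile_size 4096) is ~16384
// tiles, but the grid is clamped to the occupancy bound, so the second
// pass only re-reduces a few hundred partials.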
//------------------------------------------------------------------------------ - - /// Invoke two-passes to reduce - template < - typename ActivePolicyT, ///< Umbrella policy active for the target device - typename ReduceKernelT, ///< Function type of cub::DeviceReduceKernel - typename SingleTileKernelT, ///< Function type of cub::DeviceReduceSingleTileKernel - typename FillAndResetDrainKernelT> ///< Function type of cub::FillAndResetDrainKernel - CUB_RUNTIME_FUNCTION __forceinline__ - cudaError_t InvokePasses( - ReduceKernelT reduce_kernel, ///< [in] Kernel function pointer to parameterization of cub::DeviceReduceKernel - SingleTileKernelT single_tile_kernel, ///< [in] Kernel function pointer to parameterization of cub::DeviceReduceSingleTileKernel - FillAndResetDrainKernelT prepare_drain_kernel) ///< [in] Kernel function pointer to parameterization of cub::FillAndResetDrainKernel - { -#ifndef CUB_RUNTIME_ENABLED - (void) reduce_kernel; - (void) single_tile_kernel; - (void) prepare_drain_kernel; - - // Kernel launch not supported from this device - return CubDebug(cudaErrorNotSupported ); -#else - - cudaError error = cudaSuccess; - do - { - // Get device ordinal - int device_ordinal; - if (CubDebug(error = cudaGetDevice(&device_ordinal))) break; - - // Get SM count - int sm_count; - if (CubDebug(error = cudaDeviceGetAttribute (&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break; - - // Init regular kernel configuration - KernelConfig reduce_config; - if (CubDebug(error = reduce_config.Init(reduce_kernel))) break; - int reduce_device_occupancy = reduce_config.sm_occupancy * sm_count; - - // Even-share work distribution - int max_blocks = reduce_device_occupancy * CUB_SUBSCRIPTION_FACTOR(ptx_version); - GridEvenShare even_share(num_items, max_blocks, reduce_config.tile_size); - - // Temporary storage allocation requirements - void* allocations[2]; - size_t allocation_sizes[2] = - { - max_blocks * sizeof(OutputT), // bytes needed for privatized block reductions - GridQueue::AllocationSize() // bytes needed for grid queue descriptor - }; - - // Alias the temporary allocations from the single storage blob (or compute the necessary size of the blob) - if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break; - if (d_temp_storage == NULL) - { - // Return if the caller is simply requesting the size of the storage allocation - return cudaSuccess; - } - - // Alias the allocation for the privatized per-block reductions - OutputT *d_block_reductions = (OutputT*) allocations[0]; - - // Alias the allocation for the grid queue descriptor - GridQueue queue(allocations[1]); - - // Get grid size for device_reduce_sweep_kernel - int reduce_grid_size; - if (ActivePolicyT::ReducePolicy::GRID_MAPPING == GRID_MAPPING_EVEN_SHARE) - { - // Work is distributed evenly - reduce_grid_size = even_share.grid_size; - } - else if (ActivePolicyT::ReducePolicy::GRID_MAPPING == GRID_MAPPING_DYNAMIC) - { - // Work is distributed dynamically - int num_tiles = (num_items + reduce_config.tile_size - 1) / reduce_config.tile_size; - reduce_grid_size = (num_tiles < reduce_device_occupancy) ? 
- num_tiles : // Not enough to fill the device with threadblocks - reduce_device_occupancy; // Fill the device with threadblocks - - // Prepare the dynamic queue descriptor if necessary - if (debug_synchronous) _CubLog("Invoking prepare_drain_kernel<<<1, 1, 0, %lld>>>()\n", (long long) stream); - - // Invoke prepare_drain_kernel - prepare_drain_kernel<<<1, 1, 0, stream>>>(queue, num_items); - - // Check for failure to launch - if (CubDebug(error = cudaPeekAtLastError())) break; - - // Sync the stream if specified to flush runtime errors - if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; - } - else - { - error = CubDebug(cudaErrorNotSupported ); break; - } - - // Log device_reduce_sweep_kernel configuration - if (debug_synchronous) _CubLog("Invoking DeviceReduceKernel<<<%d, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n", - reduce_grid_size, - ActivePolicyT::ReducePolicy::BLOCK_THREADS, - (long long) stream, - ActivePolicyT::ReducePolicy::ITEMS_PER_THREAD, - reduce_config.sm_occupancy); - - // Invoke DeviceReduceKernel - reduce_kernel<<>>( - d_in, - d_block_reductions, - num_items, - even_share, - queue, - reduction_op); - - // Check for failure to launch - if (CubDebug(error = cudaPeekAtLastError())) break; - - // Sync the stream if specified to flush runtime errors - if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; - - // Log single_reduce_sweep_kernel configuration - if (debug_synchronous) _CubLog("Invoking DeviceReduceSingleTileKernel<<<1, %d, 0, %lld>>>(), %d items per thread\n", - ActivePolicyT::SingleTilePolicy::BLOCK_THREADS, - (long long) stream, - ActivePolicyT::SingleTilePolicy::ITEMS_PER_THREAD); - - // Invoke DeviceReduceSingleTileKernel - single_tile_kernel<<<1, ActivePolicyT::SingleTilePolicy::BLOCK_THREADS, 0, stream>>>( - d_block_reductions, - d_out, - reduce_grid_size, - reduction_op, - init); - - // Check for failure to launch - if (CubDebug(error = cudaPeekAtLastError())) break; - - // Sync the stream if specified to flush runtime errors - if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; - } - while (0); - - return error; - -#endif // CUB_RUNTIME_ENABLED - - } - - - //------------------------------------------------------------------------------ - // Chained policy invocation - //------------------------------------------------------------------------------ - - /// Invocation - template - CUB_RUNTIME_FUNCTION __forceinline__ - cudaError_t Invoke() - { - typedef typename ActivePolicyT::SingleTilePolicy SingleTilePolicyT; - typedef typename DispatchReduce::MaxPolicy MaxPolicyT; - - // Force kernel code-generation in all compiler passes - if (num_items <= (SingleTilePolicyT::BLOCK_THREADS * SingleTilePolicyT::ITEMS_PER_THREAD)) - { - // Small, single tile size - return InvokeSingleTile( - DeviceReduceSingleTileKernel); - } - else - { - // Regular size - return InvokePasses( - DeviceReduceKernel, - DeviceReduceSingleTileKernel, - FillAndResetDrainKernel); - } - } - - - //------------------------------------------------------------------------------ - // Dispatch entrypoints - //------------------------------------------------------------------------------ - - /** - * Internal dispatch routine for computing a device-wide reduction - */ - CUB_RUNTIME_FUNCTION __forceinline__ - static cudaError_t Dispatch( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
- size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items - OutputIteratorT d_out, ///< [out] Pointer to the output aggregate - OffsetT num_items, ///< [in] Total number of input items (i.e., length of \p d_in) - ReductionOpT reduction_op, ///< [in] Binary reduction functor - OutputT init, ///< [in] The initial value of the reduction - cudaStream_t stream, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - { - typedef typename DispatchReduce::MaxPolicy MaxPolicyT; - - cudaError error = cudaSuccess; - do - { - // Get PTX version - int ptx_version; - if (CubDebug(error = PtxVersion(ptx_version))) break; - - // Create dispatch functor - DispatchReduce dispatch( - d_temp_storage, temp_storage_bytes, - d_in, d_out, num_items, reduction_op, init, - stream, debug_synchronous, ptx_version); - - // Dispatch to chained policy - if (CubDebug(error = MaxPolicyT::Invoke(ptx_version, dispatch))) break; - } - while (0); - - return error; - } -}; - - - -/****************************************************************************** - * Segmented dispatch - ******************************************************************************/ - -/** - * Utility class for dispatching the appropriately-tuned kernels for device-wide reduction - */ -template < - typename InputIteratorT, ///< Random-access input iterator type for reading input items \iterator - typename OutputIteratorT, ///< Output iterator type for recording the reduced aggregate \iterator - typename OffsetT, ///< Signed integer type for global offsets - typename ReductionOpT> ///< Binary reduction functor type having member T operator()(const T &a, const T &b) -struct DispatchSegmentedReduce : - DeviceReducePolicy< - typename std::iterator_traits::value_type, - OffsetT, - ReductionOpT> -{ - //------------------------------------------------------------------------------ - // Constants - //------------------------------------------------------------------------------ - - /// The output value type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type OutputT; // ... else the output iterator's value type - - - //------------------------------------------------------------------------------ - // Problem state - //------------------------------------------------------------------------------ - - void *d_temp_storage; ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
- size_t &temp_storage_bytes; ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - InputIteratorT d_in; ///< [in] Pointer to the input sequence of data items - OutputIteratorT d_out; ///< [out] Pointer to the output aggregate - OffsetT num_segments; ///< [in] The number of segments that comprise the sorting data - OffsetT *d_begin_offsets; ///< [in] %Device-accessible pointer to the sequence of beginning offsets of length \p num_segments, such that d_begin_offsets[i] is the first element of the ith data segment in d_keys_* and d_values_* - OffsetT *d_end_offsets; ///< [in] %Device-accessible pointer to the sequence of ending offsets of length \p num_segments, such that d_end_offsets[i]-1 is the last element of the ith data segment in d_keys_* and d_values_*. If d_end_offsets[i]-1 <= d_begin_offsets[i], the ith is considered empty. - ReductionOpT reduction_op; ///< [in] Binary reduction functor - OutputT init; ///< [in] The initial value of the reduction - cudaStream_t stream; ///< [in] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous; ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - int ptx_version; ///< [in] PTX version - - //------------------------------------------------------------------------------ - // Constructor - //------------------------------------------------------------------------------ - - /// Constructor - CUB_RUNTIME_FUNCTION __forceinline__ - DispatchSegmentedReduce( - void* d_temp_storage, - size_t &temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - OffsetT num_segments, - OffsetT *d_begin_offsets, - OffsetT *d_end_offsets, - ReductionOpT reduction_op, - OutputT init, - cudaStream_t stream, - bool debug_synchronous, - int ptx_version) - : - d_temp_storage(d_temp_storage), - temp_storage_bytes(temp_storage_bytes), - d_in(d_in), - d_out(d_out), - num_segments(num_segments), - d_begin_offsets(d_begin_offsets), - d_end_offsets(d_end_offsets), - reduction_op(reduction_op), - init(init), - stream(stream), - debug_synchronous(debug_synchronous), - ptx_version(ptx_version) - {} - - - - //------------------------------------------------------------------------------ - // Chained policy invocation - //------------------------------------------------------------------------------ - - /// Invocation - template < - typename ActivePolicyT, ///< Umbrella policy active for the target device - typename DeviceSegmentedReduceKernelT> ///< Function type of cub::DeviceSegmentedReduceKernel - CUB_RUNTIME_FUNCTION __forceinline__ - cudaError_t InvokePasses( - DeviceSegmentedReduceKernelT segmented_reduce_kernel) ///< [in] Kernel function pointer to parameterization of cub::DeviceSegmentedReduceKernel - { -#ifndef CUB_RUNTIME_ENABLED - (void)segmented_reduce_kernel; - // Kernel launch not supported from this device - return CubDebug(cudaErrorNotSupported ); -#else - cudaError error = cudaSuccess; - do - { - // Return if the caller is simply requesting the size of the storage allocation - if (d_temp_storage == NULL) - { - temp_storage_bytes = 1; - return cudaSuccess; - } - - // Init kernel configuration - KernelConfig segmented_reduce_config; - if (CubDebug(error = segmented_reduce_config.Init(segmented_reduce_kernel))) break; - - // Log device_reduce_sweep_kernel configuration - if (debug_synchronous) _CubLog("Invoking SegmentedDeviceReduceKernel<<<%d, %d, 0, %lld>>>(), %d items per 
thread, %d SM occupancy\n", - num_segments, - ActivePolicyT::SegmentedReducePolicy::BLOCK_THREADS, - (long long) stream, - ActivePolicyT::SegmentedReducePolicy::ITEMS_PER_THREAD, - segmented_reduce_config.sm_occupancy); - - // Invoke DeviceReduceKernel - segmented_reduce_kernel<<>>( - d_in, - d_out, - d_begin_offsets, - d_end_offsets, - num_segments, - reduction_op, - init); - - // Check for failure to launch - if (CubDebug(error = cudaPeekAtLastError())) break; - - // Sync the stream if specified to flush runtime errors - if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; - } - while (0); - - return error; - -#endif // CUB_RUNTIME_ENABLED - - } - - - /// Invocation - template - CUB_RUNTIME_FUNCTION __forceinline__ - cudaError_t Invoke() - { - typedef typename DispatchSegmentedReduce::MaxPolicy MaxPolicyT; - - // Force kernel code-generation in all compiler passes - return InvokePasses( - DeviceSegmentedReduceKernel); - } - - - //------------------------------------------------------------------------------ - // Dispatch entrypoints - //------------------------------------------------------------------------------ - - /** - * Internal dispatch routine for computing a device-wide reduction - */ - CUB_RUNTIME_FUNCTION __forceinline__ - static cudaError_t Dispatch( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items - OutputIteratorT d_out, ///< [out] Pointer to the output aggregate - int num_segments, ///< [in] The number of segments that comprise the sorting data - int *d_begin_offsets, ///< [in] %Device-accessible pointer to the sequence of beginning offsets of length \p num_segments, such that d_begin_offsets[i] is the first element of the ith data segment in d_keys_* and d_values_* - int *d_end_offsets, ///< [in] %Device-accessible pointer to the sequence of ending offsets of length \p num_segments, such that d_end_offsets[i]-1 is the last element of the ith data segment in d_keys_* and d_values_*. If d_end_offsets[i]-1 <= d_begin_offsets[i], the ith is considered empty. - ReductionOpT reduction_op, ///< [in] Binary reduction functor - OutputT init, ///< [in] The initial value of the reduction - cudaStream_t stream, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
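// [Editor's note - usage sketch, not part of the original CUB source;
// buffer names are assumed. Note the early-out below: num_segments <= 0
// completes immediately, doing no work and requesting no temp storage.]
#if 0
// Per-segment sums of d_in, one float written per segment:
DispatchSegmentedReduce<const float*, float*, int, cub::Sum>::Dispatch(
    d_temp, temp_bytes, d_in, d_out,
    num_segments, d_offsets, d_offsets + 1,
    cub::Sum(), 0.0f, 0 /*stream*/, false /*debug_synchronous*/);
#endif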
-    {
-        typedef typename DispatchSegmentedReduce::MaxPolicy MaxPolicyT;
-
-        if (num_segments <= 0)
-            return cudaSuccess;
-
-        cudaError error = cudaSuccess;
-        do
-        {
-            // Get PTX version
-            int ptx_version;
-            if (CubDebug(error = PtxVersion(ptx_version))) break;
-
-            // Create dispatch functor
-            DispatchSegmentedReduce dispatch(
-                d_temp_storage, temp_storage_bytes,
-                d_in, d_out,
-                num_segments, d_begin_offsets, d_end_offsets,
-                reduction_op, init,
-                stream, debug_synchronous, ptx_version);
-
-            // Dispatch to chained policy
-            if (CubDebug(error = MaxPolicyT::Invoke(ptx_version, dispatch))) break;
-        }
-        while (0);
-
-        return error;
-    }
-};
-
-
-
-}               // CUB namespace
-CUB_NS_POSTFIX  // Optional outer namespace(s)
-
-
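The deleted dispatch layer above follows CUB's two-phase temporary-storage protocol: a first call with d_temp_storage == NULL only records the required temp_storage_bytes and does no work, and a second call launches one thread block per segment. As a reference point, here is a minimal sketch of that protocol through the public cub::DeviceSegmentedReduce::Sum entry point that this file backs; the sample data and sizes are illustrative assumptions, and error checking is omitted:

    #include <cub/cub.cuh>

    int main()
    {
        // Assumed sample: 7 items in 3 segments; segment 1 ([3,3)) is empty
        int h_in[7]      = {8, 6, 7, 5, 3, 0, 9};
        int h_offsets[4] = {0, 3, 3, 7};
        int *d_in, *d_out, *d_offsets;
        cudaMalloc(&d_in, sizeof(h_in));
        cudaMalloc(&d_out, 3 * sizeof(int));
        cudaMalloc(&d_offsets, sizeof(h_offsets));
        cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
        cudaMemcpy(d_offsets, h_offsets, sizeof(h_offsets), cudaMemcpyHostToDevice);

        // Phase 1: NULL temp storage -> only temp_storage_bytes is written
        void   *d_temp_storage     = NULL;
        size_t  temp_storage_bytes = 0;
        cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes,
            d_in, d_out, 3, d_offsets, d_offsets + 1);

        // Phase 2: allocate and run the segmented reduction (one block per segment)
        cudaMalloc(&d_temp_storage, temp_storage_bytes);
        cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes,
            d_in, d_out, 3, d_offsets, d_offsets + 1);
        cudaDeviceSynchronize();    // d_out = {21, 0, 17}
        return 0;
    }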
diff --git a/ml-xgboost/cub/cub/device/dispatch/dispatch_reduce_by_key.cuh b/ml-xgboost/cub/cub/device/dispatch/dispatch_reduce_by_key.cuh
deleted file mode 100644
index 63ba62b..0000000
--- a/ml-xgboost/cub/cub/device/dispatch/dispatch_reduce_by_key.cuh
+++ /dev/null
@@ -1,554 +0,0 @@
-
-/******************************************************************************
- * Copyright (c) 2011, Duane Merrill.  All rights reserved.
- * Copyright (c) 2011-2016, NVIDIA CORPORATION.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of the NVIDIA CORPORATION nor the
- *       names of its contributors may be used to endorse or promote products
- *       derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- ******************************************************************************/
-
-/**
- * \file
- * cub::DeviceReduceByKey provides device-wide, parallel operations for reducing segments of values residing within device-accessible memory.
- */
-
-#pragma once
-
-#include <stdio.h>
-#include <iterator>
-
-#include "dispatch_scan.cuh"
-#include "../../agent/agent_reduce_by_key.cuh"
-#include "../../thread/thread_operators.cuh"
-#include "../../grid/grid_queue.cuh"
-#include "../../util_device.cuh"
-#include "../../util_namespace.cuh"
-
-/// Optional outer namespace(s)
-CUB_NS_PREFIX
-
-/// CUB namespace
-namespace cub {
-
-/******************************************************************************
- * Kernel entry points
- *****************************************************************************/
-
-/**
- * Multi-block reduce-by-key sweep kernel entry point
- */
-template <
-    typename AgentReduceByKeyPolicyT,       ///< Parameterized AgentReduceByKeyPolicyT tuning policy type
-    typename KeysInputIteratorT,            ///< Random-access input iterator type for keys
-    typename UniqueOutputIteratorT,         ///< Random-access output iterator type for keys
-    typename ValuesInputIteratorT,          ///< Random-access input iterator type for values
-    typename AggregatesOutputIteratorT,     ///< Random-access output iterator type for values
-    typename NumRunsOutputIteratorT,        ///< Output iterator type for recording number of segments encountered
-    typename ScanTileStateT,                ///< Tile status interface type
-    typename EqualityOpT,                   ///< KeyT equality operator type
-    typename ReductionOpT,                  ///< ValueT reduction operator type
-    typename OffsetT>                       ///< Signed integer type for global offsets
-__launch_bounds__ (int(AgentReduceByKeyPolicyT::BLOCK_THREADS))
-__global__ void DeviceReduceByKeyKernel(
-    KeysInputIteratorT d_keys_in,               ///< Pointer to the input sequence of keys
-    UniqueOutputIteratorT d_unique_out,         ///< Pointer to the output sequence of unique keys (one key per run)
-    ValuesInputIteratorT d_values_in,           ///< Pointer to the input sequence of corresponding values
-    AggregatesOutputIteratorT d_aggregates_out, ///< Pointer to the output sequence of value aggregates (one aggregate per run)
-    NumRunsOutputIteratorT d_num_runs_out,      ///< Pointer to total number of runs encountered (i.e., the length of d_unique_out)
-    ScanTileStateT tile_state,                  ///< Tile status interface
-    int start_tile,                             ///< The starting tile for the current grid
-    EqualityOpT equality_op,                    ///< KeyT equality operator
-    ReductionOpT reduction_op,                  ///< ValueT reduction operator
-    OffsetT num_items)                          ///< Total number of items to select from
-{
-    // Thread block type for reducing tiles of value segments
-    typedef AgentReduceByKey<
-            AgentReduceByKeyPolicyT,
-            KeysInputIteratorT,
-            UniqueOutputIteratorT,
-            ValuesInputIteratorT,
-            AggregatesOutputIteratorT,
-            NumRunsOutputIteratorT,
-            EqualityOpT,
-            ReductionOpT,
-            OffsetT>
-        AgentReduceByKeyT;
-
-    // Shared memory for AgentReduceByKey
-    __shared__ typename AgentReduceByKeyT::TempStorage temp_storage;
-
-    // Process tiles
-    AgentReduceByKeyT(temp_storage, d_keys_in, d_unique_out, d_values_in, d_aggregates_out, d_num_runs_out, equality_op, reduction_op).ConsumeRange(
-        num_items,
-        tile_state,
-        start_tile);
-}
-
-
-
-
-/******************************************************************************
- * Dispatch
- ******************************************************************************/
-
-/**
- * Utility class for dispatching the appropriately-tuned kernels for DeviceReduceByKey
- */
-template <
-    typename KeysInputIteratorT,            ///< Random-access input iterator type for keys
-    typename UniqueOutputIteratorT,         ///< Random-access output iterator type for keys
-    typename ValuesInputIteratorT,          ///< Random-access input iterator type for values
-    typename AggregatesOutputIteratorT,     ///< Random-access output iterator type for values
-    typename NumRunsOutputIteratorT,        ///< Output iterator type for recording number of segments encountered
-    typename EqualityOpT,                   ///< KeyT equality operator type
-    typename ReductionOpT,                  ///< ValueT reduction operator type
-    typename OffsetT>                       ///< Signed integer type for global offsets
-struct DispatchReduceByKey
-{
-    //-------------------------------------------------------------------------
-    // Types and constants
-    //-------------------------------------------------------------------------
-
-    // The input keys type
-    typedef typename std::iterator_traits<KeysInputIteratorT>::value_type KeyInputT;
-
-    // The output keys type
-    typedef typename If<(Equals<typename std::iterator_traits<UniqueOutputIteratorT>::value_type, void>::VALUE),    // KeyOutputT = (if output iterator's value type is void) ?
-        typename std::iterator_traits<KeysInputIteratorT>::value_type,                                              // ... then the input iterator's value type,
-        typename std::iterator_traits<UniqueOutputIteratorT>::value_type>::Type KeyOutputT;                         // ... else the output iterator's value type
-
-    // The input values type
-    typedef typename std::iterator_traits<ValuesInputIteratorT>::value_type ValueInputT;
-
-    // The output values type
-    typedef typename If<(Equals<typename std::iterator_traits<AggregatesOutputIteratorT>::value_type, void>::VALUE),    // ValueOutputT = (if output iterator's value type is void) ?
-        typename std::iterator_traits<ValuesInputIteratorT>::value_type,                                                // ... then the input iterator's value type,
-        typename std::iterator_traits<AggregatesOutputIteratorT>::value_type>::Type ValueOutputT;                       // ... else the output iterator's value type
-
-    enum
-    {
-        INIT_KERNEL_THREADS     = 128,
-        MAX_INPUT_BYTES         = CUB_MAX(sizeof(KeyOutputT), sizeof(ValueOutputT)),
-        COMBINED_INPUT_BYTES    = sizeof(KeyOutputT) + sizeof(ValueOutputT),
-    };
-
-    // Tile status descriptor interface type
-    typedef ReduceByKeyScanTileState<ValueOutputT, OffsetT> ScanTileStateT;
-
-
-    //-------------------------------------------------------------------------
-    // Tuning policies
-    //-------------------------------------------------------------------------
-
-    /// SM35
-    struct Policy350
-    {
-        enum {
-            NOMINAL_4B_ITEMS_PER_THREAD = 6,
-            ITEMS_PER_THREAD = (MAX_INPUT_BYTES <= 8) ? 6 : CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, ((NOMINAL_4B_ITEMS_PER_THREAD * 8) + COMBINED_INPUT_BYTES - 1) / COMBINED_INPUT_BYTES)),
-        };
-
-        typedef AgentReduceByKeyPolicy<
-                128,
-                ITEMS_PER_THREAD,
-                BLOCK_LOAD_DIRECT,
-                LOAD_LDG,
-                BLOCK_SCAN_WARP_SCANS>
-            ReduceByKeyPolicyT;
-    };
-
-    /// SM30
-    struct Policy300
-    {
-        enum {
-            NOMINAL_4B_ITEMS_PER_THREAD = 6,
-            ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, ((NOMINAL_4B_ITEMS_PER_THREAD * 8) + COMBINED_INPUT_BYTES - 1) / COMBINED_INPUT_BYTES)),
-        };
-
-        typedef AgentReduceByKeyPolicy<
-                128,
-                ITEMS_PER_THREAD,
-                BLOCK_LOAD_WARP_TRANSPOSE,
-                LOAD_DEFAULT,
-                BLOCK_SCAN_WARP_SCANS>
-            ReduceByKeyPolicyT;
-    };
-
-    /// SM20
-    struct Policy200
-    {
-        enum {
-            NOMINAL_4B_ITEMS_PER_THREAD = 11,
-            ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, ((NOMINAL_4B_ITEMS_PER_THREAD * 8) + COMBINED_INPUT_BYTES - 1) / COMBINED_INPUT_BYTES)),
-        };
-
-        typedef AgentReduceByKeyPolicy<
-                128,
-                ITEMS_PER_THREAD,
-                BLOCK_LOAD_WARP_TRANSPOSE,
-                LOAD_DEFAULT,
-                BLOCK_SCAN_WARP_SCANS>
-            ReduceByKeyPolicyT;
-    };
-
-    /// SM13
-    struct Policy130
-    {
-        enum {
-            NOMINAL_4B_ITEMS_PER_THREAD = 7,
-            ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, ((NOMINAL_4B_ITEMS_PER_THREAD * 8) + COMBINED_INPUT_BYTES - 1) / COMBINED_INPUT_BYTES)),
-        };
-
-        typedef AgentReduceByKeyPolicy<
-                128,
-                ITEMS_PER_THREAD,
-                BLOCK_LOAD_WARP_TRANSPOSE,
-                LOAD_DEFAULT,
-                BLOCK_SCAN_WARP_SCANS>
-            ReduceByKeyPolicyT;
-    };
-
-    /// SM11
-    struct Policy110
-    {
-        enum {
-            NOMINAL_4B_ITEMS_PER_THREAD = 5,
-            ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 8) / COMBINED_INPUT_BYTES)),
-        };
-
-        typedef AgentReduceByKeyPolicy<
-                64,
-                ITEMS_PER_THREAD,
-                BLOCK_LOAD_WARP_TRANSPOSE,
-                LOAD_DEFAULT,
-                BLOCK_SCAN_RAKING>
-            ReduceByKeyPolicyT;
-    };
-
-
-    /******************************************************************************
-     * Tuning policies of current PTX compiler pass
-     ******************************************************************************/
-
-#if (CUB_PTX_ARCH >= 350)
-    typedef Policy350 PtxPolicy;
-
-#elif (CUB_PTX_ARCH >= 300)
-    typedef Policy300 PtxPolicy;
-
-#elif (CUB_PTX_ARCH >= 200)
-    typedef Policy200 PtxPolicy;
-
-#elif (CUB_PTX_ARCH >= 130)
-    typedef Policy130 PtxPolicy;
-
-#else
-    typedef Policy110 PtxPolicy;
-
-#endif
-
-    // "Opaque" policies (whose parameterizations aren't reflected in the type signature)
-    struct PtxReduceByKeyPolicy : PtxPolicy::ReduceByKeyPolicyT {};
-
-
-    /******************************************************************************
-     * Utilities
-     ******************************************************************************/
-
-    /**
-     * Initialize kernel dispatch configurations with the policies corresponding to the PTX assembly we will use
-     */
-    template <typename KernelConfig>
-    CUB_RUNTIME_FUNCTION __forceinline__
-    static void InitConfigs(
-        int             ptx_version,
-        KernelConfig    &reduce_by_key_config)
-    {
-    #if (CUB_PTX_ARCH > 0)
-        (void)ptx_version;
-
-        // We're on the device, so initialize the kernel dispatch configurations with the current PTX policy
-        reduce_by_key_config.template Init<PtxReduceByKeyPolicy>();
-
-    #else
-
-        // We're on the host, so lookup and initialize the kernel dispatch configurations with the policies that match the device's PTX version
-        if (ptx_version >= 350)
-        {
-            reduce_by_key_config.template Init<typename Policy350::ReduceByKeyPolicyT>();
-        }
-        else if (ptx_version >= 300)
-        {
-            reduce_by_key_config.template Init<typename Policy300::ReduceByKeyPolicyT>();
-        }
-        else if (ptx_version >= 200)
-        {
-            reduce_by_key_config.template Init<typename Policy200::ReduceByKeyPolicyT>();
-        }
-        else if (ptx_version >= 130)
-        {
-            reduce_by_key_config.template Init<typename Policy130::ReduceByKeyPolicyT>();
-        }
-        else
-        {
-            reduce_by_key_config.template Init<typename Policy110::ReduceByKeyPolicyT>();
-        }
-
-    #endif
-    }
-
-
-    /**
-     * Kernel dispatch configuration.
-     */
-    struct KernelConfig
-    {
-        int block_threads;
-        int items_per_thread;
-        int tile_items;
-
-        template <typename PolicyT>
-        CUB_RUNTIME_FUNCTION __forceinline__
-        void Init()
-        {
-            block_threads       = PolicyT::BLOCK_THREADS;
-            items_per_thread    = PolicyT::ITEMS_PER_THREAD;
-            tile_items          = block_threads * items_per_thread;
-        }
-    };
-
-
-    //---------------------------------------------------------------------
-    // Dispatch entrypoints
-    //---------------------------------------------------------------------
-
-    /**
-     * Internal dispatch routine for computing a device-wide reduce-by-key using the
-     * specified kernel functions.
-     */
-    template <
-        typename ScanInitKernelT,       ///< Function type of cub::DeviceScanInitKernel
-        typename ReduceByKeyKernelT>    ///< Function type of cub::DeviceReduceByKeyKernelT
-    CUB_RUNTIME_FUNCTION __forceinline__
-    static cudaError_t Dispatch(
-        void* d_temp_storage,                       ///< [in] %Device-accessible allocation of temporary storage.  When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
-        size_t& temp_storage_bytes,                 ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
-        KeysInputIteratorT d_keys_in,               ///< [in] Pointer to the input sequence of keys
-        UniqueOutputIteratorT d_unique_out,         ///< [out] Pointer to the output sequence of unique keys (one key per run)
-        ValuesInputIteratorT d_values_in,           ///< [in] Pointer to the input sequence of corresponding values
-        AggregatesOutputIteratorT d_aggregates_out, ///< [out] Pointer to the output sequence of value aggregates (one aggregate per run)
-        NumRunsOutputIteratorT d_num_runs_out,      ///< [out] Pointer to total number of runs encountered (i.e., the length of d_unique_out)
-        EqualityOpT equality_op,                    ///< [in] KeyT equality operator
-        ReductionOpT reduction_op,                  ///< [in] ValueT reduction operator
-        OffsetT num_items,                          ///< [in] Total number of items to select from
-        cudaStream_t stream,                        ///< [in] CUDA stream to launch kernels within.  Default is stream0.
-        bool debug_synchronous,                     ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors.  Also causes launch configurations to be printed to the console.  Default is \p false.
-        int /*ptx_version*/,                        ///< [in] PTX version of dispatch kernels
-        ScanInitKernelT init_kernel,                ///< [in] Kernel function pointer to parameterization of cub::DeviceScanInitKernel
-        ReduceByKeyKernelT reduce_by_key_kernel,    ///< [in] Kernel function pointer to parameterization of cub::DeviceReduceByKeyKernel
-        KernelConfig reduce_by_key_config)          ///< [in] Dispatch parameters that match the policy that \p reduce_by_key_kernel was compiled for
-    {
-
-#ifndef CUB_RUNTIME_ENABLED
-        (void)d_temp_storage;
-        (void)temp_storage_bytes;
-        (void)d_keys_in;
-        (void)d_unique_out;
-        (void)d_values_in;
-        (void)d_aggregates_out;
-        (void)d_num_runs_out;
-        (void)equality_op;
-        (void)reduction_op;
-        (void)num_items;
-        (void)stream;
-        (void)debug_synchronous;
-        (void)init_kernel;
-        (void)reduce_by_key_kernel;
-        (void)reduce_by_key_config;
-
-        // Kernel launch not supported from this device
-        return CubDebug(cudaErrorNotSupported);
-
-#else
-
-        cudaError error = cudaSuccess;
-        do
-        {
-            // Get device ordinal
-            int device_ordinal;
-            if (CubDebug(error = cudaGetDevice(&device_ordinal))) break;
-
-            // Get SM count
-            int sm_count;
-            if (CubDebug(error = cudaDeviceGetAttribute (&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break;
-
-            // Number of input tiles
-            int tile_size = reduce_by_key_config.block_threads * reduce_by_key_config.items_per_thread;
-            int num_tiles = (num_items + tile_size - 1) / tile_size;
-
-            // Specify temporary storage allocation requirements
-            size_t allocation_sizes[1];
-            if (CubDebug(error = ScanTileStateT::AllocationSize(num_tiles, allocation_sizes[0]))) break;    // bytes needed for tile status descriptors
-
-            // Compute allocation pointers into the single storage blob (or compute the necessary size of the blob)
-            void* allocations[1];
-            if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break;
-            if (d_temp_storage == NULL)
-            {
-                // Return if the caller is simply requesting the size of the storage allocation
-                break;
-            }
-
-            // Construct the tile status interface
-            ScanTileStateT tile_state;
-            if (CubDebug(error = tile_state.Init(num_tiles, allocations[0], allocation_sizes[0]))) break;
-
-            // Log init_kernel configuration
-            int init_grid_size = CUB_MAX(1, (num_tiles + INIT_KERNEL_THREADS - 1) / INIT_KERNEL_THREADS);
-            if (debug_synchronous) _CubLog("Invoking init_kernel<<<%d, %d, 0, %lld>>>()\n", init_grid_size, INIT_KERNEL_THREADS, (long long) stream);
-
-            // Invoke init_kernel to initialize tile descriptors
-            init_kernel<<<init_grid_size, INIT_KERNEL_THREADS, 0, stream>>>(
-                tile_state,
-                num_tiles,
-                d_num_runs_out);
-
-            // Check for failure to launch
-            if (CubDebug(error = cudaPeekAtLastError())) break;
-
-            // Sync the stream if specified to flush runtime errors
-            if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break;
-
-            // Return if empty problem
-            if (num_items == 0)
-                break;
-
-            // Get SM occupancy for reduce_by_key_kernel
-            int reduce_by_key_sm_occupancy;
-            if (CubDebug(error = MaxSmOccupancy(
-                reduce_by_key_sm_occupancy,             // out
-                reduce_by_key_kernel,
-                reduce_by_key_config.block_threads))) break;
-
-            // Get max x-dimension of grid
-            int max_dim_x;
-            if (CubDebug(error = cudaDeviceGetAttribute(&max_dim_x, cudaDevAttrMaxGridDimX, device_ordinal))) break;
-
-            // Run grids in epochs (in case number of tiles exceeds max x-dimension)
-            int scan_grid_size = CUB_MIN(num_tiles, max_dim_x);
-            for (int start_tile = 0; start_tile < num_tiles; start_tile += scan_grid_size)
-            {
-                // Log reduce_by_key_kernel configuration
_CubLog("Invoking %d reduce_by_key_kernel<<<%d, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n", - start_tile, scan_grid_size, reduce_by_key_config.block_threads, (long long) stream, reduce_by_key_config.items_per_thread, reduce_by_key_sm_occupancy); - - // Invoke reduce_by_key_kernel - reduce_by_key_kernel<<>>( - d_keys_in, - d_unique_out, - d_values_in, - d_aggregates_out, - d_num_runs_out, - tile_state, - start_tile, - equality_op, - reduction_op, - num_items); - - // Check for failure to launch - if (CubDebug(error = cudaPeekAtLastError())) break; - - // Sync the stream if specified to flush runtime errors - if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; - } - } - while (0); - - return error; - -#endif // CUB_RUNTIME_ENABLED - } - - - /** - * Internal dispatch routine - */ - CUB_RUNTIME_FUNCTION __forceinline__ - static cudaError_t Dispatch( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - KeysInputIteratorT d_keys_in, ///< [in] Pointer to the input sequence of keys - UniqueOutputIteratorT d_unique_out, ///< [out] Pointer to the output sequence of unique keys (one key per run) - ValuesInputIteratorT d_values_in, ///< [in] Pointer to the input sequence of corresponding values - AggregatesOutputIteratorT d_aggregates_out, ///< [out] Pointer to the output sequence of value aggregates (one aggregate per run) - NumRunsOutputIteratorT d_num_runs_out, ///< [out] Pointer to total number of runs encountered (i.e., the length of d_unique_out) - EqualityOpT equality_op, ///< [in] KeyT equality operator - ReductionOpT reduction_op, ///< [in] ValueT reduction operator - OffsetT num_items, ///< [in] Total number of items to select from - cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous) ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - { - cudaError error = cudaSuccess; - do - { - // Get PTX version - int ptx_version; - #if (CUB_PTX_ARCH == 0) - if (CubDebug(error = PtxVersion(ptx_version))) break; - #else - ptx_version = CUB_PTX_ARCH; - #endif - - // Get kernel kernel dispatch configurations - KernelConfig reduce_by_key_config; - InitConfigs(ptx_version, reduce_by_key_config); - - // Dispatch - if (CubDebug(error = Dispatch( - d_temp_storage, - temp_storage_bytes, - d_keys_in, - d_unique_out, - d_values_in, - d_aggregates_out, - d_num_runs_out, - equality_op, - reduction_op, - num_items, - stream, - debug_synchronous, - ptx_version, - DeviceCompactInitKernel, - DeviceReduceByKeyKernel, - reduce_by_key_config))) break; - } - while (0); - - return error; - } -}; - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - - diff --git a/ml-xgboost/cub/cub/device/dispatch/dispatch_rle.cuh b/ml-xgboost/cub/cub/device/dispatch/dispatch_rle.cuh deleted file mode 100644 index f05ed4c..0000000 --- a/ml-xgboost/cub/cub/device/dispatch/dispatch_rle.cuh +++ /dev/null @@ -1,538 +0,0 @@ - -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. 
diff --git a/ml-xgboost/cub/cub/device/dispatch/dispatch_rle.cuh b/ml-xgboost/cub/cub/device/dispatch/dispatch_rle.cuh
deleted file mode 100644
index f05ed4c..0000000
--- a/ml-xgboost/cub/cub/device/dispatch/dispatch_rle.cuh
+++ /dev/null
@@ -1,538 +0,0 @@
-
-/******************************************************************************
- * Copyright (c) 2011, Duane Merrill.  All rights reserved.
- * Copyright (c) 2011-2016, NVIDIA CORPORATION.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of the NVIDIA CORPORATION nor the
- *       names of its contributors may be used to endorse or promote products
- *       derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- ******************************************************************************/
-
-/**
- * \file
- * cub::DeviceRle provides device-wide, parallel operations for run-length-encoding sequences of data items residing within device-accessible memory.
- */
-
-#pragma once
-
-#include <stdio.h>
-#include <iterator>
-
-#include "dispatch_scan.cuh"
-#include "../../agent/agent_rle.cuh"
-#include "../../thread/thread_operators.cuh"
-#include "../../grid/grid_queue.cuh"
-#include "../../util_device.cuh"
-#include "../../util_namespace.cuh"
-
-/// Optional outer namespace(s)
-CUB_NS_PREFIX
-
-/// CUB namespace
-namespace cub {
-
-
-/******************************************************************************
- * Kernel entry points
- *****************************************************************************/
-
-/**
- * Select kernel entry point (multi-block)
- *
- * Performs functor-based selection if SelectOp functor type != NullType
- * Otherwise performs flag-based selection if FlagIterator's value type != NullType
- * Otherwise performs discontinuity selection (keep unique)
- */
-template <
-    typename AgentRlePolicyT,           ///< Parameterized AgentRlePolicyT tuning policy type
-    typename InputIteratorT,            ///< Random-access input iterator type for reading input items \iterator
-    typename OffsetsOutputIteratorT,    ///< Random-access output iterator type for writing run-offset values \iterator
-    typename LengthsOutputIteratorT,    ///< Random-access output iterator type for writing run-length values \iterator
-    typename NumRunsOutputIteratorT,    ///< Output iterator type for recording the number of runs encountered \iterator
-    typename ScanTileStateT,            ///< Tile status interface type
-    typename EqualityOpT,               ///< T equality operator type
-    typename OffsetT>                   ///< Signed integer type for global offsets
-__launch_bounds__ (int(AgentRlePolicyT::BLOCK_THREADS))
-__global__ void DeviceRleSweepKernel(
-    InputIteratorT d_in,                    ///< [in] Pointer to input sequence of data items
-    OffsetsOutputIteratorT d_offsets_out,   ///< [out] Pointer to output sequence of run-offsets
-    LengthsOutputIteratorT d_lengths_out,   ///< [out] Pointer to output sequence of run-lengths
-    NumRunsOutputIteratorT d_num_runs_out,  ///< [out] Pointer to total number of runs (i.e., length of \p d_offsets_out)
-    ScanTileStateT tile_status,             ///< [in] Tile status interface
-    EqualityOpT equality_op,                ///< [in] Equality operator for input items
-    OffsetT num_items,                      ///< [in] Total number of input items (i.e., length of \p d_in)
-    int num_tiles)                          ///< [in] Total number of tiles for the entire problem
-{
-    // Thread block type for selecting data from input tiles
-    typedef AgentRle<
-        AgentRlePolicyT,
-        InputIteratorT,
-        OffsetsOutputIteratorT,
-        LengthsOutputIteratorT,
-        EqualityOpT,
-        OffsetT> AgentRleT;
-
-    // Shared memory for AgentRle
-    __shared__ typename AgentRleT::TempStorage temp_storage;
-
-    // Process tiles
-    AgentRleT(temp_storage, d_in, d_offsets_out, d_lengths_out, equality_op, num_items).ConsumeRange(
-        num_tiles,
-        tile_status,
-        d_num_runs_out);
-}
-
-
-
-
-/******************************************************************************
- * Dispatch
- ******************************************************************************/
-
-/**
- * Utility class for dispatching the appropriately-tuned kernels for DeviceRle
- */
-template <
-    typename InputIteratorT,            ///< Random-access input iterator type for reading input items \iterator
-    typename OffsetsOutputIteratorT,    ///< Random-access output iterator type for writing run-offset values \iterator
-    typename LengthsOutputIteratorT,    ///< Random-access output iterator type for writing run-length values \iterator
-    typename NumRunsOutputIteratorT,    ///< Output iterator type for recording the number of runs encountered \iterator
-    typename EqualityOpT,               ///< T equality operator type
-    typename OffsetT>                   ///< Signed integer type for global offsets
-struct DeviceRleDispatch
-{
-    /******************************************************************************
-     * Types and constants
-     ******************************************************************************/
-
-    // The input value type
-    typedef typename std::iterator_traits<InputIteratorT>::value_type T;
-
-    // The lengths output value type
-    typedef typename If<(Equals<typename std::iterator_traits<LengthsOutputIteratorT>::value_type, void>::VALUE),   // LengthT = (if output iterator's value type is void) ?
-        OffsetT,                                                                                                    // ... then the OffsetT type,
-        typename std::iterator_traits<LengthsOutputIteratorT>::value_type>::Type LengthT;                           // ... else the output iterator's value type
-
-    enum
-    {
-        INIT_KERNEL_THREADS = 128,
-    };
-
-    // Tile status descriptor interface type
-    typedef ReduceByKeyScanTileState<LengthT, OffsetT> ScanTileStateT;
-
-
-    /******************************************************************************
-     * Tuning policies
-     ******************************************************************************/
-
-    /// SM35
-    struct Policy350
-    {
-        enum {
-            NOMINAL_4B_ITEMS_PER_THREAD = 15,
-            ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))),
-        };
-
-        typedef AgentRlePolicy<
-                96,
-                ITEMS_PER_THREAD,
-                BLOCK_LOAD_DIRECT,
-                LOAD_LDG,
-                true,
-                BLOCK_SCAN_WARP_SCANS>
-            RleSweepPolicy;
-    };
-
-    /// SM30
-    struct Policy300
-    {
-        enum {
-            NOMINAL_4B_ITEMS_PER_THREAD = 5,
-            ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))),
-        };
-
-        typedef AgentRlePolicy<
-                256,
-                ITEMS_PER_THREAD,
-                BLOCK_LOAD_WARP_TRANSPOSE,
-                LOAD_DEFAULT,
-                true,
-                BLOCK_SCAN_RAKING_MEMOIZE>
-            RleSweepPolicy;
-    };
-
-    /// SM20
-    struct Policy200
-    {
-        enum {
-            NOMINAL_4B_ITEMS_PER_THREAD = 15,
-            ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))),
-        };
-
-        typedef AgentRlePolicy<
-                128,
-                ITEMS_PER_THREAD,
-                BLOCK_LOAD_WARP_TRANSPOSE,
-                LOAD_DEFAULT,
-                false,
-                BLOCK_SCAN_WARP_SCANS>
-            RleSweepPolicy;
-    };
-
-    /// SM13
-    struct Policy130
-    {
-        enum {
-            NOMINAL_4B_ITEMS_PER_THREAD = 9,
-            ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))),
-        };
-
-        typedef AgentRlePolicy<
-                64,
-                ITEMS_PER_THREAD,
-                BLOCK_LOAD_WARP_TRANSPOSE,
-                LOAD_DEFAULT,
-                true,
-                BLOCK_SCAN_RAKING_MEMOIZE>
-            RleSweepPolicy;
-    };
-
-    /// SM10
-    struct Policy100
-    {
-        enum {
-            NOMINAL_4B_ITEMS_PER_THREAD = 9,
-            ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))),
-        };
-
-        typedef AgentRlePolicy<
-                256,
-                ITEMS_PER_THREAD,
-                BLOCK_LOAD_WARP_TRANSPOSE,
-                LOAD_DEFAULT,
-                true,
-                BLOCK_SCAN_RAKING_MEMOIZE>
-            RleSweepPolicy;
-    };
-
-
-    /******************************************************************************
-     * Tuning policies of current PTX compiler pass
-     ******************************************************************************/
-
-#if (CUB_PTX_ARCH >= 350)
-    typedef Policy350 PtxPolicy;
-
-#elif (CUB_PTX_ARCH >= 300)
-    typedef Policy300 PtxPolicy;
-
-#elif (CUB_PTX_ARCH >= 200)
-    typedef Policy200 PtxPolicy;
-
-#elif (CUB_PTX_ARCH >= 130)
-    typedef Policy130 PtxPolicy;
-
-#else
-    typedef Policy100 PtxPolicy;
-
-#endif
-
-    // "Opaque" policies (whose parameterizations aren't reflected in the type signature)
-    struct PtxRleSweepPolicy : PtxPolicy::RleSweepPolicy {};
-
-
-    /******************************************************************************
-     * Utilities
-     ******************************************************************************/
-
-    /**
-     * Initialize kernel dispatch configurations with the policies corresponding to the PTX assembly we will use
-     */
-    template <typename KernelConfig>
-    CUB_RUNTIME_FUNCTION __forceinline__
-    static void InitConfigs(
-        int             ptx_version,
-        KernelConfig&   device_rle_config)
-    {
-    #if (CUB_PTX_ARCH > 0)
-
-        // We're on the device, so initialize the kernel dispatch configurations with the current PTX policy
-        device_rle_config.template Init<PtxRleSweepPolicy>();
-
-    #else
-
-        // We're on the host, so lookup and initialize the kernel dispatch configurations with the policies that match the device's PTX version
-        if (ptx_version >= 350)
-        {
-            device_rle_config.template Init<typename Policy350::RleSweepPolicy>();
-        }
-        else if (ptx_version >= 300)
-        {
-            device_rle_config.template Init<typename Policy300::RleSweepPolicy>();
-        }
-        else if (ptx_version >= 200)
-        {
-            device_rle_config.template Init<typename Policy200::RleSweepPolicy>();
-        }
-        else if (ptx_version >= 130)
-        {
-            device_rle_config.template Init<typename Policy130::RleSweepPolicy>();
-        }
-        else
-        {
-            device_rle_config.template Init<typename Policy100::RleSweepPolicy>();
-        }
-
-    #endif
-    }
-
-
-    /**
-     * Kernel dispatch configuration.  Mirrors the constants within AgentRlePolicyT.
-     */
-    struct KernelConfig
-    {
-        int                     block_threads;
-        int                     items_per_thread;
-        BlockLoadAlgorithm      load_policy;
-        bool                    store_warp_time_slicing;
-        BlockScanAlgorithm      scan_algorithm;
-
-        template <typename AgentRlePolicyT>
-        CUB_RUNTIME_FUNCTION __forceinline__
-        void Init()
-        {
-            block_threads               = AgentRlePolicyT::BLOCK_THREADS;
-            items_per_thread            = AgentRlePolicyT::ITEMS_PER_THREAD;
-            load_policy                 = AgentRlePolicyT::LOAD_ALGORITHM;
-            store_warp_time_slicing     = AgentRlePolicyT::STORE_WARP_TIME_SLICING;
-            scan_algorithm              = AgentRlePolicyT::SCAN_ALGORITHM;
-        }
-
-        CUB_RUNTIME_FUNCTION __forceinline__
-        void Print()
-        {
-            printf("%d, %d, %d, %d, %d",
-                block_threads,
-                items_per_thread,
-                load_policy,
-                store_warp_time_slicing,
-                scan_algorithm);
-        }
-    };
-
-
-    /******************************************************************************
-     * Dispatch entrypoints
-     ******************************************************************************/
-
-    /**
-     * Internal dispatch routine for computing a device-wide run-length-encode using the
-     * specified kernel functions.
-     */
-    template <
-        typename DeviceScanInitKernelPtr,       ///< Function type of cub::DeviceScanInitKernel
-        typename DeviceRleSweepKernelPtr>       ///< Function type of cub::DeviceRleSweepKernelPtr
-    CUB_RUNTIME_FUNCTION __forceinline__
-    static cudaError_t Dispatch(
-        void* d_temp_storage,                   ///< [in] %Device-accessible allocation of temporary storage.  When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
-        size_t& temp_storage_bytes,             ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
-        InputIteratorT d_in,                    ///< [in] Pointer to the input sequence of data items
-        OffsetsOutputIteratorT d_offsets_out,   ///< [out] Pointer to the output sequence of run-offsets
-        LengthsOutputIteratorT d_lengths_out,   ///< [out] Pointer to the output sequence of run-lengths
-        NumRunsOutputIteratorT d_num_runs_out,  ///< [out] Pointer to the total number of runs encountered (i.e., length of \p d_offsets_out)
-        EqualityOpT equality_op,                ///< [in] Equality operator for input items
-        OffsetT num_items,                      ///< [in] Total number of input items (i.e., length of \p d_in)
-        cudaStream_t stream,                    ///< [in] CUDA stream to launch kernels within.  Default is stream0.
-        bool debug_synchronous,                 ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors.  Also causes launch configurations to be printed to the console.  Default is \p false.
-        int ptx_version,                        ///< [in] PTX version of dispatch kernels
-        DeviceScanInitKernelPtr device_scan_init_kernel,    ///< [in] Kernel function pointer to parameterization of cub::DeviceScanInitKernel
-        DeviceRleSweepKernelPtr device_rle_sweep_kernel,    ///< [in] Kernel function pointer to parameterization of cub::DeviceRleSweepKernel
-        KernelConfig device_rle_config)         ///< [in] Dispatch parameters that match the policy that \p device_rle_sweep_kernel was compiled for
-    {
-
-#ifndef CUB_RUNTIME_ENABLED
-
-        // Kernel launch not supported from this device
-        return CubDebug(cudaErrorNotSupported);
-
-#else
-
-        cudaError error = cudaSuccess;
-        do
-        {
-            // Get device ordinal
-            int device_ordinal;
-            if (CubDebug(error = cudaGetDevice(&device_ordinal))) break;
-
-            // Get SM count
-            int sm_count;
-            if (CubDebug(error = cudaDeviceGetAttribute (&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break;
-
-            // Number of input tiles
-            int tile_size = device_rle_config.block_threads * device_rle_config.items_per_thread;
-            int num_tiles = (num_items + tile_size - 1) / tile_size;
-
-            // Specify temporary storage allocation requirements
-            size_t allocation_sizes[1];
-            if (CubDebug(error = ScanTileStateT::AllocationSize(num_tiles, allocation_sizes[0]))) break;    // bytes needed for tile status descriptors
-
-            // Compute allocation pointers into the single storage blob (or compute the necessary size of the blob)
-            void* allocations[1];
-            if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break;
-            if (d_temp_storage == NULL)
-            {
-                // Return if the caller is simply requesting the size of the storage allocation
-                break;
-            }
-
-            // Construct the tile status interface
-            ScanTileStateT tile_status;
-            if (CubDebug(error = tile_status.Init(num_tiles, allocations[0], allocation_sizes[0]))) break;
-
-            // Log device_scan_init_kernel configuration
-            int init_grid_size = CUB_MAX(1, (num_tiles + INIT_KERNEL_THREADS - 1) / INIT_KERNEL_THREADS);
-            if (debug_synchronous) _CubLog("Invoking device_scan_init_kernel<<<%d, %d, 0, %lld>>>()\n", init_grid_size, INIT_KERNEL_THREADS, (long long) stream);
-
-            // Invoke device_scan_init_kernel to initialize tile descriptors and queue descriptors
-            device_scan_init_kernel<<<init_grid_size, INIT_KERNEL_THREADS, 0, stream>>>(
-                tile_status,
-                num_tiles,
-                d_num_runs_out);
-
-            // Check for failure to launch
-            if (CubDebug(error = cudaPeekAtLastError())) break;
-
-            // Sync the stream if specified to flush runtime errors
-            if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break;
-
-            // Return if empty problem
-            if (num_items == 0)
-                break;
-
-            // Get SM occupancy for device_rle_sweep_kernel
-            int device_rle_kernel_sm_occupancy;
-            if (CubDebug(error = MaxSmOccupancy(
-                device_rle_kernel_sm_occupancy,             // out
-                device_rle_sweep_kernel,
-                device_rle_config.block_threads))) break;
-
-            // Get max x-dimension of grid
-            int max_dim_x;
-            if (CubDebug(error = cudaDeviceGetAttribute(&max_dim_x, cudaDevAttrMaxGridDimX, device_ordinal))) break;
-
-            // Get grid size for scanning tiles
-            dim3 scan_grid_size;
-            scan_grid_size.z = 1;
-            scan_grid_size.y = ((unsigned int) num_tiles + max_dim_x - 1) / max_dim_x;
-            scan_grid_size.x = CUB_MIN(num_tiles, max_dim_x);
-
-            // Log device_rle_sweep_kernel configuration
-            if (debug_synchronous) _CubLog("Invoking device_rle_sweep_kernel<<<{%d,%d,%d}, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n",
-                scan_grid_size.x, scan_grid_size.y, scan_grid_size.z, device_rle_config.block_threads, (long long) stream, device_rle_config.items_per_thread, device_rle_kernel_sm_occupancy);
-
-            // Invoke device_rle_sweep_kernel
-            device_rle_sweep_kernel<<<scan_grid_size, device_rle_config.block_threads, 0, stream>>>(
-                d_in,
-                d_offsets_out,
-                d_lengths_out,
-                d_num_runs_out,
-                tile_status,
-                equality_op,
-                num_items,
-                num_tiles);
-
-            // Check for failure to launch
-            if (CubDebug(error = cudaPeekAtLastError())) break;
-
-            // Sync the stream if specified to flush runtime errors
-            if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break;
-
-        }
-        while (0);
-
-        return error;
-
-#endif  // CUB_RUNTIME_ENABLED
-    }
-
-
-    /**
-     * Internal dispatch routine
-     */
-    CUB_RUNTIME_FUNCTION __forceinline__
-    static cudaError_t Dispatch(
-        void* d_temp_storage,                   ///< [in] %Device-accessible allocation of temporary storage.  When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
-        size_t& temp_storage_bytes,             ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
-        InputIteratorT d_in,                    ///< [in] Pointer to input sequence of data items
-        OffsetsOutputIteratorT d_offsets_out,   ///< [out] Pointer to output sequence of run-offsets
-        LengthsOutputIteratorT d_lengths_out,   ///< [out] Pointer to output sequence of run-lengths
-        NumRunsOutputIteratorT d_num_runs_out,  ///< [out] Pointer to total number of runs (i.e., length of \p d_offsets_out)
-        EqualityOpT equality_op,                ///< [in] Equality operator for input items
-        OffsetT num_items,                      ///< [in] Total number of input items (i.e., length of \p d_in)
-        cudaStream_t stream,                    ///< [in] [optional] CUDA stream to launch kernels within.  Default is stream0.
-        bool debug_synchronous)                 ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors.  Also causes launch configurations to be printed to the console.  Default is \p false.
-    {
-        cudaError error = cudaSuccess;
-        do
-        {
-            // Get PTX version
-            int ptx_version;
-    #if (CUB_PTX_ARCH == 0)
-            if (CubDebug(error = PtxVersion(ptx_version))) break;
-    #else
-            ptx_version = CUB_PTX_ARCH;
-    #endif
-
-            // Get kernel dispatch configurations
-            KernelConfig device_rle_config;
-            InitConfigs(ptx_version, device_rle_config);
-
-            // Dispatch
-            if (CubDebug(error = Dispatch(
-                d_temp_storage,
-                temp_storage_bytes,
-                d_in,
-                d_offsets_out,
-                d_lengths_out,
-                d_num_runs_out,
-                equality_op,
-                num_items,
-                stream,
-                debug_synchronous,
-                ptx_version,
-                DeviceCompactInitKernel<ScanTileStateT, NumRunsOutputIteratorT>,
-                DeviceRleSweepKernel<PtxRleSweepPolicy, InputIteratorT, OffsetsOutputIteratorT, LengthsOutputIteratorT, NumRunsOutputIteratorT, ScanTileStateT, EqualityOpT, OffsetT>,
-                device_rle_config))) break;
-        }
-        while (0);
-
-        return error;
-    }
-};
-
-
-}               // CUB namespace
-CUB_NS_POSTFIX  // Optional outer namespace(s)
-
-
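DeviceRleDispatch above drives the same tile-status machinery with a dim3 grid whose y-dimension absorbs tile counts beyond the maximum grid x-dimension. A minimal sketch of the public cub::DeviceRunLengthEncode::NonTrivialRuns entry point this dispatch backs, under the assumption of the sample input shown (error checks omitted):

    #include <cub/cub.cuh>

    int main()
    {
        // Assumed sample: non-trivial runs are {3,3} at offset 1 and {9,9,9} at offset 5
        int h_in[8] = {1, 3, 3, 8, 7, 9, 9, 9};
        int *d_in, *d_offsets_out, *d_lengths_out, *d_num_runs_out;
        cudaMalloc(&d_in, sizeof(h_in));
        cudaMalloc(&d_offsets_out, 8 * sizeof(int));
        cudaMalloc(&d_lengths_out, 8 * sizeof(int));
        cudaMalloc(&d_num_runs_out, sizeof(int));
        cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);

        // Size query, then the actual run-length encode of non-trivial runs
        void   *d_temp_storage     = NULL;
        size_t  temp_storage_bytes = 0;
        cub::DeviceRunLengthEncode::NonTrivialRuns(d_temp_storage, temp_storage_bytes,
            d_in, d_offsets_out, d_lengths_out, d_num_runs_out, 8);
        cudaMalloc(&d_temp_storage, temp_storage_bytes);
        cub::DeviceRunLengthEncode::NonTrivialRuns(d_temp_storage, temp_storage_bytes,
            d_in, d_offsets_out, d_lengths_out, d_num_runs_out, 8);
        cudaDeviceSynchronize();    // offsets {1,5}, lengths {2,3}, num_runs 2
        return 0;
    }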
diff --git a/ml-xgboost/cub/cub/device/dispatch/dispatch_scan.cuh b/ml-xgboost/cub/cub/device/dispatch/dispatch_scan.cuh
deleted file mode 100644
index c39f3bd..0000000
--- a/ml-xgboost/cub/cub/device/dispatch/dispatch_scan.cuh
+++ /dev/null
@@ -1,563 +0,0 @@
-
-/******************************************************************************
- * Copyright (c) 2011, Duane Merrill.  All rights reserved.
- * Copyright (c) 2011-2016, NVIDIA CORPORATION.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of the NVIDIA CORPORATION nor the
- *       names of its contributors may be used to endorse or promote products
- *       derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- ******************************************************************************/
-
-/**
- * \file
- * cub::DeviceScan provides device-wide, parallel operations for computing a prefix scan across a sequence of data items residing within device-accessible memory.
- */
-
-#pragma once
-
-#include <stdio.h>
-#include <iterator>
-
-#include "../../agent/agent_scan.cuh"
-#include "../../thread/thread_operators.cuh"
-#include "../../grid/grid_queue.cuh"
-#include "../../util_arch.cuh"
-#include "../../util_debug.cuh"
-#include "../../util_device.cuh"
-#include "../../util_namespace.cuh"
-
-/// Optional outer namespace(s)
-CUB_NS_PREFIX
-
-/// CUB namespace
-namespace cub {
-
-
-/******************************************************************************
- * Kernel entry points
- *****************************************************************************/
-
-/**
- * Initialization kernel for tile status initialization (multi-block)
- */
-template <
-    typename ScanTileStateT>                ///< Tile status interface type
-__global__ void DeviceScanInitKernel(
-    ScanTileStateT tile_state,              ///< [in] Tile status interface
-    int num_tiles)                          ///< [in] Number of tiles
-{
-    // Initialize tile status
-    tile_state.InitializeStatus(num_tiles);
-}
-
-/**
- * Initialization kernel for tile status initialization (multi-block)
- */
-template <
-    typename ScanTileStateT,                ///< Tile status interface type
-    typename NumSelectedIteratorT>          ///< Output iterator type for recording the number of items selected
-__global__ void DeviceCompactInitKernel(
-    ScanTileStateT tile_state,                  ///< [in] Tile status interface
-    int num_tiles,                              ///< [in] Number of tiles
-    NumSelectedIteratorT d_num_selected_out)    ///< [out] Pointer to the total number of items selected (i.e., length of \p d_selected_out)
-{
-    // Initialize tile status
-    tile_state.InitializeStatus(num_tiles);
-
-    // Initialize d_num_selected_out
-    if ((blockIdx.x == 0) && (threadIdx.x == 0))
-        *d_num_selected_out = 0;
-}
-
-
-/**
- * Scan kernel entry point (multi-block)
- */
-template <
-    typename ScanPolicyT,       ///< Parameterized ScanPolicyT tuning policy type
-    typename InputIteratorT,    ///< Random-access input iterator type for reading scan inputs \iterator
-    typename OutputIteratorT,   ///< Random-access output iterator type for writing scan outputs \iterator
-    typename ScanTileStateT,    ///< Tile status interface type
-    typename ScanOpT,           ///< Binary scan functor type having member T operator()(const T &a, const T &b)
-    typename InitValueT,        ///< Initial value to seed the exclusive scan (cub::NullType for inclusive scans)
-    typename OffsetT>           ///< Signed integer type for global offsets
-__launch_bounds__ (int(ScanPolicyT::BLOCK_THREADS))
-__global__ void DeviceScanKernel(
-    InputIteratorT d_in,        ///< Input data
-    OutputIteratorT d_out,      ///< Output data
-    ScanTileStateT tile_state,  ///< Tile status interface
-    int start_tile,             ///< The starting tile for the current grid
-    ScanOpT scan_op,            ///< Binary scan functor
-    InitValueT init_value,      ///< Initial value to seed the exclusive scan
-    OffsetT num_items)          ///< Total number of scan items for the entire problem
-{
-    // Thread block type for scanning input tiles
-    typedef AgentScan<
-        ScanPolicyT,
-        InputIteratorT,
-        OutputIteratorT,
-        ScanOpT,
-        InitValueT,
-        OffsetT> AgentScanT;
-
-    // Shared memory for AgentScan
-    __shared__ typename AgentScanT::TempStorage temp_storage;
-
-    // Process tiles
-    AgentScanT(temp_storage, d_in, d_out, scan_op, init_value).ConsumeRange(
-        num_items,
-        tile_state,
-        start_tile);
-}
-
-
-
-
-/******************************************************************************
- * Dispatch
- ******************************************************************************/
-
-
-/**
- * Utility class for dispatching the appropriately-tuned kernels for DeviceScan
- */
-template <
-    typename InputIteratorT,    ///< Random-access input iterator type for reading scan inputs \iterator
-    typename OutputIteratorT,   ///< Random-access output iterator type for writing scan outputs \iterator
-    typename ScanOpT,           ///< Binary scan functor type having member T operator()(const T &a, const T &b)
-    typename InitValueT,        ///< The init_value element type for ScanOpT (cub::NullType for inclusive scans)
-    typename OffsetT>           ///< Signed integer type for global offsets
-struct DispatchScan
-{
-    //---------------------------------------------------------------------
-    // Constants and Types
-    //---------------------------------------------------------------------
-
-    enum
-    {
-        INIT_KERNEL_THREADS = 128
-    };
-
-    // The output value type
-    typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE),  // OutputT = (if output iterator's value type is void) ?
-        typename std::iterator_traits<InputIteratorT>::value_type,                                          // ... then the input iterator's value type,
-        typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT;                          // ... else the output iterator's value type
-
-    // Tile status descriptor interface type
-    typedef ScanTileState<OutputT> ScanTileStateT;
-
-
-    //---------------------------------------------------------------------
-    // Tuning policies
-    //---------------------------------------------------------------------
-
-    /// SM600
-    struct Policy600
-    {
-        typedef AgentScanPolicy<
-                CUB_NOMINAL_CONFIG(128, 15, OutputT),   ///< Threads per block, items per thread
-                BLOCK_LOAD_TRANSPOSE,
-                LOAD_DEFAULT,
-                BLOCK_STORE_TRANSPOSE,
-                BLOCK_SCAN_WARP_SCANS>
-            ScanPolicyT;
-    };
-
-
-    /// SM520
-    struct Policy520
-    {
-        // Titan X: 32.47B items/s @ 48M 32-bit T
-        typedef AgentScanPolicy<
-                CUB_NOMINAL_CONFIG(128, 12, OutputT),   ///< Threads per block, items per thread
-                BLOCK_LOAD_DIRECT,
-                LOAD_LDG,
-                BLOCK_STORE_WARP_TRANSPOSE,
-                BLOCK_SCAN_WARP_SCANS>
-            ScanPolicyT;
-    };
-
-
-    /// SM35
-    struct Policy350
-    {
-        // GTX Titan: 29.5B items/s (232.4 GB/s) @ 48M 32-bit T
-        typedef AgentScanPolicy<
-                CUB_NOMINAL_CONFIG(128, 12, OutputT),   ///< Threads per block, items per thread
-                BLOCK_LOAD_DIRECT,
-                LOAD_LDG,
-                BLOCK_STORE_WARP_TRANSPOSE_TIMESLICED,
-                BLOCK_SCAN_RAKING>
-            ScanPolicyT;
-    };
-
-    /// SM30
-    struct Policy300
-    {
-        typedef AgentScanPolicy<
-                CUB_NOMINAL_CONFIG(256, 9, OutputT),    ///< Threads per block, items per thread
-                BLOCK_LOAD_WARP_TRANSPOSE,
-                LOAD_DEFAULT,
-                BLOCK_STORE_WARP_TRANSPOSE,
-                BLOCK_SCAN_WARP_SCANS>
-            ScanPolicyT;
-    };
-
-    /// SM20
-    struct Policy200
-    {
-        // GTX 580: 20.3B items/s (162.3 GB/s) @ 48M 32-bit T
-        typedef AgentScanPolicy<
-                CUB_NOMINAL_CONFIG(128, 12, OutputT),   ///< Threads per block, items per thread
-                BLOCK_LOAD_WARP_TRANSPOSE,
-                LOAD_DEFAULT,
-                BLOCK_STORE_WARP_TRANSPOSE,
-                BLOCK_SCAN_WARP_SCANS>
-            ScanPolicyT;
-    };
-
-    /// SM13
-    struct Policy130
-    {
-        typedef AgentScanPolicy<
-                CUB_NOMINAL_CONFIG(96, 21, OutputT),    ///< Threads per block, items per thread
-                BLOCK_LOAD_WARP_TRANSPOSE,
-                LOAD_DEFAULT,
-                BLOCK_STORE_WARP_TRANSPOSE,
-                BLOCK_SCAN_RAKING_MEMOIZE>
-            ScanPolicyT;
-    };
-
-    /// SM10
-    struct Policy100
-    {
-        typedef AgentScanPolicy<
-                CUB_NOMINAL_CONFIG(64, 9, OutputT),     ///< Threads per block, items per thread
-                BLOCK_LOAD_WARP_TRANSPOSE,
-                LOAD_DEFAULT,
-                BLOCK_STORE_WARP_TRANSPOSE,
-                BLOCK_SCAN_WARP_SCANS>
-            ScanPolicyT;
-    };
-
-
-    //---------------------------------------------------------------------
-    // Tuning policies of current PTX compiler pass
-    //---------------------------------------------------------------------
-
-#if (CUB_PTX_ARCH >= 600)
-    typedef Policy600 PtxPolicy;
-
-#elif (CUB_PTX_ARCH >= 520)
-    typedef Policy520 PtxPolicy;
-
-#elif (CUB_PTX_ARCH >= 350)
-    typedef Policy350 PtxPolicy;
-
-#elif (CUB_PTX_ARCH >= 300)
-    typedef Policy300 PtxPolicy;
-
-#elif (CUB_PTX_ARCH >= 200)
-    typedef Policy200 PtxPolicy;
-
-#elif (CUB_PTX_ARCH >= 130)
-    typedef Policy130 PtxPolicy;
-
-#else
-    typedef Policy100 PtxPolicy;
-
-#endif
-
-    // "Opaque" policies (whose parameterizations aren't reflected in the type signature)
-    struct PtxAgentScanPolicy : PtxPolicy::ScanPolicyT {};
-
-
-    //---------------------------------------------------------------------
-    // Utilities
-    //---------------------------------------------------------------------
-
-    /**
-     * Initialize kernel dispatch configurations with the policies corresponding to the PTX assembly we will use
-     */
-    template <typename KernelConfig>
-    CUB_RUNTIME_FUNCTION __forceinline__
-    static void InitConfigs(
-        int             ptx_version,
-        KernelConfig    &scan_kernel_config)
-    {
-    #if (CUB_PTX_ARCH > 0)
-        (void)ptx_version;
-
-        // We're on the device, so initialize the kernel dispatch configurations with the current PTX policy
-        scan_kernel_config.template Init<PtxAgentScanPolicy>();
-
-    #else
-
-        // We're on the host, so lookup and initialize the kernel dispatch configurations with the policies that match the device's PTX version
-        if (ptx_version >= 600)
-        {
-            scan_kernel_config.template Init<typename Policy600::ScanPolicyT>();
-        }
-        else if (ptx_version >= 520)
-        {
-            scan_kernel_config.template Init<typename Policy520::ScanPolicyT>();
-        }
-        else if (ptx_version >= 350)
-        {
-            scan_kernel_config.template Init<typename Policy350::ScanPolicyT>();
-        }
-        else if (ptx_version >= 300)
-        {
-            scan_kernel_config.template Init<typename Policy300::ScanPolicyT>();
-        }
-        else if (ptx_version >= 200)
-        {
-            scan_kernel_config.template Init<typename Policy200::ScanPolicyT>();
-        }
-        else if (ptx_version >= 130)
-        {
-            scan_kernel_config.template Init<typename Policy130::ScanPolicyT>();
-        }
-        else
-        {
-            scan_kernel_config.template Init<typename Policy100::ScanPolicyT>();
-        }
-
-    #endif
-    }
-
-
-    /**
-     * Kernel dispatch configuration.
-     */
-    struct KernelConfig
-    {
-        int block_threads;
-        int items_per_thread;
-        int tile_items;
-
-        template <typename PolicyT>
-        CUB_RUNTIME_FUNCTION __forceinline__
-        void Init()
-        {
-            block_threads       = PolicyT::BLOCK_THREADS;
-            items_per_thread    = PolicyT::ITEMS_PER_THREAD;
-            tile_items          = block_threads * items_per_thread;
-        }
-    };
-
-
-    //---------------------------------------------------------------------
-    // Dispatch entrypoints
-    //---------------------------------------------------------------------
-
-    /**
-     * Internal dispatch routine for computing a device-wide prefix scan using the
-     * specified kernel functions.
-     */
-    template <
-        typename ScanInitKernelPtrT,    ///< Function type of cub::DeviceScanInitKernel
-        typename ScanSweepKernelPtrT>   ///< Function type of cub::DeviceScanKernelPtrT
-    CUB_RUNTIME_FUNCTION __forceinline__
-    static cudaError_t Dispatch(
-        void* d_temp_storage,           ///< [in] %Device-accessible allocation of temporary storage.  When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
-        size_t& temp_storage_bytes,     ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
-        InputIteratorT d_in,            ///< [in] Pointer to the input sequence of data items
-        OutputIteratorT d_out,          ///< [out] Pointer to the output sequence of data items
-        ScanOpT scan_op,                ///< [in] Binary scan functor
-        InitValueT init_value,          ///< [in] Initial value to seed the exclusive scan
-        OffsetT num_items,              ///< [in] Total number of input items (i.e., the length of \p d_in)
-        cudaStream_t stream,            ///< [in] CUDA stream to launch kernels within.  Default is stream0.
-        bool debug_synchronous,         ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors.  Also causes launch configurations to be printed to the console.  Default is \p false.
-        int /*ptx_version*/,                ///< [in] PTX version of dispatch kernels
-        ScanInitKernelPtrT init_kernel,     ///< [in] Kernel function pointer to parameterization of cub::DeviceScanInitKernel
-        ScanSweepKernelPtrT scan_kernel,    ///< [in] Kernel function pointer to parameterization of cub::DeviceScanKernel
-        KernelConfig scan_kernel_config)    ///< [in] Dispatch parameters that match the policy that \p scan_kernel was compiled for
-    {
-
-#ifndef CUB_RUNTIME_ENABLED
-        (void)d_temp_storage;
-        (void)temp_storage_bytes;
-        (void)d_in;
-        (void)d_out;
-        (void)scan_op;
-        (void)init_value;
-        (void)num_items;
-        (void)stream;
-        (void)debug_synchronous;
-        (void)init_kernel;
-        (void)scan_kernel;
-        (void)scan_kernel_config;
-
-        // Kernel launch not supported from this device
-        return CubDebug(cudaErrorNotSupported);
-
-#else
-        cudaError error = cudaSuccess;
-        do
-        {
-            // Get device ordinal
-            int device_ordinal;
-            if (CubDebug(error = cudaGetDevice(&device_ordinal))) break;
-
-            // Get SM count
-            int sm_count;
-            if (CubDebug(error = cudaDeviceGetAttribute (&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break;
-
-            // Number of input tiles
-            int tile_size = scan_kernel_config.block_threads * scan_kernel_config.items_per_thread;
-            int num_tiles = (num_items + tile_size - 1) / tile_size;
-
-            // Specify temporary storage allocation requirements
-            size_t allocation_sizes[1];
-            if (CubDebug(error = ScanTileStateT::AllocationSize(num_tiles, allocation_sizes[0]))) break;    // bytes needed for tile status descriptors
-
-            // Compute allocation pointers into the single storage blob (or compute the necessary size of the blob)
-            void* allocations[1];
-            if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break;
-            if (d_temp_storage == NULL)
-            {
-                // Return if the caller is simply requesting the size of the storage allocation
-                break;
-            }
-
-            // Return if empty problem
-            if (num_items == 0)
-                break;
-
-            // Construct the tile status interface
-            ScanTileStateT tile_state;
-            if (CubDebug(error = tile_state.Init(num_tiles, allocations[0], allocation_sizes[0]))) break;
-
-            // Log init_kernel configuration
-            int init_grid_size = (num_tiles + INIT_KERNEL_THREADS - 1) / INIT_KERNEL_THREADS;
-            if (debug_synchronous) _CubLog("Invoking init_kernel<<<%d, %d, 0, %lld>>>()\n", init_grid_size, INIT_KERNEL_THREADS, (long long) stream);
-
-            // Invoke init_kernel to initialize tile descriptors
-            init_kernel<<<init_grid_size, INIT_KERNEL_THREADS, 0, stream>>>(
-                tile_state,
-                num_tiles);
-
-            // Check for failure to launch
-            if (CubDebug(error = cudaPeekAtLastError())) break;
-
-            // Sync the stream if specified to flush runtime errors
-            if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break;
-
-            // Get SM occupancy for scan_kernel
-            int scan_sm_occupancy;
-            if (CubDebug(error = MaxSmOccupancy(
-                scan_sm_occupancy,            // out
-                scan_kernel,
-                scan_kernel_config.block_threads))) break;
-
-            // Get max x-dimension of grid
-            int max_dim_x;
-            if (CubDebug(error = cudaDeviceGetAttribute(&max_dim_x, cudaDevAttrMaxGridDimX, device_ordinal))) break;
-
-            // Run grids in epochs (in case number of tiles exceeds max x-dimension)
-            int scan_grid_size = CUB_MIN(num_tiles, max_dim_x);
-            for (int start_tile = 0; start_tile < num_tiles; start_tile += scan_grid_size)
-            {
-                // Log scan_kernel configuration
-                if (debug_synchronous) _CubLog("Invoking %d scan_kernel<<<%d, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n",
-                    start_tile, scan_grid_size, scan_kernel_config.block_threads, (long long) stream, scan_kernel_config.items_per_thread, scan_sm_occupancy);
-
-                // Invoke scan_kernel
-                scan_kernel<<<scan_grid_size, scan_kernel_config.block_threads, 0, stream>>>(
-                    d_in,
-                    d_out,
-                    tile_state,
-                    start_tile,
-                    scan_op,
-                    init_value,
-                    num_items);
-
-                // Check for failure to launch
-                if (CubDebug(error = cudaPeekAtLastError())) break;
-
-                // Sync the stream if specified to flush runtime errors
-                if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break;
-            }
-        }
-        while (0);
-
-        return error;
-
-#endif  // CUB_RUNTIME_ENABLED
-    }
-
-
-    /**
-     * Internal dispatch routine
-     */
-    CUB_RUNTIME_FUNCTION __forceinline__
-    static cudaError_t Dispatch(
-        void* d_temp_storage,           ///< [in] %Device-accessible allocation of temporary storage.  When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
-        size_t& temp_storage_bytes,     ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
-        InputIteratorT d_in,            ///< [in] Pointer to the input sequence of data items
-        OutputIteratorT d_out,          ///< [out] Pointer to the output sequence of data items
-        ScanOpT scan_op,                ///< [in] Binary scan functor
-        InitValueT init_value,          ///< [in] Initial value to seed the exclusive scan
-        OffsetT num_items,              ///< [in] Total number of input items (i.e., the length of \p d_in)
-        cudaStream_t stream,            ///< [in] [optional] CUDA stream to launch kernels within.  Default is stream0.
-        bool debug_synchronous)         ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors.  Also causes launch configurations to be printed to the console.  Default is \p false.
-    {
-        cudaError error = cudaSuccess;
-        do
-        {
-            // Get PTX version
-            int ptx_version;
-            if (CubDebug(error = PtxVersion(ptx_version))) break;
-
-            // Get kernel dispatch configurations
-            KernelConfig scan_kernel_config;
-            InitConfigs(ptx_version, scan_kernel_config);
-
-            // Dispatch
-            if (CubDebug(error = Dispatch(
-                d_temp_storage,
-                temp_storage_bytes,
-                d_in,
-                d_out,
-                scan_op,
-                init_value,
-                num_items,
-                stream,
-                debug_synchronous,
-                ptx_version,
-                DeviceScanInitKernel<ScanTileStateT>,
-                DeviceScanKernel<PtxAgentScanPolicy, InputIteratorT, OutputIteratorT, ScanTileStateT, ScanOpT, InitValueT, OffsetT>,
-                scan_kernel_config))) break;
-        }
-        while (0);
-
-        return error;
-    }
-};
-
-
-
-}               // CUB namespace
-CUB_NS_POSTFIX  // Optional outer namespace(s)
-
-
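DispatchScan above is the common backbone for the scan-based primitives in this directory; dispatch_reduce_by_key.cuh, dispatch_rle.cuh, and dispatch_select_if.cuh all include dispatch_scan.cuh for its init kernels and tile state. A minimal sketch of its public face, cub::DeviceScan::ExclusiveSum, using the same size-query-then-run pattern; the input values are assumed for illustration and error checks are omitted:

    #include <cub/cub.cuh>

    int main()
    {
        // Assumed sample input for an exclusive prefix sum
        int h_in[5] = {3, 1, 4, 1, 5};
        int *d_in, *d_out;
        cudaMalloc(&d_in, sizeof(h_in));
        cudaMalloc(&d_out, sizeof(h_in));
        cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);

        // Size query, then the actual scan
        void   *d_temp_storage     = NULL;
        size_t  temp_storage_bytes = 0;
        cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, 5);
        cudaMalloc(&d_temp_storage, temp_storage_bytes);
        cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, 5);
        cudaDeviceSynchronize();    // d_out = {0, 3, 4, 8, 9}
        return 0;
    }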
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::DeviceSelect provides device-wide, parallel operations for selecting items from sequences of data items residing within device-accessible memory. - */ - -#pragma once - -#include -#include - -#include "dispatch_scan.cuh" -#include "../../agent/agent_select_if.cuh" -#include "../../thread/thread_operators.cuh" -#include "../../grid/grid_queue.cuh" -#include "../../util_device.cuh" -#include "../../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/****************************************************************************** - * Kernel entry points - *****************************************************************************/ - -/** - * Select kernel entry point (multi-block) - * - * Performs functor-based selection if SelectOpT functor type != NullType - * Otherwise performs flag-based selection if FlagsInputIterator's value type != NullType - * Otherwise performs discontinuity selection (keep unique) - */ -template < - typename AgentSelectIfPolicyT, ///< Parameterized AgentSelectIfPolicyT tuning policy type - typename InputIteratorT, ///< Random-access input iterator type for reading input items - typename FlagsInputIteratorT, ///< Random-access input iterator type for reading selection flags (NullType* if a selection functor or discontinuity flagging is to be used for selection) - typename SelectedOutputIteratorT, ///< Random-access output iterator type for writing selected items - typename NumSelectedIteratorT, ///< Output iterator type for recording the number of items selected - typename ScanTileStateT, ///< Tile status interface type - typename SelectOpT, ///< Selection operator type (NullType if selection flags or discontinuity flagging is to be used for selection) - typename EqualityOpT, ///< Equality operator type (NullType if selection functor or selection flags is to be used for selection) - typename OffsetT, ///< Signed integer type for global offsets - bool KEEP_REJECTS> ///< Whether or not we push rejected items to the back of the output -__launch_bounds__ (int(AgentSelectIfPolicyT::BLOCK_THREADS)) -__global__ void DeviceSelectSweepKernel( - InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items - FlagsInputIteratorT d_flags, ///< [in] Pointer to the input sequence of selection flags (if applicable) - SelectedOutputIteratorT d_selected_out, ///< [out] Pointer to the output sequence of selected data items - NumSelectedIteratorT d_num_selected_out, ///< [out] Pointer to the total number of items selected (i.e., length of \p d_selected_out) - ScanTileStateT tile_status, ///< [in] Tile status interface - SelectOpT 
select_op,              ///< [in] Selection operator
-    EqualityOpT                 equality_op,            ///< [in] Equality operator
-    OffsetT                     num_items,              ///< [in] Total number of input items (i.e., length of \p d_in)
-    int                         num_tiles)              ///< [in] Total number of tiles for the entire problem
-{
-    // Thread block type for selecting data from input tiles
-    typedef AgentSelectIf<
-        AgentSelectIfPolicyT,
-        InputIteratorT,
-        FlagsInputIteratorT,
-        SelectedOutputIteratorT,
-        SelectOpT,
-        EqualityOpT,
-        OffsetT,
-        KEEP_REJECTS> AgentSelectIfT;
-
-    // Shared memory for AgentSelectIf
-    __shared__ typename AgentSelectIfT::TempStorage temp_storage;
-
-    // Process tiles
-    AgentSelectIfT(temp_storage, d_in, d_flags, d_selected_out, select_op, equality_op, num_items).ConsumeRange(
-        num_tiles,
-        tile_status,
-        d_num_selected_out);
-}
-
-
-
-
-/******************************************************************************
- * Dispatch
- ******************************************************************************/
-
-/**
- * Utility class for dispatching the appropriately-tuned kernels for DeviceSelect
- */
-template <
-    typename    InputIteratorT,             ///< Random-access input iterator type for reading input items
-    typename    FlagsInputIteratorT,        ///< Random-access input iterator type for reading selection flags (NullType* if a selection functor or discontinuity flagging is to be used for selection)
-    typename    SelectedOutputIteratorT,    ///< Random-access output iterator type for writing selected items
-    typename    NumSelectedIteratorT,       ///< Output iterator type for recording the number of items selected
-    typename    SelectOpT,                  ///< Selection operator type (NullType if selection flags or discontinuity flagging is to be used for selection)
-    typename    EqualityOpT,                ///< Equality operator type (NullType if selection functor or selection flags is to be used for selection)
-    typename    OffsetT,                    ///< Signed integer type for global offsets
-    bool        KEEP_REJECTS>               ///< Whether or not we push rejected items to the back of the output
-struct DispatchSelectIf
-{
-    /******************************************************************************
-     * Types and constants
-     ******************************************************************************/
-
-    // The output value type
-    typedef typename If<(Equals<typename std::iterator_traits<SelectedOutputIteratorT>::value_type, void>::VALUE),  // OutputT = (if output iterator's value type is void) ?
-        typename std::iterator_traits<InputIteratorT>::value_type,                                                  // ... then the input iterator's value type,
-        typename std::iterator_traits<SelectedOutputIteratorT>::value_type>::Type OutputT;                          // ... else the output iterator's value type
-
-    // The flag value type
-    typedef typename std::iterator_traits<FlagsInputIteratorT>::value_type FlagT;
-
-    enum
-    {
-        INIT_KERNEL_THREADS = 128,
-    };
-
-    // Tile status descriptor interface type
-    typedef ScanTileState<OffsetT> ScanTileStateT;
-
-
-    /******************************************************************************
-     * Tuning policies
-     ******************************************************************************/
-
-    /// SM35
-    struct Policy350
-    {
-        enum {
-            NOMINAL_4B_ITEMS_PER_THREAD = 10,
-            ITEMS_PER_THREAD            = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(OutputT)))),
-        };
-
-        typedef AgentSelectIfPolicy<
-                128,
-                ITEMS_PER_THREAD,
-                BLOCK_LOAD_DIRECT,
-                LOAD_LDG,
-                BLOCK_SCAN_WARP_SCANS>
-            SelectIfPolicyT;
-    };
-
-    /// SM30
-    struct Policy300
-    {
-        enum {
-            NOMINAL_4B_ITEMS_PER_THREAD = 7,
-            ITEMS_PER_THREAD            = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(3, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(OutputT)))),
-        };
-
-        typedef AgentSelectIfPolicy<
-                128,
-                ITEMS_PER_THREAD,
-                BLOCK_LOAD_WARP_TRANSPOSE,
-                LOAD_DEFAULT,
-                BLOCK_SCAN_WARP_SCANS>
-            SelectIfPolicyT;
-    };
-
-    /// SM20
-    struct Policy200
-    {
-        enum {
-            NOMINAL_4B_ITEMS_PER_THREAD = (KEEP_REJECTS) ? 7 : 15,
-            ITEMS_PER_THREAD            = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(OutputT)))),
-        };
-
-        typedef AgentSelectIfPolicy<
-                128,
-                ITEMS_PER_THREAD,
-                BLOCK_LOAD_WARP_TRANSPOSE,
-                LOAD_DEFAULT,
-                BLOCK_SCAN_WARP_SCANS>
-            SelectIfPolicyT;
-    };
-
-    /// SM13
-    struct Policy130
-    {
-        enum {
-            NOMINAL_4B_ITEMS_PER_THREAD = 9,
-            ITEMS_PER_THREAD            = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(OutputT)))),
-        };
-
-        typedef AgentSelectIfPolicy<
-                64,
-                ITEMS_PER_THREAD,
-                BLOCK_LOAD_WARP_TRANSPOSE,
-                LOAD_DEFAULT,
-                BLOCK_SCAN_RAKING_MEMOIZE>
-            SelectIfPolicyT;
-    };
-
-    /// SM10
-    struct Policy100
-    {
-        enum {
-            NOMINAL_4B_ITEMS_PER_THREAD = 9,
-            ITEMS_PER_THREAD            = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(OutputT)))),
-        };
-
-        typedef AgentSelectIfPolicy<
-                64,
-                ITEMS_PER_THREAD,
-                BLOCK_LOAD_WARP_TRANSPOSE,
-                LOAD_DEFAULT,
-                BLOCK_SCAN_RAKING>
-            SelectIfPolicyT;
-    };
-
-
-    /******************************************************************************
-     * Tuning policies of current PTX compiler pass
-     ******************************************************************************/
-
-#if (CUB_PTX_ARCH >= 350)
-    typedef Policy350 PtxPolicy;
-
-#elif (CUB_PTX_ARCH >= 300)
-    typedef Policy300 PtxPolicy;
-
-#elif (CUB_PTX_ARCH >= 200)
-    typedef Policy200 PtxPolicy;
-
-#elif (CUB_PTX_ARCH >= 130)
-    typedef Policy130 PtxPolicy;
-
-#else
-    typedef Policy100 PtxPolicy;
-
-#endif
-
-    // "Opaque" policies (whose parameterizations aren't reflected in the type signature)
-    struct PtxSelectIfPolicyT : PtxPolicy::SelectIfPolicyT {};
-
-
-    /******************************************************************************
-     * Utilities
-     ******************************************************************************/
-
-    /**
-     * Initialize kernel dispatch configurations with the policies corresponding to the PTX assembly we will use
-     */
-    template <typename KernelConfig>
-    CUB_RUNTIME_FUNCTION __forceinline__
-    static void InitConfigs(
-        int             ptx_version,
-        KernelConfig    &select_if_config)
-    {
-    #if (CUB_PTX_ARCH > 0)
-        (void)ptx_version;
-
-        // We're on the device, so initialize the kernel dispatch configurations with the current PTX policy
-        select_if_config.template Init<PtxSelectIfPolicyT>();
-
-    #else
-
-        // We're on the host, so lookup and initialize the kernel dispatch configurations with the policies that match the device's PTX version
-        if (ptx_version >= 350)
-        {
-            select_if_config.template Init<typename Policy350::SelectIfPolicyT>();
-        }
-        else if (ptx_version >= 300)
-        {
-            select_if_config.template Init<typename Policy300::SelectIfPolicyT>();
-        }
-        else if (ptx_version >= 200)
-        {
-            select_if_config.template Init<typename Policy200::SelectIfPolicyT>();
-        }
-        else if (ptx_version >= 130)
-        {
-            select_if_config.template Init<typename Policy130::SelectIfPolicyT>();
-        }
-        else
-        {
-            select_if_config.template Init<typename Policy100::SelectIfPolicyT>();
-        }
-
-    #endif
-    }
-
-
-    /**
-     * Kernel dispatch configuration.
-     */
-    struct KernelConfig
-    {
-        int block_threads;
-        int items_per_thread;
-        int tile_items;
-
-        template <typename PolicyT>
-        CUB_RUNTIME_FUNCTION __forceinline__
-        void Init()
-        {
-            block_threads       = PolicyT::BLOCK_THREADS;
-            items_per_thread    = PolicyT::ITEMS_PER_THREAD;
-            tile_items          = block_threads * items_per_thread;
-        }
-    };
-
-
-    /******************************************************************************
-     * Dispatch entrypoints
-     ******************************************************************************/
-
-    /**
-     * Internal dispatch routine for computing a device-wide selection using the
-     * specified kernel functions.
-     */
-    template <
-        typename    ScanInitKernelPtrT,     ///< Function type of cub::DeviceScanInitKernel
-        typename    SelectIfKernelPtrT>     ///< Function type of cub::SelectIfKernelPtrT
-    CUB_RUNTIME_FUNCTION __forceinline__
-    static cudaError_t Dispatch(
-        void*                       d_temp_storage,         ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
-        size_t&                     temp_storage_bytes,     ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
-        InputIteratorT              d_in,                   ///< [in] Pointer to the input sequence of data items
-        FlagsInputIteratorT         d_flags,                ///< [in] Pointer to the input sequence of selection flags (if applicable)
-        SelectedOutputIteratorT     d_selected_out,         ///< [in] Pointer to the output sequence of selected data items
-        NumSelectedIteratorT        d_num_selected_out,     ///< [in] Pointer to the total number of items selected (i.e., length of \p d_selected_out)
-        SelectOpT                   select_op,              ///< [in] Selection operator
-        EqualityOpT                 equality_op,            ///< [in] Equality operator
-        OffsetT                     num_items,              ///< [in] Total number of input items (i.e., length of \p d_in)
-        cudaStream_t                stream,                 ///< [in] CUDA stream to launch kernels within. Default is stream0.
-        bool                        debug_synchronous,      ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false.
-        int                         /*ptx_version*/,        ///< [in] PTX version of dispatch kernels
-        ScanInitKernelPtrT          scan_init_kernel,       ///< [in] Kernel function pointer to parameterization of cub::DeviceScanInitKernel
-        SelectIfKernelPtrT          select_if_kernel,       ///< [in] Kernel function pointer to parameterization of cub::DeviceSelectSweepKernel
-        KernelConfig                select_if_config)       ///< [in] Dispatch parameters that match the policy that \p select_if_kernel was compiled for
-    {
-
-#ifndef CUB_RUNTIME_ENABLED
-        (void)d_temp_storage;
-        (void)temp_storage_bytes;
-        (void)d_in;
-        (void)d_flags;
-        (void)d_selected_out;
-        (void)d_num_selected_out;
-        (void)select_op;
-        (void)equality_op;
-        (void)num_items;
-        (void)stream;
-        (void)debug_synchronous;
-        (void)scan_init_kernel;
-        (void)select_if_kernel;
-        (void)select_if_config;
-
-        // Kernel launch not supported from this device
-        return CubDebug(cudaErrorNotSupported);
-
-#else
-
-        cudaError error = cudaSuccess;
-        do
-        {
-            // Get device ordinal
-            int device_ordinal;
-            if (CubDebug(error = cudaGetDevice(&device_ordinal))) break;
-
-            // Get SM count
-            int sm_count;
-            if (CubDebug(error = cudaDeviceGetAttribute(&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break;
-
-            // Number of input tiles
-            int tile_size = select_if_config.block_threads * select_if_config.items_per_thread;
-            int num_tiles = (num_items + tile_size - 1) / tile_size;
-
-            // Specify temporary storage allocation requirements
-            size_t allocation_sizes[1];
-            if (CubDebug(error = ScanTileStateT::AllocationSize(num_tiles, allocation_sizes[0]))) break;    // bytes needed for tile status descriptors
-
-            // Compute allocation pointers into the single storage blob (or compute the necessary size of the blob)
-            void* allocations[1];
-            if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break;
-            if (d_temp_storage == NULL)
-            {
-                // Return if the caller is simply requesting the size of the storage allocation
-                break;
-            }
-
-            // Construct the tile status interface
-            ScanTileStateT tile_status;
-            if (CubDebug(error = tile_status.Init(num_tiles, allocations[0], allocation_sizes[0]))) break;
-
-            // Log scan_init_kernel configuration
-            int init_grid_size = CUB_MAX(1, (num_tiles + INIT_KERNEL_THREADS - 1) / INIT_KERNEL_THREADS);
-            if (debug_synchronous) _CubLog("Invoking scan_init_kernel<<<%d, %d, 0, %lld>>>()\n", init_grid_size, INIT_KERNEL_THREADS, (long long) stream);
-
-            // Invoke scan_init_kernel to initialize tile descriptors
-            scan_init_kernel<<<init_grid_size, INIT_KERNEL_THREADS, 0, stream>>>(
-                tile_status,
-                num_tiles,
-                d_num_selected_out);
-
-            // Check for failure to launch
-            if (CubDebug(error = cudaPeekAtLastError())) break;
-
-            // Sync the stream if specified to flush runtime errors
-            if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break;
-
-            // Return if empty problem
-            if (num_items == 0)
-                break;
-
-            // Get SM occupancy for select_if_kernel
-            int range_select_sm_occupancy;
-            if (CubDebug(error = MaxSmOccupancy(
-                range_select_sm_occupancy,            // out
-                select_if_kernel,
-                select_if_config.block_threads))) break;
-
-            // Get max x-dimension of grid
-            int max_dim_x;
-            if (CubDebug(error = cudaDeviceGetAttribute(&max_dim_x, cudaDevAttrMaxGridDimX, device_ordinal))) break;
-
-            // Get grid size for scanning tiles
-            dim3 scan_grid_size;
-            scan_grid_size.z = 1;
-            scan_grid_size.y = ((unsigned int) num_tiles + max_dim_x - 1) / max_dim_x;
-            scan_grid_size.x = CUB_MIN(num_tiles, max_dim_x);
-
-            // Log select_if_kernel configuration
-            if (debug_synchronous) _CubLog("Invoking select_if_kernel<<<{%d,%d,%d}, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n",
-                scan_grid_size.x, scan_grid_size.y, scan_grid_size.z, select_if_config.block_threads, (long long) stream, select_if_config.items_per_thread, range_select_sm_occupancy);
-
-            // Invoke select_if_kernel
-            select_if_kernel<<<scan_grid_size, select_if_config.block_threads, 0, stream>>>(
-                d_in,
-                d_flags,
-                d_selected_out,
-                d_num_selected_out,
-                tile_status,
-                select_op,
-                equality_op,
-                num_items,
-                num_tiles);
-
-            // Check for failure to launch
-            if (CubDebug(error = cudaPeekAtLastError())) break;
-
-            // Sync the stream if specified to flush runtime errors
-            if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break;
-        }
-        while (0);
-
-        return error;
-
-#endif  // CUB_RUNTIME_ENABLED
-    }
-
-
-    /**
-     * Internal dispatch routine
-     */
-    CUB_RUNTIME_FUNCTION __forceinline__
-    static cudaError_t Dispatch(
-        void*                       d_temp_storage,         ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
-        size_t&                     temp_storage_bytes,     ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
-        InputIteratorT              d_in,                   ///< [in] Pointer to the input sequence of data items
-        FlagsInputIteratorT         d_flags,                ///< [in] Pointer to the input sequence of selection flags (if applicable)
-        SelectedOutputIteratorT     d_selected_out,         ///< [in] Pointer to the output sequence of selected data items
-        NumSelectedIteratorT        d_num_selected_out,     ///< [in] Pointer to the total number of items selected (i.e., length of \p d_selected_out)
-        SelectOpT                   select_op,              ///< [in] Selection operator
-        EqualityOpT                 equality_op,            ///< [in] Equality operator
-        OffsetT                     num_items,              ///< [in] Total number of input items (i.e., length of \p d_in)
-        cudaStream_t                stream,                 ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0.
-        bool                        debug_synchronous)      ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false.
-    {
-        cudaError error = cudaSuccess;
-        do
-        {
-            // Get PTX version
-            int ptx_version;
-    #if (CUB_PTX_ARCH == 0)
-            if (CubDebug(error = PtxVersion(ptx_version))) break;
-    #else
-            ptx_version = CUB_PTX_ARCH;
-    #endif
-
-            // Get kernel dispatch configurations
-            KernelConfig select_if_config;
-            InitConfigs(ptx_version, select_if_config);
-
-            // Dispatch
-            if (CubDebug(error = Dispatch(
-                d_temp_storage,
-                temp_storage_bytes,
-                d_in,
-                d_flags,
-                d_selected_out,
-                d_num_selected_out,
-                select_op,
-                equality_op,
-                num_items,
-                stream,
-                debug_synchronous,
-                ptx_version,
-                DeviceCompactInitKernel<ScanTileStateT, NumSelectedIteratorT>,
-                DeviceSelectSweepKernel<PtxSelectIfPolicyT, InputIteratorT, FlagsInputIteratorT, SelectedOutputIteratorT, NumSelectedIteratorT, ScanTileStateT, SelectOpT, EqualityOpT, OffsetT, KEEP_REJECTS>,
-                select_if_config))) break;
-        }
-        while (0);
-
-        return error;
-    }
-};
-
-
-}               // CUB namespace
-CUB_NS_POSTFIX  // Optional outer namespace(s)
-
-
diff --git a/ml-xgboost/cub/cub/device/dispatch/dispatch_spmv_csrt.cuh b/ml-xgboost/cub/cub/device/dispatch/dispatch_spmv_csrt.cuh
deleted file mode 100644
index d7c6d9e..0000000
--- a/ml-xgboost/cub/cub/device/dispatch/dispatch_spmv_csrt.cuh
+++ /dev/null
@@ -1,477 +0,0 @@
-
-/******************************************************************************
- * Copyright (c) 2011, Duane Merrill.  All rights reserved.
- * Copyright (c) 2011-2016, NVIDIA CORPORATION.  All rights reserved.
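(Editorial aside on the select-if dispatch deleted just above: every `Dispatch` routine in these files follows CUB's two-phase temporary-storage idiom, visible in the `d_temp_storage == NULL` early exit. A short sketch of the idiom through the public `cub::DeviceSelect::If` front end; the `IsPositive` functor and the device pointers are assumed to exist and be populated:)

```cuda
#include <cub/cub.cuh>

// Hypothetical selection functor.
struct IsPositive
{
    __host__ __device__ __forceinline__ bool operator()(const int &x) const { return x > 0; }
};

void SelectPositive(int *d_in, int *d_out, int *d_num_selected_out, int num_items)
{
    void   *d_temp_storage     = NULL;
    size_t  temp_storage_bytes = 0;

    // Phase 1: d_temp_storage == NULL, so dispatch only writes the required
    // allocation size into temp_storage_bytes and returns (no work is done).
    cub::DeviceSelect::If(d_temp_storage, temp_storage_bytes,
                          d_in, d_out, d_num_selected_out, num_items, IsPositive());

    cudaMalloc(&d_temp_storage, temp_storage_bytes);

    // Phase 2: the same call with real storage actually runs the selection.
    cub::DeviceSelect::If(d_temp_storage, temp_storage_bytes,
                          d_in, d_out, d_num_selected_out, num_items, IsPositive());

    cudaFree(d_temp_storage);
}
```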
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::DeviceSpmv provides device-wide parallel operations for performing sparse-matrix * vector multiplication (SpMV). - */ - -#pragma once - -#include -#include - -#include "dispatch_scan.cuh" -#include "../../agent/agent_spmv_orig.cuh" -#include "../../util_type.cuh" -#include "../../util_debug.cuh" -#include "../../util_device.cuh" -#include "../../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/****************************************************************************** - * SpMV kernel entry points - *****************************************************************************/ - -/** - * Spmv agent entry point - */ -template < - typename SpmvPolicyT, ///< Parameterized SpmvPolicy tuning policy type - typename ValueT, ///< Matrix and vector value type - typename OffsetT, ///< Signed integer type for sequence offsets - bool HAS_ALPHA, ///< Whether the input parameter Alpha is 1 - bool HAS_BETA> ///< Whether the input parameter Beta is 0 -__launch_bounds__ (int(SpmvPolicyT::BLOCK_THREADS)) -__global__ void DeviceSpmvKernel( - SpmvParams spmv_params, ///< [in] SpMV input parameter bundle - int merge_items_per_block, ///< [in] Number of merge tiles per block - KeyValuePair* d_tile_carry_pairs) ///< [out] Pointer to the temporary array carry-out dot product row-ids, one per block -{ - // Spmv agent type specialization - typedef AgentSpmv< - SpmvPolicyT, - ValueT, - OffsetT, - HAS_ALPHA, - HAS_BETA> - AgentSpmvT; - - // Shared memory for AgentSpmv - __shared__ typename AgentSpmvT::TempStorage temp_storage; - - AgentSpmvT(temp_storage, spmv_params).ConsumeTile( - merge_items_per_block, d_tile_carry_pairs); -} - - -/****************************************************************************** - * Dispatch - ******************************************************************************/ - -/** - * Utility class for dispatching the appropriately-tuned kernels 
for DeviceSpmv - */ -template < - typename ValueT, ///< Matrix and vector value type - typename OffsetT> ///< Signed integer type for global offsets -struct DispatchSpmv -{ - //--------------------------------------------------------------------- - // Constants and Types - //--------------------------------------------------------------------- - - enum - { - INIT_KERNEL_THREADS = 128 - }; - - // SpmvParams bundle type - typedef SpmvParams SpmvParamsT; - - // Tuple type for scanning {row id, accumulated value} - typedef KeyValuePair KeyValuePairT; - - - //--------------------------------------------------------------------- - // Tuning policies - //--------------------------------------------------------------------- - - /// SM11 - struct Policy110 - { - typedef AgentSpmvPolicy< - 128, - 1, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - false, - BLOCK_SCAN_WARP_SCANS> - SpmvPolicyT; - }; - - /// SM20 - struct Policy200 - { - typedef AgentSpmvPolicy< - 96, - 18, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - false, - BLOCK_SCAN_RAKING> - SpmvPolicyT; - }; - - - - /// SM30 - struct Policy300 - { - typedef AgentSpmvPolicy< - 96, - 6, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - false, - BLOCK_SCAN_WARP_SCANS> - SpmvPolicyT; - }; - - - /// SM35 - struct Policy350 - { -/* - typedef AgentSpmvPolicy< - (sizeof(ValueT) > 4) ? 96 : 128, - (sizeof(ValueT) > 4) ? 4 : 7, - LOAD_LDG, - LOAD_CA, - LOAD_LDG, - LOAD_LDG, - LOAD_LDG, - (sizeof(ValueT) > 4) ? true : false, - BLOCK_SCAN_WARP_SCANS> - SpmvPolicyT; -*/ - typedef AgentSpmvPolicy< - 128, - 5, - LOAD_CA, - LOAD_CA, - LOAD_LDG, - LOAD_LDG, - LOAD_LDG, - (sizeof(ValueT) > 4) ? true : false, - BLOCK_SCAN_WARP_SCANS> - SpmvPolicyT; - }; - - /// SM37 - struct Policy370 - { - - typedef AgentSpmvPolicy< - (sizeof(ValueT) > 4) ? 128 : 128, - (sizeof(ValueT) > 4) ? 9 : 14, - LOAD_LDG, - LOAD_CA, - LOAD_LDG, - LOAD_LDG, - LOAD_LDG, - false, - BLOCK_SCAN_WARP_SCANS> - SpmvPolicyT; - }; - - /// SM50 - struct Policy500 - { - typedef AgentSpmvPolicy< - (sizeof(ValueT) > 4) ? 64 : 128, - (sizeof(ValueT) > 4) ? 6 : 7, - LOAD_LDG, - LOAD_DEFAULT, - (sizeof(ValueT) > 4) ? LOAD_LDG : LOAD_DEFAULT, - (sizeof(ValueT) > 4) ? LOAD_LDG : LOAD_DEFAULT, - LOAD_LDG, - (sizeof(ValueT) > 4) ? true : false, - (sizeof(ValueT) > 4) ? 
BLOCK_SCAN_WARP_SCANS : BLOCK_SCAN_RAKING_MEMOIZE> - SpmvPolicyT; - }; - - - - //--------------------------------------------------------------------- - // Tuning policies of current PTX compiler pass - //--------------------------------------------------------------------- - -#if (CUB_PTX_ARCH >= 500) - typedef Policy500 PtxPolicy; - -#elif (CUB_PTX_ARCH >= 370) - typedef Policy370 PtxPolicy; - -#elif (CUB_PTX_ARCH >= 350) - typedef Policy350 PtxPolicy; - -#elif (CUB_PTX_ARCH >= 300) - typedef Policy300 PtxPolicy; - -#elif (CUB_PTX_ARCH >= 200) - typedef Policy200 PtxPolicy; - -#else - typedef Policy110 PtxPolicy; - -#endif - - // "Opaque" policies (whose parameterizations aren't reflected in the type signature) - struct PtxSpmvPolicyT : PtxPolicy::SpmvPolicyT {}; - - - //--------------------------------------------------------------------- - // Utilities - //--------------------------------------------------------------------- - - /** - * Initialize kernel dispatch configurations with the policies corresponding to the PTX assembly we will use - */ - template - CUB_RUNTIME_FUNCTION __forceinline__ - static void InitConfigs( - int ptx_version, - KernelConfig &spmv_config) - { - #if (CUB_PTX_ARCH > 0) - - // We're on the device, so initialize the kernel dispatch configurations with the current PTX policy - spmv_config.template Init(); - - #else - - // We're on the host, so lookup and initialize the kernel dispatch configurations with the policies that match the device's PTX version - if (ptx_version >= 500) - { - spmv_config.template Init(); - } - else if (ptx_version >= 370) - { - spmv_config.template Init(); - } - else if (ptx_version >= 350) - { - spmv_config.template Init(); - } - else if (ptx_version >= 300) - { - spmv_config.template Init(); - } - else if (ptx_version >= 200) - { - spmv_config.template Init(); - } - else - { - spmv_config.template Init(); - } - - #endif - } - - - /** - * Kernel kernel dispatch configuration. - */ - struct KernelConfig - { - int block_threads; - int items_per_thread; - int tile_items; - - template - CUB_RUNTIME_FUNCTION __forceinline__ - void Init() - { - block_threads = PolicyT::BLOCK_THREADS; - items_per_thread = PolicyT::ITEMS_PER_THREAD; - tile_items = block_threads * items_per_thread; - } - }; - - - //--------------------------------------------------------------------- - // Dispatch entrypoints - //--------------------------------------------------------------------- - - /** - * Internal dispatch routine for computing a device-wide reduction using the - * specified kernel functions. - * - * If the input is larger than a single tile, this method uses two-passes of - * kernel invocations. - */ - template < - typename SpmvKernelT> ///< Function type of cub::AgentSpmvKernel - CUB_RUNTIME_FUNCTION __forceinline__ - static cudaError_t Dispatch( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - SpmvParamsT& spmv_params, ///< SpMV input parameter bundle - cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous, ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
-        SpmvKernelT             spmv_kernel,            ///< [in] Kernel function pointer to parameterization of AgentSpmvKernel
-        KernelConfig            spmv_config)            ///< [in] Dispatch parameters that match the policy that \p spmv_kernel was compiled for
-    {
-#ifndef CUB_RUNTIME_ENABLED
-
-        // Kernel launch not supported from this device
-        return CubDebug(cudaErrorNotSupported);
-
-#else
-        cudaError error = cudaSuccess;
-        do
-        {
-            // Get device ordinal
-            int device_ordinal;
-            if (CubDebug(error = cudaGetDevice(&device_ordinal))) break;
-
-            // Get SM count
-            int sm_count;
-            if (CubDebug(error = cudaDeviceGetAttribute(&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break;
-
-            // Total number of spmv work items
-            int num_merge_items = spmv_params.num_rows + spmv_params.num_nonzeros;
-
-            // Get SM occupancy for kernels
-            int spmv_sm_occupancy;
-            if (CubDebug(error = MaxSmOccupancy(
-                spmv_sm_occupancy,
-                spmv_kernel,
-                spmv_config.block_threads))) break;
-            int spmv_device_occupancy = spmv_sm_occupancy * sm_count;
-
-            // Grid dimensions
-            int spmv_grid_size = CUB_MIN(((num_merge_items + spmv_config.block_threads - 1) / spmv_config.block_threads), spmv_device_occupancy);
-
-            // Merge items per block
-            int merge_items_per_block = (num_merge_items + spmv_grid_size - 1) / spmv_grid_size;
-
-            // Get the temporary storage allocation requirements
-            size_t allocation_sizes[1];
-            allocation_sizes[0] = spmv_grid_size * sizeof(KeyValuePairT);    // bytes needed for block carry-out pairs
-
-            // Alias the temporary allocations from the single storage blob (or compute the necessary size of the blob)
-            void* allocations[1];
-            if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break;
-            if (d_temp_storage == NULL)
-            {
-                // Return if the caller is simply requesting the size of the storage allocation
-                return cudaSuccess;
-            }
-            KeyValuePairT* d_tile_carry_pairs = (KeyValuePairT*) allocations[0];    // Agent carry-out pairs
-
-            // Log spmv_kernel configuration
-            if (debug_synchronous) _CubLog("Invoking spmv_kernel<<<%d, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n",
-                spmv_grid_size, spmv_config.block_threads, (long long) stream, spmv_config.items_per_thread, spmv_sm_occupancy);
-
-            // Invoke spmv_kernel
-            spmv_kernel<<<spmv_grid_size, spmv_config.block_threads, 0, stream>>>(
-                spmv_params,
-                merge_items_per_block,
-                d_tile_carry_pairs);
-
-            // Check for failure to launch
-            if (CubDebug(error = cudaPeekAtLastError())) break;
-
-            // Sync the stream if specified to flush runtime errors
-            if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break;
-
-        }
-        while (0);
-
-        return error;
-
-#endif // CUB_RUNTIME_ENABLED
-    }
-
-
-    /**
-     * Internal dispatch routine for computing a device-wide reduction
-     */
-    CUB_RUNTIME_FUNCTION __forceinline__
-    static cudaError_t Dispatch(
-        void*                   d_temp_storage,             ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
-        size_t&                 temp_storage_bytes,         ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
-        SpmvParamsT&            spmv_params,                ///< SpMV input parameter bundle
-        cudaStream_t            stream = 0,                 ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0.
-        bool                    debug_synchronous = false)  ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false.
- { - cudaError error = cudaSuccess; - do - { - // Get PTX version - int ptx_version; - #if (CUB_PTX_ARCH == 0) - if (CubDebug(error = PtxVersion(ptx_version))) break; - #else - ptx_version = CUB_PTX_ARCH; - #endif - - // Get kernel kernel dispatch configurations - KernelConfig spmv_config; - InitConfigs(ptx_version, spmv_config); - - if (CubDebug(error = Dispatch( - d_temp_storage, - temp_storage_bytes, - spmv_params, - stream, - debug_synchronous, - DeviceSpmvKernel, - spmv_config))) break; - - } - while (0); - - return error; - } -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - - diff --git a/ml-xgboost/cub/cub/device/dispatch/dispatch_spmv_orig.cuh b/ml-xgboost/cub/cub/device/dispatch/dispatch_spmv_orig.cuh deleted file mode 100644 index 1650628..0000000 --- a/ml-xgboost/cub/cub/device/dispatch/dispatch_spmv_orig.cuh +++ /dev/null @@ -1,850 +0,0 @@ - -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::DeviceSpmv provides device-wide parallel operations for performing sparse-matrix * vector multiplication (SpMV). - */ - -#pragma once - -#include -#include - -#include "../../agent/single_pass_scan_operators.cuh" -#include "../../agent/agent_segment_fixup.cuh" -#include "../../agent/agent_spmv_orig.cuh" -#include "../../util_type.cuh" -#include "../../util_debug.cuh" -#include "../../util_device.cuh" -#include "../../thread/thread_search.cuh" -#include "../../grid/grid_queue.cuh" -#include "../../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/****************************************************************************** - * SpMV kernel entry points - *****************************************************************************/ - -/** - * Spmv search kernel. 
Identifies merge path starting coordinates for each tile. - */ -template < - typename AgentSpmvPolicyT, ///< Parameterized SpmvPolicy tuning policy type - typename ValueT, ///< Matrix and vector value type - typename OffsetT> ///< Signed integer type for sequence offsets -__global__ void DeviceSpmv1ColKernel( - SpmvParams spmv_params) ///< [in] SpMV input parameter bundle -{ - typedef CacheModifiedInputIterator< - AgentSpmvPolicyT::VECTOR_VALUES_LOAD_MODIFIER, - ValueT, - OffsetT> - VectorValueIteratorT; - - VectorValueIteratorT wrapped_vector_x(spmv_params.d_vector_x); - - int row_idx = (blockIdx.x * blockDim.x) + threadIdx.x; - if (row_idx < spmv_params.num_rows) - { - OffsetT end_nonzero_idx = spmv_params.d_row_end_offsets[row_idx]; - OffsetT nonzero_idx = spmv_params.d_row_end_offsets[row_idx - 1]; - - ValueT value = 0.0; - if (end_nonzero_idx != nonzero_idx) - { - value = spmv_params.d_values[nonzero_idx] * wrapped_vector_x[spmv_params.d_column_indices[nonzero_idx]]; - } - - spmv_params.d_vector_y[row_idx] = value; - } -} - - -/** - * Spmv search kernel. Identifies merge path starting coordinates for each tile. - */ -template < - typename SpmvPolicyT, ///< Parameterized SpmvPolicy tuning policy type - typename OffsetT, ///< Signed integer type for sequence offsets - typename CoordinateT, ///< Merge path coordinate type - typename SpmvParamsT> ///< SpmvParams type -__global__ void DeviceSpmvSearchKernel( - int num_merge_tiles, ///< [in] Number of SpMV merge tiles (spmv grid size) - CoordinateT* d_tile_coordinates, ///< [out] Pointer to the temporary array of tile starting coordinates - SpmvParamsT spmv_params) ///< [in] SpMV input parameter bundle -{ - /// Constants - enum - { - BLOCK_THREADS = SpmvPolicyT::BLOCK_THREADS, - ITEMS_PER_THREAD = SpmvPolicyT::ITEMS_PER_THREAD, - TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, - }; - - typedef CacheModifiedInputIterator< - SpmvPolicyT::ROW_OFFSETS_SEARCH_LOAD_MODIFIER, - OffsetT, - OffsetT> - RowOffsetsSearchIteratorT; - - // Find the starting coordinate for all tiles (plus the end coordinate of the last one) - int tile_idx = (blockIdx.x * blockDim.x) + threadIdx.x; - if (tile_idx < num_merge_tiles + 1) - { - OffsetT diagonal = (tile_idx * TILE_ITEMS); - CoordinateT tile_coordinate; - CountingInputIterator nonzero_indices(0); - - // Search the merge path - MergePathSearch( - diagonal, - RowOffsetsSearchIteratorT(spmv_params.d_row_end_offsets), - nonzero_indices, - spmv_params.num_rows, - spmv_params.num_nonzeros, - tile_coordinate); - - // Output starting offset - d_tile_coordinates[tile_idx] = tile_coordinate; - } -} - - -/** - * Spmv agent entry point - */ -template < - typename SpmvPolicyT, ///< Parameterized SpmvPolicy tuning policy type - typename ScanTileStateT, ///< Tile status interface type - typename ValueT, ///< Matrix and vector value type - typename OffsetT, ///< Signed integer type for sequence offsets - typename CoordinateT, ///< Merge path coordinate type - bool HAS_ALPHA, ///< Whether the input parameter Alpha is 1 - bool HAS_BETA> ///< Whether the input parameter Beta is 0 -__launch_bounds__ (int(SpmvPolicyT::BLOCK_THREADS)) -__global__ void DeviceSpmvKernel( - SpmvParams spmv_params, ///< [in] SpMV input parameter bundle - CoordinateT* d_tile_coordinates, ///< [in] Pointer to the temporary array of tile starting coordinates - KeyValuePair* d_tile_carry_pairs, ///< [out] Pointer to the temporary array carry-out dot product row-ids, one per block - int num_tiles, ///< [in] Number of merge tiles - ScanTileStateT tile_state, 
///< [in] Tile status interface for fixup reduce-by-key kernel - int num_segment_fixup_tiles) ///< [in] Number of reduce-by-key tiles (fixup grid size) -{ - // Spmv agent type specialization - typedef AgentSpmv< - SpmvPolicyT, - ValueT, - OffsetT, - HAS_ALPHA, - HAS_BETA> - AgentSpmvT; - - // Shared memory for AgentSpmv - __shared__ typename AgentSpmvT::TempStorage temp_storage; - - AgentSpmvT(temp_storage, spmv_params).ConsumeTile( - d_tile_coordinates, - d_tile_carry_pairs, - num_tiles); - - // Initialize fixup tile status - tile_state.InitializeStatus(num_segment_fixup_tiles); - -} - - -/** - * Multi-block reduce-by-key sweep kernel entry point - */ -template < - typename AgentSegmentFixupPolicyT, ///< Parameterized AgentSegmentFixupPolicy tuning policy type - typename PairsInputIteratorT, ///< Random-access input iterator type for keys - typename AggregatesOutputIteratorT, ///< Random-access output iterator type for values - typename OffsetT, ///< Signed integer type for global offsets - typename ScanTileStateT> ///< Tile status interface type -__launch_bounds__ (int(AgentSegmentFixupPolicyT::BLOCK_THREADS)) -__global__ void DeviceSegmentFixupKernel( - PairsInputIteratorT d_pairs_in, ///< [in] Pointer to the array carry-out dot product row-ids, one per spmv block - AggregatesOutputIteratorT d_aggregates_out, ///< [in,out] Output value aggregates - OffsetT num_items, ///< [in] Total number of items to select from - int num_tiles, ///< [in] Total number of tiles for the entire problem - ScanTileStateT tile_state) ///< [in] Tile status interface -{ - // Thread block type for reducing tiles of value segments - typedef AgentSegmentFixup< - AgentSegmentFixupPolicyT, - PairsInputIteratorT, - AggregatesOutputIteratorT, - cub::Equality, - cub::Sum, - OffsetT> - AgentSegmentFixupT; - - // Shared memory for AgentSegmentFixup - __shared__ typename AgentSegmentFixupT::TempStorage temp_storage; - - // Process tiles - AgentSegmentFixupT(temp_storage, d_pairs_in, d_aggregates_out, cub::Equality(), cub::Sum()).ConsumeRange( - num_items, - num_tiles, - tile_state); -} - - -/****************************************************************************** - * Dispatch - ******************************************************************************/ - -/** - * Utility class for dispatching the appropriately-tuned kernels for DeviceSpmv - */ -template < - typename ValueT, ///< Matrix and vector value type - typename OffsetT> ///< Signed integer type for global offsets -struct DispatchSpmv -{ - //--------------------------------------------------------------------- - // Constants and Types - //--------------------------------------------------------------------- - - enum - { - INIT_KERNEL_THREADS = 128 - }; - - // SpmvParams bundle type - typedef SpmvParams SpmvParamsT; - - // 2D merge path coordinate type - typedef typename CubVector::Type CoordinateT; - - // Tile status descriptor interface type - typedef ReduceByKeyScanTileState ScanTileStateT; - - // Tuple type for scanning (pairs accumulated segment-value with segment-index) - typedef KeyValuePair KeyValuePairT; - - - //--------------------------------------------------------------------- - // Tuning policies - //--------------------------------------------------------------------- - - /// SM11 - struct Policy110 - { - typedef AgentSpmvPolicy< - 128, - 1, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - false, - BLOCK_SCAN_WARP_SCANS> - SpmvPolicyT; - - typedef AgentSegmentFixupPolicy< - 128, - 4, - BLOCK_LOAD_VECTORIZE, - 
LOAD_DEFAULT, - BLOCK_SCAN_WARP_SCANS> - SegmentFixupPolicyT; - }; - - /// SM20 - struct Policy200 - { - typedef AgentSpmvPolicy< - 96, - 18, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - false, - BLOCK_SCAN_RAKING> - SpmvPolicyT; - - typedef AgentSegmentFixupPolicy< - 128, - 4, - BLOCK_LOAD_VECTORIZE, - LOAD_DEFAULT, - BLOCK_SCAN_WARP_SCANS> - SegmentFixupPolicyT; - - }; - - - - /// SM30 - struct Policy300 - { - typedef AgentSpmvPolicy< - 96, - 6, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - false, - BLOCK_SCAN_WARP_SCANS> - SpmvPolicyT; - - typedef AgentSegmentFixupPolicy< - 128, - 4, - BLOCK_LOAD_VECTORIZE, - LOAD_DEFAULT, - BLOCK_SCAN_WARP_SCANS> - SegmentFixupPolicyT; - - }; - - - /// SM35 - struct Policy350 - { - typedef AgentSpmvPolicy< - (sizeof(ValueT) > 4) ? 96 : 128, - (sizeof(ValueT) > 4) ? 4 : 7, - LOAD_LDG, - LOAD_CA, - LOAD_LDG, - LOAD_LDG, - LOAD_LDG, - (sizeof(ValueT) > 4) ? true : false, - BLOCK_SCAN_WARP_SCANS> - SpmvPolicyT; - - typedef AgentSegmentFixupPolicy< - 128, - 3, - BLOCK_LOAD_VECTORIZE, - LOAD_LDG, - BLOCK_SCAN_WARP_SCANS> - SegmentFixupPolicyT; - }; - - - /// SM37 - struct Policy370 - { - - typedef AgentSpmvPolicy< - (sizeof(ValueT) > 4) ? 128 : 128, - (sizeof(ValueT) > 4) ? 9 : 14, - LOAD_LDG, - LOAD_CA, - LOAD_LDG, - LOAD_LDG, - LOAD_LDG, - false, - BLOCK_SCAN_WARP_SCANS> - SpmvPolicyT; - - typedef AgentSegmentFixupPolicy< - 128, - 3, - BLOCK_LOAD_VECTORIZE, - LOAD_LDG, - BLOCK_SCAN_WARP_SCANS> - SegmentFixupPolicyT; - }; - - /// SM50 - struct Policy500 - { - typedef AgentSpmvPolicy< - (sizeof(ValueT) > 4) ? 64 : 128, - (sizeof(ValueT) > 4) ? 6 : 7, - LOAD_LDG, - LOAD_DEFAULT, - (sizeof(ValueT) > 4) ? LOAD_LDG : LOAD_DEFAULT, - (sizeof(ValueT) > 4) ? LOAD_LDG : LOAD_DEFAULT, - LOAD_LDG, - (sizeof(ValueT) > 4) ? true : false, - (sizeof(ValueT) > 4) ? 
BLOCK_SCAN_WARP_SCANS : BLOCK_SCAN_RAKING_MEMOIZE> - SpmvPolicyT; - - - typedef AgentSegmentFixupPolicy< - 128, - 3, - BLOCK_LOAD_VECTORIZE, - LOAD_LDG, - BLOCK_SCAN_RAKING_MEMOIZE> - SegmentFixupPolicyT; - }; - - - - //--------------------------------------------------------------------- - // Tuning policies of current PTX compiler pass - //--------------------------------------------------------------------- - -#if (CUB_PTX_ARCH >= 500) - typedef Policy500 PtxPolicy; - -#elif (CUB_PTX_ARCH >= 370) - typedef Policy370 PtxPolicy; - -#elif (CUB_PTX_ARCH >= 350) - typedef Policy350 PtxPolicy; - -#elif (CUB_PTX_ARCH >= 300) - typedef Policy300 PtxPolicy; - -#elif (CUB_PTX_ARCH >= 200) - typedef Policy200 PtxPolicy; - -#else - typedef Policy110 PtxPolicy; - -#endif - - // "Opaque" policies (whose parameterizations aren't reflected in the type signature) - struct PtxSpmvPolicyT : PtxPolicy::SpmvPolicyT {}; - struct PtxSegmentFixupPolicy : PtxPolicy::SegmentFixupPolicyT {}; - - - //--------------------------------------------------------------------- - // Utilities - //--------------------------------------------------------------------- - - /** - * Initialize kernel dispatch configurations with the policies corresponding to the PTX assembly we will use - */ - template - CUB_RUNTIME_FUNCTION __forceinline__ - static void InitConfigs( - int ptx_version, - KernelConfig &spmv_config, - KernelConfig &segment_fixup_config) - { - #if (CUB_PTX_ARCH > 0) - - // We're on the device, so initialize the kernel dispatch configurations with the current PTX policy - spmv_config.template Init(); - segment_fixup_config.template Init(); - - #else - - // We're on the host, so lookup and initialize the kernel dispatch configurations with the policies that match the device's PTX version - if (ptx_version >= 500) - { - spmv_config.template Init(); - segment_fixup_config.template Init(); - } - else if (ptx_version >= 370) - { - spmv_config.template Init(); - segment_fixup_config.template Init(); - } - else if (ptx_version >= 350) - { - spmv_config.template Init(); - segment_fixup_config.template Init(); - } - else if (ptx_version >= 300) - { - spmv_config.template Init(); - segment_fixup_config.template Init(); - - } - else if (ptx_version >= 200) - { - spmv_config.template Init(); - segment_fixup_config.template Init(); - } - else - { - spmv_config.template Init(); - segment_fixup_config.template Init(); - } - - #endif - } - - - /** - * Kernel kernel dispatch configuration. - */ - struct KernelConfig - { - int block_threads; - int items_per_thread; - int tile_items; - - template - CUB_RUNTIME_FUNCTION __forceinline__ - void Init() - { - block_threads = PolicyT::BLOCK_THREADS; - items_per_thread = PolicyT::ITEMS_PER_THREAD; - tile_items = block_threads * items_per_thread; - } - }; - - - //--------------------------------------------------------------------- - // Dispatch entrypoints - //--------------------------------------------------------------------- - - /** - * Internal dispatch routine for computing a device-wide reduction using the - * specified kernel functions. - * - * If the input is larger than a single tile, this method uses two-passes of - * kernel invocations. 
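(Editorial aside on the `InitConfigs` pattern above, which every dispatcher in these files shares: one tuning policy per SM generation, bound at compile time in the device pass where `CUB_PTX_ARCH > 0`, and looked up at run time on the host from the device's PTX version. A stripped-down sketch of the mechanism; the policy structs and their constants are illustrative, not CUB's actual tunings:)

```cuda
// Illustrative tuning policies keyed by SM generation.
struct Policy350Sketch { enum { BLOCK_THREADS = 128, ITEMS_PER_THREAD = 7  }; };
struct Policy200Sketch { enum { BLOCK_THREADS = 128, ITEMS_PER_THREAD = 15 }; };

// Runtime-visible configuration extracted from a compile-time policy.
struct KernelConfigSketch
{
    int block_threads;
    int items_per_thread;

    template <typename PolicyT>
    __host__ __device__ __forceinline__ void Init()
    {
        block_threads    = PolicyT::BLOCK_THREADS;
        items_per_thread = PolicyT::ITEMS_PER_THREAD;
    }
};

// Pick the policy matching the target's PTX version.
__host__ __device__ __forceinline__ void InitConfigSketch(int ptx_version, KernelConfigSketch &config)
{
    if (ptx_version >= 350)
        config.Init<Policy350Sketch>();
    else
        config.Init<Policy200Sketch>();
}
```

The indirection lets the host-side dispatch loop size its grids with the exact tile geometry the kernel was compiled against.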
- */ - template < - typename Spmv1ColKernelT, ///< Function type of cub::DeviceSpmv1ColKernel - typename SpmvSearchKernelT, ///< Function type of cub::AgentSpmvSearchKernel - typename SpmvKernelT, ///< Function type of cub::AgentSpmvKernel - typename SegmentFixupKernelT> ///< Function type of cub::DeviceSegmentFixupKernelT - CUB_RUNTIME_FUNCTION __forceinline__ - static cudaError_t Dispatch( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - SpmvParamsT& spmv_params, ///< SpMV input parameter bundle - cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous, ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - Spmv1ColKernelT spmv_1col_kernel, ///< [in] Kernel function pointer to parameterization of DeviceSpmv1ColKernel - SpmvSearchKernelT spmv_search_kernel, ///< [in] Kernel function pointer to parameterization of AgentSpmvSearchKernel - SpmvKernelT spmv_kernel, ///< [in] Kernel function pointer to parameterization of AgentSpmvKernel - SegmentFixupKernelT segment_fixup_kernel, ///< [in] Kernel function pointer to parameterization of cub::DeviceSegmentFixupKernel - KernelConfig spmv_config, ///< [in] Dispatch parameters that match the policy that \p spmv_kernel was compiled for - KernelConfig segment_fixup_config) ///< [in] Dispatch parameters that match the policy that \p segment_fixup_kernel was compiled for - { -#ifndef CUB_RUNTIME_ENABLED - - // Kernel launch not supported from this device - return CubDebug(cudaErrorNotSupported ); - -#else - cudaError error = cudaSuccess; - do - { - if (spmv_params.num_cols == 1) - { - if (d_temp_storage == NULL) - { - // Return if the caller is simply requesting the size of the storage allocation - temp_storage_bytes = 1; - break; - } - - // Get search/init grid dims - int degen_col_kernel_block_size = INIT_KERNEL_THREADS; - int degen_col_kernel_grid_size = (spmv_params.num_rows + degen_col_kernel_block_size - 1) / degen_col_kernel_block_size; - - if (debug_synchronous) _CubLog("Invoking spmv_1col_kernel<<<%d, %d, 0, %lld>>>()\n", - degen_col_kernel_grid_size, degen_col_kernel_block_size, (long long) stream); - - // Invoke spmv_search_kernel - spmv_1col_kernel<<>>( - spmv_params); - - // Check for failure to launch - if (CubDebug(error = cudaPeekAtLastError())) break; - - // Sync the stream if specified to flush runtime errors - if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; - - break; - } - - // Get device ordinal - int device_ordinal; - if (CubDebug(error = cudaGetDevice(&device_ordinal))) break; - - // Get SM count - int sm_count; - if (CubDebug(error = cudaDeviceGetAttribute (&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break; - - // Get max x-dimension of grid - int max_dim_x; - if (CubDebug(error = cudaDeviceGetAttribute(&max_dim_x, cudaDevAttrMaxGridDimX, device_ordinal))) break;; - - // Total number of spmv work items - int num_merge_items = spmv_params.num_rows + spmv_params.num_nonzeros; - - // Tile sizes of kernels - int merge_tile_size = spmv_config.block_threads * spmv_config.items_per_thread; - int segment_fixup_tile_size = segment_fixup_config.block_threads * 
segment_fixup_config.items_per_thread; - - // Number of tiles for kernels - unsigned int num_merge_tiles = (num_merge_items + merge_tile_size - 1) / merge_tile_size; - unsigned int num_segment_fixup_tiles = (num_merge_tiles + segment_fixup_tile_size - 1) / segment_fixup_tile_size; - - // Get SM occupancy for kernels - int spmv_sm_occupancy; - if (CubDebug(error = MaxSmOccupancy( - spmv_sm_occupancy, - spmv_kernel, - spmv_config.block_threads))) break; - - int segment_fixup_sm_occupancy; - if (CubDebug(error = MaxSmOccupancy( - segment_fixup_sm_occupancy, - segment_fixup_kernel, - segment_fixup_config.block_threads))) break; - - // Get grid dimensions - dim3 spmv_grid_size( - CUB_MIN(num_merge_tiles, max_dim_x), - (num_merge_tiles + max_dim_x - 1) / max_dim_x, - 1); - - dim3 segment_fixup_grid_size( - CUB_MIN(num_segment_fixup_tiles, max_dim_x), - (num_segment_fixup_tiles + max_dim_x - 1) / max_dim_x, - 1); - - // Get the temporary storage allocation requirements - size_t allocation_sizes[3]; - if (CubDebug(error = ScanTileStateT::AllocationSize(num_segment_fixup_tiles, allocation_sizes[0]))) break; // bytes needed for reduce-by-key tile status descriptors - allocation_sizes[1] = num_merge_tiles * sizeof(KeyValuePairT); // bytes needed for block carry-out pairs - allocation_sizes[2] = (num_merge_tiles + 1) * sizeof(CoordinateT); // bytes needed for tile starting coordinates - - // Alias the temporary allocations from the single storage blob (or compute the necessary size of the blob) - void* allocations[3]; - if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break; - if (d_temp_storage == NULL) - { - // Return if the caller is simply requesting the size of the storage allocation - break; - } - - // Construct the tile status interface - ScanTileStateT tile_state; - if (CubDebug(error = tile_state.Init(num_segment_fixup_tiles, allocations[0], allocation_sizes[0]))) break; - - // Alias the other allocations - KeyValuePairT* d_tile_carry_pairs = (KeyValuePairT*) allocations[1]; // Agent carry-out pairs - CoordinateT* d_tile_coordinates = (CoordinateT*) allocations[2]; // Agent starting coordinates - - // Get search/init grid dims - int search_block_size = INIT_KERNEL_THREADS; - int search_grid_size = (num_merge_tiles + 1 + search_block_size - 1) / search_block_size; - -#if (CUB_PTX_ARCH == 0) - // Init textures - if (CubDebug(error = spmv_params.t_vector_x.BindTexture(spmv_params.d_vector_x))) break; -#endif - - if (search_grid_size < sm_count) -// if (num_merge_tiles < spmv_sm_occupancy * sm_count) - { - // Not enough spmv tiles to saturate the device: have spmv blocks search their own staring coords - d_tile_coordinates = NULL; - } - else - { - // Use separate search kernel if we have enough spmv tiles to saturate the device - - // Log spmv_search_kernel configuration - if (debug_synchronous) _CubLog("Invoking spmv_search_kernel<<<%d, %d, 0, %lld>>>()\n", - search_grid_size, search_block_size, (long long) stream); - - // Invoke spmv_search_kernel - spmv_search_kernel<<>>( - num_merge_tiles, - d_tile_coordinates, - spmv_params); - - // Check for failure to launch - if (CubDebug(error = cudaPeekAtLastError())) break; - - // Sync the stream if specified to flush runtime errors - if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; - } - - // Log spmv_kernel configuration - if (debug_synchronous) _CubLog("Invoking spmv_kernel<<<{%d,%d,%d}, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n", - spmv_grid_size.x, 
spmv_grid_size.y, spmv_grid_size.z, spmv_config.block_threads, (long long) stream, spmv_config.items_per_thread, spmv_sm_occupancy); - - // Invoke spmv_kernel - spmv_kernel<<>>( - spmv_params, - d_tile_coordinates, - d_tile_carry_pairs, - num_merge_tiles, - tile_state, - num_segment_fixup_tiles); - - // Check for failure to launch - if (CubDebug(error = cudaPeekAtLastError())) break; - - // Sync the stream if specified to flush runtime errors - if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; - - // Run reduce-by-key fixup if necessary - if (num_merge_tiles > 1) - { - // Log segment_fixup_kernel configuration - if (debug_synchronous) _CubLog("Invoking segment_fixup_kernel<<<{%d,%d,%d}, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n", - segment_fixup_grid_size.x, segment_fixup_grid_size.y, segment_fixup_grid_size.z, segment_fixup_config.block_threads, (long long) stream, segment_fixup_config.items_per_thread, segment_fixup_sm_occupancy); - - // Invoke segment_fixup_kernel - segment_fixup_kernel<<>>( - d_tile_carry_pairs, - spmv_params.d_vector_y, - num_merge_tiles, - num_segment_fixup_tiles, - tile_state); - - // Check for failure to launch - if (CubDebug(error = cudaPeekAtLastError())) break; - - // Sync the stream if specified to flush runtime errors - if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; - } - -#if (CUB_PTX_ARCH == 0) - // Free textures - if (CubDebug(error = spmv_params.t_vector_x.UnbindTexture())) break; -#endif - } - while (0); - - return error; - -#endif // CUB_RUNTIME_ENABLED - } - - - /** - * Internal dispatch routine for computing a device-wide reduction - */ - CUB_RUNTIME_FUNCTION __forceinline__ - static cudaError_t Dispatch( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - SpmvParamsT& spmv_params, ///< SpMV input parameter bundle - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. 
- { - cudaError error = cudaSuccess; - do - { - // Get PTX version - int ptx_version; - #if (CUB_PTX_ARCH == 0) - if (CubDebug(error = PtxVersion(ptx_version))) break; - #else - ptx_version = CUB_PTX_ARCH; - #endif - - // Get kernel kernel dispatch configurations - KernelConfig spmv_config, segment_fixup_config; - InitConfigs(ptx_version, spmv_config, segment_fixup_config); - - if (CubDebug(error = Dispatch( - d_temp_storage, temp_storage_bytes, spmv_params, stream, debug_synchronous, - DeviceSpmv1ColKernel, - DeviceSpmvSearchKernel, - DeviceSpmvKernel, - DeviceSegmentFixupKernel, - spmv_config, segment_fixup_config))) break; - -/* - // Dispatch - if (spmv_params.beta == 0.0) - { - if (spmv_params.alpha == 1.0) - { - // Dispatch y = A*x - if (CubDebug(error = Dispatch( - d_temp_storage, temp_storage_bytes, spmv_params, stream, debug_synchronous, - DeviceSpmv1ColKernel, - DeviceSpmvSearchKernel, - DeviceSpmvKernel, - DeviceSegmentFixupKernel, - spmv_config, segment_fixup_config))) break; - } - else - { - // Dispatch y = alpha*A*x - if (CubDebug(error = Dispatch( - d_temp_storage, temp_storage_bytes, spmv_params, stream, debug_synchronous, - DeviceSpmvSearchKernel, - DeviceSpmvKernel, - DeviceSegmentFixupKernel, - spmv_config, segment_fixup_config))) break; - } - } - else - { - if (spmv_params.alpha == 1.0) - { - // Dispatch y = A*x + beta*y - if (CubDebug(error = Dispatch( - d_temp_storage, temp_storage_bytes, spmv_params, stream, debug_synchronous, - DeviceSpmvSearchKernel, - DeviceSpmvKernel, - DeviceSegmentFixupKernel, - spmv_config, segment_fixup_config))) break; - } - else - { - // Dispatch y = alpha*A*x + beta*y - if (CubDebug(error = Dispatch( - d_temp_storage, temp_storage_bytes, spmv_params, stream, debug_synchronous, - DeviceSpmvSearchKernel, - DeviceSpmvKernel, - DeviceSegmentFixupKernel, - spmv_config, segment_fixup_config))) break; - } - } -*/ - } - while (0); - - return error; - } -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - - diff --git a/ml-xgboost/cub/cub/device/dispatch/dispatch_spmv_row_based.cuh b/ml-xgboost/cub/cub/device/dispatch/dispatch_spmv_row_based.cuh deleted file mode 100644 index 81db42a..0000000 --- a/ml-xgboost/cub/cub/device/dispatch/dispatch_spmv_row_based.cuh +++ /dev/null @@ -1,877 +0,0 @@ - -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
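The merge-based DispatchSpmv above follows CUB's two-phase temp-storage convention: a first call with d_temp_storage == NULL only computes temp_storage_bytes via AliasTemporaries and returns, and a second call actually launches the search / spmv / segment-fixup kernels. A minimal host-side sketch of that convention through the public cub::DeviceSpmv::CsrMV wrapper (argument names here are illustrative, not taken from this patch):

    #include <cub/cub.cuh>

    // Two-phase dispatch: size the temp storage, allocate it, then run.
    void CsrSpmv(float* d_values, int* d_row_offsets, int* d_column_indices,
                 float* d_vector_x, float* d_vector_y,
                 int num_rows, int num_cols, int num_nonzeros)
    {
        void*  d_temp_storage     = NULL;
        size_t temp_storage_bytes = 0;

        // Pass 1: d_temp_storage == NULL, so only temp_storage_bytes is written.
        cub::DeviceSpmv::CsrMV(d_temp_storage, temp_storage_bytes,
                               d_values, d_row_offsets, d_column_indices,
                               d_vector_x, d_vector_y,
                               num_rows, num_cols, num_nonzeros);

        cudaMalloc(&d_temp_storage, temp_storage_bytes);

        // Pass 2: launches the search / spmv / segment-fixup kernels above.
        cub::DeviceSpmv::CsrMV(d_temp_storage, temp_storage_bytes,
                               d_values, d_row_offsets, d_column_indices,
                               d_vector_x, d_vector_y,
                               num_rows, num_cols, num_nonzeros);

        cudaFree(d_temp_storage);
    }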
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::DeviceSpmv provides device-wide parallel operations for performing sparse-matrix * vector multiplication (SpMV). - */ - -#pragma once - -#include -#include - -#include "../../agent/single_pass_scan_operators.cuh" -#include "../../agent/agent_segment_fixup.cuh" -#include "../../agent/agent_spmv_row_based.cuh" -#include "../../util_type.cuh" -#include "../../util_debug.cuh" -#include "../../util_device.cuh" -#include "../../thread/thread_search.cuh" -#include "../../grid/grid_queue.cuh" -#include "../../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/****************************************************************************** - * SpMV kernel entry points - *****************************************************************************/ - -/** - * Spmv search kernel. Identifies merge path starting coordinates for each tile. - */ -template < - typename AgentSpmvPolicyT, ///< Parameterized SpmvPolicy tuning policy type - typename ValueT, ///< Matrix and vector value type - typename OffsetT> ///< Signed integer type for sequence offsets -__global__ void DeviceSpmv1ColKernel( - SpmvParams spmv_params) ///< [in] SpMV input parameter bundle -{ - typedef CacheModifiedInputIterator< - AgentSpmvPolicyT::VECTOR_VALUES_LOAD_MODIFIER, - ValueT, - OffsetT> - VectorValueIteratorT; - - VectorValueIteratorT wrapped_vector_x(spmv_params.d_vector_x); - - int row_idx = (blockIdx.x * blockDim.x) + threadIdx.x; - if (row_idx < spmv_params.num_rows) - { - OffsetT end_nonzero_idx = spmv_params.d_row_end_offsets[row_idx]; - OffsetT nonzero_idx = spmv_params.d_row_end_offsets[row_idx - 1]; - - ValueT value = 0.0; - if (end_nonzero_idx != nonzero_idx) - { - value = spmv_params.d_values[nonzero_idx] * wrapped_vector_x[spmv_params.d_column_indices[nonzero_idx]]; - } - - spmv_params.d_vector_y[row_idx] = value; - } -} - - -/** - * Spmv search kernel. Identifies merge path starting coordinates for each tile. 
- */ -template < - typename SpmvPolicyT, ///< Parameterized SpmvPolicy tuning policy type - typename OffsetT, ///< Signed integer type for sequence offsets - typename CoordinateT, ///< Merge path coordinate type - typename SpmvParamsT> ///< SpmvParams type -__global__ void DeviceSpmvSearchKernel( - int num_spmv_tiles, ///< [in] Number of SpMV merge tiles (spmv grid size) - CoordinateT* d_tile_coordinates, ///< [out] Pointer to the temporary array of tile starting coordinates - SpmvParamsT spmv_params) ///< [in] SpMV input parameter bundle -{ - /// Constants - enum - { - BLOCK_THREADS = SpmvPolicyT::BLOCK_THREADS, - ITEMS_PER_THREAD = SpmvPolicyT::ITEMS_PER_THREAD, - TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, - }; - - typedef CacheModifiedInputIterator< - SpmvPolicyT::ROW_OFFSETS_SEARCH_LOAD_MODIFIER, - OffsetT, - OffsetT> - RowOffsetsSearchIteratorT; - - // Find the starting coordinate for all tiles (plus the end coordinate of the last one) - int tile_idx = (blockIdx.x * blockDim.x) + threadIdx.x; - if (tile_idx < num_spmv_tiles + 1) - { - OffsetT diagonal = (tile_idx * TILE_ITEMS); - CoordinateT tile_coordinate; - CountingInputIterator nonzero_indices(0); - - // Search the merge path - MergePathSearch( - diagonal, - RowOffsetsSearchIteratorT(spmv_params.d_row_end_offsets), - nonzero_indices, - spmv_params.num_rows, - spmv_params.num_nonzeros, - tile_coordinate); - - // Output starting offset - d_tile_coordinates[tile_idx] = tile_coordinate; - } -} - - -/** - * Spmv agent entry point - */ -template < - typename SpmvPolicyT, ///< Parameterized SpmvPolicy tuning policy type - typename ScanTileStateT, ///< Tile status interface type - typename ValueT, ///< Matrix and vector value type - typename OffsetT, ///< Signed integer type for sequence offsets - typename CoordinateT, ///< Merge path coordinate type - bool HAS_ALPHA, ///< Whether the input parameter Alpha is 1 - bool HAS_BETA> ///< Whether the input parameter Beta is 0 -__launch_bounds__ (int(SpmvPolicyT::BLOCK_THREADS)) -__global__ void DeviceSpmvKernel( - SpmvParams spmv_params, ///< [in] SpMV input parameter bundle -// CoordinateT* d_tile_coordinates, ///< [in] Pointer to the temporary array of tile starting coordinates -// KeyValuePair* d_tile_carry_pairs, ///< [out] Pointer to the temporary array carry-out dot product row-ids, one per block -// int num_tiles, ///< [in] Number of merge tiles -// ScanTileStateT tile_state, ///< [in] Tile status interface for fixup reduce-by-key kernel -// int num_fixup_tiles, ///< [in] Number of reduce-by-key tiles (fixup grid size) - int rows_per_tile) ///< [in] Number of rows per tile -{ - // Spmv agent type specialization - typedef AgentSpmv< - SpmvPolicyT, - ValueT, - OffsetT, - HAS_ALPHA, - HAS_BETA> - AgentSpmvT; - - // Shared memory for AgentSpmv - __shared__ typename AgentSpmvT::TempStorage temp_storage; - - AgentSpmvT(temp_storage, spmv_params).ConsumeTile( - blockIdx.x, - rows_per_tile); - -/* - AgentSpmvT(temp_storage, spmv_params).ConsumeTile( - d_tile_coordinates, - d_tile_carry_pairs, - num_tiles); - - // Initialize fixup tile status - tile_state.InitializeStatus(num_fixup_tiles); -*/ -} - - -/** - * Multi-block reduce-by-key sweep kernel entry point - */ -template < - typename AgentSegmentFixupPolicyT, ///< Parameterized AgentSegmentFixupPolicy tuning policy type - typename PairsInputIteratorT, ///< Random-access input iterator type for keys - typename AggregatesOutputIteratorT, ///< Random-access output iterator type for values - typename OffsetT, ///< Signed integer type for 
global offsets - typename ScanTileStateT> ///< Tile status interface type -__launch_bounds__ (int(AgentSegmentFixupPolicyT::BLOCK_THREADS)) -__global__ void DeviceSegmentFixupKernel( - PairsInputIteratorT d_pairs_in, ///< [in] Pointer to the array carry-out dot product row-ids, one per spmv block - AggregatesOutputIteratorT d_aggregates_out, ///< [in,out] Output value aggregates - OffsetT num_items, ///< [in] Total number of items to select from - int num_tiles, ///< [in] Total number of tiles for the entire problem - ScanTileStateT tile_state) ///< [in] Tile status interface -{ - // Thread block type for reducing tiles of value segments - typedef AgentSegmentFixup< - AgentSegmentFixupPolicyT, - PairsInputIteratorT, - AggregatesOutputIteratorT, - cub::Equality, - cub::Sum, - OffsetT> - AgentSegmentFixupT; - - // Shared memory for AgentSegmentFixup - __shared__ typename AgentSegmentFixupT::TempStorage temp_storage; - - // Process tiles - AgentSegmentFixupT(temp_storage, d_pairs_in, d_aggregates_out, cub::Equality(), cub::Sum()).ConsumeRange( - num_items, - num_tiles, - tile_state); -} - - -/****************************************************************************** - * Dispatch - ******************************************************************************/ - -/** - * Utility class for dispatching the appropriately-tuned kernels for DeviceSpmv - */ -template < - typename ValueT, ///< Matrix and vector value type - typename OffsetT> ///< Signed integer type for global offsets -struct DispatchSpmv -{ - //--------------------------------------------------------------------- - // Constants and Types - //--------------------------------------------------------------------- - - enum - { - INIT_KERNEL_THREADS = 128 - }; - - // SpmvParams bundle type - typedef SpmvParams SpmvParamsT; - - // 2D merge path coordinate type - typedef typename CubVector::Type CoordinateT; - - // Tile status descriptor interface type - typedef ReduceByKeyScanTileState ScanTileStateT; - - // Tuple type for scanning (pairs accumulated segment-value with segment-index) - typedef KeyValuePair KeyValuePairT; - - - //--------------------------------------------------------------------- - // Tuning policies - //--------------------------------------------------------------------- - - /// SM11 - struct Policy110 - { - typedef AgentSpmvPolicy< - 128, - 1, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - false, - BLOCK_SCAN_WARP_SCANS> - SpmvPolicyT; - - typedef AgentSegmentFixupPolicy< - 128, - 4, - BLOCK_LOAD_VECTORIZE, - LOAD_DEFAULT, - BLOCK_SCAN_WARP_SCANS> - SegmentFixupPolicyT; - }; - - /// SM20 - struct Policy200 - { - typedef AgentSpmvPolicy< - 96, - 18, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - false, - BLOCK_SCAN_RAKING> - SpmvPolicyT; - - typedef AgentSegmentFixupPolicy< - 128, - 4, - BLOCK_LOAD_VECTORIZE, - LOAD_DEFAULT, - BLOCK_SCAN_WARP_SCANS> - SegmentFixupPolicyT; - - }; - - - - /// SM30 - struct Policy300 - { - typedef AgentSpmvPolicy< - 96, - 6, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_DEFAULT, - false, - BLOCK_SCAN_WARP_SCANS> - SpmvPolicyT; - - typedef AgentSegmentFixupPolicy< - 128, - 4, - BLOCK_LOAD_VECTORIZE, - LOAD_DEFAULT, - BLOCK_SCAN_WARP_SCANS> - SegmentFixupPolicyT; - - }; - - - /// SM35 - struct Policy350 - { - typedef AgentSpmvPolicy< - (sizeof(ValueT) > 4) ? 64 : 128, - (sizeof(ValueT) > 4) ? 
7 : 7, - LOAD_LDG, - LOAD_LDG, - LOAD_LDG, - LOAD_LDG, - LOAD_LDG, - false, - BLOCK_SCAN_WARP_SCANS> - SpmvPolicyT; - - typedef AgentSegmentFixupPolicy< - 128, - 3, - BLOCK_LOAD_VECTORIZE, - LOAD_LDG, - BLOCK_SCAN_WARP_SCANS> - SegmentFixupPolicyT; - }; - - - /// SM37 - struct Policy370 - { - - typedef AgentSpmvPolicy< - (sizeof(ValueT) > 4) ? 128 : 128, - (sizeof(ValueT) > 4) ? 7 : 7, - LOAD_LDG, - LOAD_CA, - LOAD_LDG, - LOAD_LDG, - LOAD_LDG, - false, - BLOCK_SCAN_WARP_SCANS> - SpmvPolicyT; - - typedef AgentSegmentFixupPolicy< - 128, - 3, - BLOCK_LOAD_VECTORIZE, - LOAD_LDG, - BLOCK_SCAN_WARP_SCANS> - SegmentFixupPolicyT; - }; - - /// SM50 - struct Policy500 - { - typedef AgentSpmvPolicy< - (sizeof(ValueT) > 4) ? 64 : 64, - 7, - LOAD_DEFAULT, - LOAD_CA, - LOAD_DEFAULT, - LOAD_DEFAULT, - LOAD_LDG, - false, - BLOCK_SCAN_RAKING_MEMOIZE> - SpmvPolicyT; - - typedef AgentSegmentFixupPolicy< - 128, - 3, - BLOCK_LOAD_VECTORIZE, - LOAD_LDG, - BLOCK_SCAN_RAKING_MEMOIZE> - SegmentFixupPolicyT; - }; - - - - //--------------------------------------------------------------------- - // Tuning policies of current PTX compiler pass - //--------------------------------------------------------------------- - -#if (CUB_PTX_ARCH >= 500) - typedef Policy500 PtxPolicy; - -#elif (CUB_PTX_ARCH >= 370) - typedef Policy370 PtxPolicy; - -#elif (CUB_PTX_ARCH >= 350) - typedef Policy350 PtxPolicy; - -#elif (CUB_PTX_ARCH >= 300) - typedef Policy300 PtxPolicy; - -#elif (CUB_PTX_ARCH >= 200) - typedef Policy200 PtxPolicy; - -#else - typedef Policy110 PtxPolicy; - -#endif - - // "Opaque" policies (whose parameterizations aren't reflected in the type signature) - struct PtxSpmvPolicyT : PtxPolicy::SpmvPolicyT {}; - struct PtxSegmentFixupPolicy : PtxPolicy::SegmentFixupPolicyT {}; - - - //--------------------------------------------------------------------- - // Utilities - //--------------------------------------------------------------------- - - /** - * Initialize kernel dispatch configurations with the policies corresponding to the PTX assembly we will use - */ - template - CUB_RUNTIME_FUNCTION __forceinline__ - static void InitConfigs( - int ptx_version, - KernelConfig &spmv_config, - KernelConfig &fixup_config) - { - #if (CUB_PTX_ARCH > 0) - - // We're on the device, so initialize the kernel dispatch configurations with the current PTX policy - spmv_config.template Init(); - fixup_config.template Init(); - - #else - - // We're on the host, so lookup and initialize the kernel dispatch configurations with the policies that match the device's PTX version - if (ptx_version >= 500) - { - spmv_config.template Init(); - fixup_config.template Init(); - } - else if (ptx_version >= 370) - { - spmv_config.template Init(); - fixup_config.template Init(); - } - else if (ptx_version >= 350) - { - spmv_config.template Init(); - fixup_config.template Init(); - } - else if (ptx_version >= 300) - { - spmv_config.template Init(); - fixup_config.template Init(); - - } - else if (ptx_version >= 200) - { - spmv_config.template Init(); - fixup_config.template Init(); - } - else - { - spmv_config.template Init(); - fixup_config.template Init(); - } - - #endif - } - - - /** - * Kernel kernel dispatch configuration. 
- */ - struct KernelConfig - { - int block_threads; - int items_per_thread; - int tile_items; - - template - CUB_RUNTIME_FUNCTION __forceinline__ - void Init() - { - block_threads = PolicyT::BLOCK_THREADS; - items_per_thread = PolicyT::ITEMS_PER_THREAD; - tile_items = block_threads * items_per_thread; - } - }; - - - //--------------------------------------------------------------------- - // Dispatch entrypoints - //--------------------------------------------------------------------- - - /** - * Internal dispatch routine for computing a device-wide reduction using the - * specified kernel functions. - * - * If the input is larger than a single tile, this method uses two-passes of - * kernel invocations. - */ - template < -// typename Spmv1ColKernelT, ///< Function type of cub::DeviceSpmv1ColKernel -// typename SpmvSearchKernelT, ///< Function type of cub::AgentSpmvSearchKernel - typename SpmvKernelT> ///< Function type of cub::AgentSpmvKernel -// typename SegmentFixupKernelT> ///< Function type of cub::DeviceSegmentFixupKernelT - CUB_RUNTIME_FUNCTION __forceinline__ - static cudaError_t Dispatch( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - SpmvParamsT& spmv_params, ///< SpMV input parameter bundle - cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous, ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. -// Spmv1ColKernelT spmv_1col_kernel, ///< [in] Kernel function pointer to parameterization of DeviceSpmv1ColKernel -// SpmvSearchKernelT spmv_search_kernel, ///< [in] Kernel function pointer to parameterization of AgentSpmvSearchKernel - SpmvKernelT spmv_kernel, ///< [in] Kernel function pointer to parameterization of AgentSpmvKernel -// SegmentFixupKernelT fixup_kernel, ///< [in] Kernel function pointer to parameterization of cub::DeviceSegmentFixupKernel - KernelConfig spmv_config, ///< [in] Dispatch parameters that match the policy that \p spmv_kernel was compiled for - KernelConfig fixup_config) ///< [in] Dispatch parameters that match the policy that \p fixup_kernel was compiled for - { -#ifndef CUB_RUNTIME_ENABLED - - // Kernel launch not supported from this device - return CubDebug(cudaErrorNotSupported ); - -#else - cudaError error = cudaSuccess; - do - { -/* - if (spmv_params.num_cols == 1) - { - if (d_temp_storage == NULL) - { - // Return if the caller is simply requesting the size of the storage allocation - temp_storage_bytes = 1; - return cudaSuccess; - } - - // Get search/init grid dims - int degen_col_kernel_block_size = INIT_KERNEL_THREADS; - int degen_col_kernel_grid_size = (spmv_params.num_rows + degen_col_kernel_block_size - 1) / degen_col_kernel_block_size; - - if (debug_synchronous) _CubLog("Invoking spmv_1col_kernel<<<%d, %d, 0, %lld>>>()\n", - degen_col_kernel_grid_size, degen_col_kernel_block_size, (long long) stream); - - // Invoke spmv_search_kernel - spmv_1col_kernel<<>>( - spmv_params); - - // Check for failure to launch - if (CubDebug(error = cudaPeekAtLastError())) break; - - // Sync the stream if specified to flush runtime errors - if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; - - break; - } -*/ - // Get 
device ordinal - int device_ordinal; - if (CubDebug(error = cudaGetDevice(&device_ordinal))) break; - - // Get SM count - int sm_count; - if (CubDebug(error = cudaDeviceGetAttribute (&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break; - - // Get max x-dimension of grid - int max_dim_x; - if (CubDebug(error = cudaDeviceGetAttribute(&max_dim_x, cudaDevAttrMaxGridDimX, device_ordinal))) break; - - // Get SM occupancy for kernels - int spmv_sm_occupancy; - if (CubDebug(error = MaxSmOccupancy( - spmv_sm_occupancy, - spmv_kernel, - spmv_config.block_threads))) break; - - // Tile sizes of kernels - int spmv_tile_size = spmv_config.block_threads * spmv_config.items_per_thread; - int fixup_tile_size = fixup_config.block_threads * fixup_config.items_per_thread; - - unsigned int rows_per_tile = spmv_config.block_threads; - - if (spmv_params.num_rows < rows_per_tile * spmv_sm_occupancy * sm_count * 8) - { - // Decrease rows per tile if needed to accommodate high expansion factor - unsigned int expansion_factor = (spmv_params.num_nonzeros) / spmv_params.num_rows; - - if ((expansion_factor > 0) && (expansion_factor > spmv_config.items_per_thread)) - rows_per_tile = (spmv_tile_size) / expansion_factor; - - // Decrease rows per tile if needed to accommodate minimum parallelism - unsigned int spmv_device_occupancy = sm_count * 2; -// unsigned int spmv_device_occupancy = sm_count * ((spmv_sm_occupancy + 1) / 2); - if (spmv_params.num_rows < spmv_device_occupancy * rows_per_tile) - rows_per_tile = (spmv_params.num_rows) / spmv_device_occupancy; - } - - rows_per_tile = CUB_MAX(rows_per_tile, 2); - - if (debug_synchronous) _CubLog("Rows per tile: %d\n", rows_per_tile); - - // Number of tiles for kernels - unsigned int num_spmv_tiles = (spmv_params.num_rows + rows_per_tile - 1) / rows_per_tile; -// unsigned int num_fixup_tiles = (num_spmv_tiles + fixup_tile_size - 1) / fixup_tile_size; - - // Get grid dimensions - dim3 spmv_grid_size( - CUB_MIN(num_spmv_tiles, max_dim_x), - (num_spmv_tiles + max_dim_x - 1) / max_dim_x, - 1); - -/* - dim3 spmv_grid_size( - CUB_MIN(num_spmv_tiles, max_dim_x), - (num_spmv_tiles + max_dim_x - 1) / max_dim_x, - 1); - - dim3 fixup_grid_size( - CUB_MIN(num_fixup_tiles, max_dim_x), - (num_fixup_tiles + max_dim_x - 1) / max_dim_x, - 1); -*/ - // Get the temporary storage allocation requirements - size_t allocation_sizes[3]; -// if (CubDebug(error = ScanTileStateT::AllocationSize(num_fixup_tiles, allocation_sizes[0]))) break; // bytes needed for reduce-by-key tile status descriptors - allocation_sizes[0] = 0; - allocation_sizes[1] = num_spmv_tiles * sizeof(KeyValuePairT); // bytes needed for block carry-out pairs - allocation_sizes[2] = (num_spmv_tiles + 1) * sizeof(CoordinateT); // bytes needed for tile starting coordinates - - // Alias the temporary allocations from the single storage blob (or compute the necessary size of the blob) - void* allocations[3]; - if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break; - if (d_temp_storage == NULL) - { - // Return if the caller is simply requesting the size of the storage allocation - return cudaSuccess; - } - - // Construct the tile status interface -/* - ScanTileStateT tile_state; - if (CubDebug(error = tile_state.Init(num_fixup_tiles, allocations[0], allocation_sizes[0]))) break; -*/ - // Alias the other allocations - KeyValuePairT* d_tile_carry_pairs = (KeyValuePairT*) allocations[1]; // Agent carry-out pairs - CoordinateT* d_tile_coordinates = (CoordinateT*)
allocations[2]; // Agent starting coordinates - - // Get search/init grid dims - int search_block_size = INIT_KERNEL_THREADS; - int search_grid_size = (num_spmv_tiles + 1 + search_block_size - 1) / search_block_size; - -#if (CUB_PTX_ARCH == 0) - // Init textures -// if (CubDebug(error = spmv_params.t_vector_x.BindTexture(spmv_params.d_vector_x))) break; -#endif - -/* - if (search_grid_size < sm_count) - { - // Not enough spmv tiles to saturate the device: have spmv blocks search their own staring coords - d_tile_coordinates = NULL; - } - else - { - // Use separate search kernel if we have enough spmv tiles to saturate the device - - // Log spmv_search_kernel configuration - if (debug_synchronous) _CubLog("Invoking spmv_search_kernel<<<%d, %d, 0, %lld>>>()\n", - search_grid_size, search_block_size, (long long) stream); - - // Invoke spmv_search_kernel - spmv_search_kernel<<>>( - num_spmv_tiles, - d_tile_coordinates, - spmv_params); - - // Check for failure to launch - if (CubDebug(error = cudaPeekAtLastError())) break; - - // Sync the stream if specified to flush runtime errors - if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; - } -*/ - // Log spmv_kernel configuration - if (debug_synchronous) _CubLog("Invoking spmv_kernel<<<{%d,%d,%d}, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n", - spmv_grid_size.x, spmv_grid_size.y, spmv_grid_size.z, spmv_config.block_threads, (long long) stream, spmv_config.items_per_thread, spmv_sm_occupancy); - - // Invoke spmv_kernel - spmv_kernel<<>>( - spmv_params, -// d_tile_coordinates, -// d_tile_carry_pairs, -// num_spmv_tiles, -// tile_state, -// num_fixup_tiles, - rows_per_tile); - - // Check for failure to launch - if (CubDebug(error = cudaPeekAtLastError())) break; - - // Sync the stream if specified to flush runtime errors - if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; -/* - // Run reduce-by-key fixup if necessary - if (num_spmv_tiles > 1) - { - // Log fixup_kernel configuration - if (debug_synchronous) _CubLog("Invoking fixup_kernel<<<{%d,%d,%d}, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n", - fixup_grid_size.x, fixup_grid_size.y, fixup_grid_size.z, fixup_config.block_threads, (long long) stream, fixup_config.items_per_thread, fixup_sm_occupancy); - - // Invoke fixup_kernel - fixup_kernel<<>>( - d_tile_carry_pairs, - spmv_params.d_vector_y, - num_spmv_tiles, - num_fixup_tiles, - tile_state); - - // Check for failure to launch - if (CubDebug(error = cudaPeekAtLastError())) break; - - // Sync the stream if specified to flush runtime errors - if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; - } -*/ -#if (CUB_PTX_ARCH == 0) - // Free textures -// if (CubDebug(error = spmv_params.t_vector_x.UnbindTexture())) break; -#endif - } - while (0); - - return error; - -#endif // CUB_RUNTIME_ENABLED - } - - - /** - * Internal dispatch routine for computing a device-wide reduction - */ - CUB_RUNTIME_FUNCTION __forceinline__ - static cudaError_t Dispatch( - void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation - SpmvParamsT& spmv_params, ///< SpMV input parameter bundle - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. 
- bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. - { - cudaError error = cudaSuccess; - do - { - // Get PTX version - int ptx_version; - #if (CUB_PTX_ARCH == 0) - if (CubDebug(error = PtxVersion(ptx_version))) break; - #else - ptx_version = CUB_PTX_ARCH; - #endif - - // Get kernel kernel dispatch configurations - KernelConfig spmv_config, fixup_config; - InitConfigs(ptx_version, spmv_config, fixup_config); - - if (CubDebug(error = Dispatch( - d_temp_storage, temp_storage_bytes, spmv_params, stream, debug_synchronous, -// DeviceSpmv1ColKernel, -// DeviceSpmvSearchKernel, - DeviceSpmvKernel, -// DeviceSegmentFixupKernel, - spmv_config, fixup_config))) break; - -/* - // Dispatch - if (spmv_params.beta == 0.0) - { - if (spmv_params.alpha == 1.0) - { - // Dispatch y = A*x - if (CubDebug(error = Dispatch( - d_temp_storage, temp_storage_bytes, spmv_params, stream, debug_synchronous, - DeviceSpmv1ColKernel, - DeviceSpmvSearchKernel, - DeviceSpmvKernel, - DeviceSegmentFixupKernel, - spmv_config, fixup_config))) break; - } - else - { - // Dispatch y = alpha*A*x - if (CubDebug(error = Dispatch( - d_temp_storage, temp_storage_bytes, spmv_params, stream, debug_synchronous, - DeviceSpmvSearchKernel, - DeviceSpmvKernel, - DeviceSegmentFixupKernel, - spmv_config, fixup_config))) break; - } - } - else - { - if (spmv_params.alpha == 1.0) - { - // Dispatch y = A*x + beta*y - if (CubDebug(error = Dispatch( - d_temp_storage, temp_storage_bytes, spmv_params, stream, debug_synchronous, - DeviceSpmvSearchKernel, - DeviceSpmvKernel, - DeviceSegmentFixupKernel, - spmv_config, fixup_config))) break; - } - else - { - // Dispatch y = alpha*A*x + beta*y - if (CubDebug(error = Dispatch( - d_temp_storage, temp_storage_bytes, spmv_params, stream, debug_synchronous, - DeviceSpmvSearchKernel, - DeviceSpmvKernel, - DeviceSegmentFixupKernel, - spmv_config, fixup_config))) break; - } - } -*/ - } - while (0); - - return error; - } -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - - diff --git a/ml-xgboost/cub/cub/grid/grid_barrier.cuh b/ml-xgboost/cub/cub/grid/grid_barrier.cuh deleted file mode 100644 index 0a98c26..0000000 --- a/ml-xgboost/cub/cub/grid/grid_barrier.cuh +++ /dev/null @@ -1,211 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::GridBarrier implements a software global barrier among thread blocks within a CUDA grid - */ - -#pragma once - -#include "../util_debug.cuh" -#include "../util_namespace.cuh" -#include "../thread/thread_load.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \addtogroup GridModule - * @{ - */ - - -/** - * \brief GridBarrier implements a software global barrier among thread blocks within a CUDA grid - */ -class GridBarrier -{ -protected : - - typedef unsigned int SyncFlag; - - // Counters in global device memory - SyncFlag* d_sync; - -public: - - /** - * Constructor - */ - GridBarrier() : d_sync(NULL) {} - - - /** - * Synchronize - */ - __device__ __forceinline__ void Sync() const - { - volatile SyncFlag *d_vol_sync = d_sync; - - // Threadfence and syncthreads to make sure global writes are visible before - // thread-0 reports in with its sync counter - __threadfence(); - CTA_SYNC(); - - if (blockIdx.x == 0) - { - // Report in ourselves - if (threadIdx.x == 0) - { - d_vol_sync[blockIdx.x] = 1; - } - - CTA_SYNC(); - - // Wait for everyone else to report in - for (int peer_block = threadIdx.x; peer_block < gridDim.x; peer_block += blockDim.x) - { - while (ThreadLoad(d_sync + peer_block) == 0) - { - __threadfence_block(); - } - } - - CTA_SYNC(); - - // Let everyone know it's safe to proceed - for (int peer_block = threadIdx.x; peer_block < gridDim.x; peer_block += blockDim.x) - { - d_vol_sync[peer_block] = 0; - } - } - else - { - if (threadIdx.x == 0) - { - // Report in - d_vol_sync[blockIdx.x] = 1; - - // Wait for acknowledgment - while (ThreadLoad(d_sync + blockIdx.x) == 1) - { - __threadfence_block(); - } - } - - CTA_SYNC(); - } - } -}; - - -/** - * \brief GridBarrierLifetime extends GridBarrier to provide lifetime management of the temporary device storage needed for cooperation. - * - * Uses RAII for lifetime, i.e., device resources are reclaimed when - * the destructor is called. 
- */ -class GridBarrierLifetime : public GridBarrier -{ -protected: - - // Number of bytes backed by d_sync - size_t sync_bytes; - -public: - - /** - * Constructor - */ - GridBarrierLifetime() : GridBarrier(), sync_bytes(0) {} - - - /** - * DeviceFrees and resets the progress counters - */ - cudaError_t HostReset() - { - cudaError_t retval = cudaSuccess; - if (d_sync) - { - CubDebug(retval = cudaFree(d_sync)); - d_sync = NULL; - } - sync_bytes = 0; - return retval; - } - - - /** - * Destructor - */ - virtual ~GridBarrierLifetime() - { - HostReset(); - } - - - /** - * Sets up the progress counters for the next kernel launch (lazily - * allocating and initializing them if necessary) - */ - cudaError_t Setup(int sweep_grid_size) - { - cudaError_t retval = cudaSuccess; - do { - size_t new_sync_bytes = sweep_grid_size * sizeof(SyncFlag); - if (new_sync_bytes > sync_bytes) - { - if (d_sync) - { - if (CubDebug(retval = cudaFree(d_sync))) break; - } - - sync_bytes = new_sync_bytes; - - // Allocate and initialize to zero - if (CubDebug(retval = cudaMalloc((void**) &d_sync, sync_bytes))) break; - if (CubDebug(retval = cudaMemset(d_sync, 0, new_sync_bytes))) break; - } - } while (0); - - return retval; - } -}; - - -/** @} */ // end group GridModule - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/grid/grid_even_share.cuh b/ml-xgboost/cub/cub/grid/grid_even_share.cuh deleted file mode 100644 index 8e4cc12..0000000 --- a/ml-xgboost/cub/cub/grid/grid_even_share.cuh +++ /dev/null @@ -1,185 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::GridEvenShare is a descriptor utility for distributing input among CUDA threadblocks in an "even-share" fashion. 
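GridBarrier::Sync above implements the rendezvous in two roles: block 0 waits for every block's flag to be raised and then clears them all, while every other block raises its flag and spins until it is cleared. GridBarrierLifetime adds RAII ownership of the d_sync flags. A sketch of the intended usage (kernel and launch shapes are illustrative; the software barrier is only safe when every block of the grid is co-resident on the device):

    #include <cub/grid/grid_barrier.cuh>

    // Device side: two phases separated by a grid-wide rendezvous.
    __global__ void TwoPhaseKernel(cub::GridBarrier global_barrier)
    {
        // ... phase 1: every block publishes its partial results ...
        global_barrier.Sync();   // all resident blocks rendezvous here
        // ... phase 2: blocks may now read each other's phase-1 output ...
    }

    // Host side: GridBarrierLifetime owns (and lazily allocates) the flags.
    cudaError_t LaunchTwoPhase(int grid_size, int block_threads)
    {
        cub::GridBarrierLifetime global_barrier;
        cudaError_t error = global_barrier.Setup(grid_size);  // one SyncFlag per block
        if (error != cudaSuccess) return error;
        TwoPhaseKernel<<<grid_size, block_threads>>>(global_barrier);
        return cudaDeviceSynchronize();
    }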
Each threadblock gets roughly the same number of fixed-size work units (grains). - */ - - -#pragma once - -#include "../util_namespace.cuh" -#include "../util_macro.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \addtogroup GridModule - * @{ - */ - - -/** - * \brief GridEvenShare is a descriptor utility for distributing input among CUDA threadblocks in an "even-share" fashion. Each threadblock gets roughly the same number of fixed-size work units (grains). - * - * \par Overview - * GridEvenShare indicates which sections of input are to be mapped onto which threadblocks. - * Threadblocks may receive one of three different amounts of work: "big", "normal", - * and "last". The "big" workloads are one scheduling grain larger than "normal". The "last" work unit - * for the last threadblock may be partially-full if the input is not an even multiple of - * the scheduling grain size. - * - * \par - * Before invoking a child grid, a parent thread will typically construct an instance of - * GridEvenShare. The instance can be passed to child threadblocks which can - * initialize their per-threadblock offsets using \p BlockInit(). - * - * \tparam OffsetT Signed integer type for global offsets - */ -template <typename OffsetT> -struct GridEvenShare -{ - OffsetT total_grains; - int big_blocks; - OffsetT big_share; - OffsetT normal_share; - OffsetT normal_base_offset; - - /// Total number of input items - OffsetT num_items; - - /// Grid size in threadblocks - int grid_size; - - /// OffsetT into input marking the beginning of the owning thread block's segment of input tiles - OffsetT block_offset; - - /// OffsetT into input marking the end (one-past) of the owning thread block's segment of input tiles - OffsetT block_end; - - /** - * \brief Default constructor. Zero-initializes block-specific fields. - */ - __host__ __device__ __forceinline__ GridEvenShare() : - num_items(0), - grid_size(0), - block_offset(0), - block_end(0) {} - - /** - * \brief Constructor. Initializes the grid-specific members \p num_items and \p grid_size. To be called prior to kernel launch. - */ - __host__ __device__ __forceinline__ GridEvenShare( - OffsetT num_items, ///< Total number of input items - int max_grid_size, ///< Maximum grid size allowable (actual grid size may be less if not warranted by the number of input items) - int schedule_granularity) ///< Granularity by which the input can be parcelled into and distributed among threadblocks. Usually the thread block's native tile size (or a multiple thereof).
- { - this->num_items = num_items; - this->block_offset = num_items; - this->block_end = num_items; - this->total_grains = (num_items + schedule_granularity - 1) / schedule_granularity; - this->grid_size = CUB_MIN(total_grains, max_grid_size); - OffsetT grains_per_block = total_grains / grid_size; - this->big_blocks = total_grains - (grains_per_block * grid_size); // leftover grains go to big blocks - this->normal_share = grains_per_block * schedule_granularity; - this->normal_base_offset = big_blocks * schedule_granularity; - this->big_share = normal_share + schedule_granularity; - } - - - - /** - * \brief Initializes ranges for the specified partition index - */ - __device__ __forceinline__ void Init(int partition_id) - { - if (partition_id < big_blocks) - { - // This threadblock gets a big share of grains (grains_per_block + 1) - block_offset = (partition_id * big_share); - block_end = block_offset + big_share; - } - else if (partition_id < total_grains) - { - // This threadblock gets a normal share of grains (grains_per_block) - block_offset = normal_base_offset + (partition_id * normal_share); - block_end = CUB_MIN(num_items, block_offset + normal_share); - } - } - - - /** - * \brief Initializes ranges for the current thread block (e.g., to be called by each threadblock after startup) - */ - __device__ __forceinline__ void BlockInit() - { - Init(blockIdx.x); - } - - - /** - * Print to stdout - */ - __host__ __device__ __forceinline__ void Print() - { - printf( -#if (CUB_PTX_ARCH > 0) - "\tthreadblock(%d) " - "block_offset(%lu) " - "block_end(%lu) " -#endif - "num_items(%lu) " - "total_grains(%lu) " - "big_blocks(%lu) " - "big_share(%lu) " - "normal_share(%lu)\n", -#if (CUB_PTX_ARCH > 0) - blockIdx.x, - (unsigned long) block_offset, - (unsigned long) block_end, -#endif - (unsigned long) num_items, - (unsigned long) total_grains, - (unsigned long) big_blocks, - (unsigned long) big_share, - (unsigned long) normal_share); - } -}; - - - -/** @} */ // end group GridModule - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/grid/grid_mapping.cuh b/ml-xgboost/cub/cub/grid/grid_mapping.cuh deleted file mode 100644 index fa3574e..0000000 --- a/ml-xgboost/cub/cub/grid/grid_mapping.cuh +++ /dev/null @@ -1,95 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
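The constructor above rounds num_items up into total_grains scheduling grains, gives each block grains_per_block of them, and hands the leftover grains to the first big_blocks blocks, one extra grain apiece; Init()/BlockInit() then turn a block index into an item range. A consumption sketch under those semantics (kernel and names illustrative):

    #include <cub/grid/grid_even_share.cuh>

    template <int TILE_ITEMS, typename OffsetT>
    __global__ void ConsumeEvenShare(cub::GridEvenShare<OffsetT> even_share)
    {
        even_share.BlockInit();  // derive this block's [block_offset, block_end)
        for (OffsetT tile_offset = even_share.block_offset;
             tile_offset < even_share.block_end;
             tile_offset += TILE_ITEMS)
        {
            // ... cooperatively consume the grain of TILE_ITEMS inputs at tile_offset ...
        }
    }

    // Host side (illustrative): parcel num_items into TILE_ITEMS-sized grains
    //   cub::GridEvenShare<int> even_share(num_items, max_grid_size, TILE_ITEMS);
    //   ConsumeEvenShare<TILE_ITEMS><<<even_share.grid_size, BLOCK_THREADS>>>(even_share);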
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::GridMappingStrategy enumerates alternative strategies for mapping constant-sized tiles of device-wide data onto a grid of CUDA thread blocks. - */ - -#pragma once - -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \addtogroup GridModule - * @{ - */ - - -/****************************************************************************** - * Mapping policies - *****************************************************************************/ - - -/** - * \brief cub::GridMappingStrategy enumerates alternative strategies for mapping constant-sized tiles of device-wide data onto a grid of CUDA thread blocks. - */ -enum GridMappingStrategy -{ - /** - * \brief An "even-share" strategy for assigning input tiles to thread blocks. - * - * \par Overview - * The input is evenly partitioned into \p p segments, where \p p is - * constant and corresponds loosely to the number of thread blocks that may - * actively reside on the target device. Each segment is comprised of - * consecutive tiles, where a tile is a small, constant-sized unit of input - * to be processed to completion before the thread block terminates or - * obtains more work. The kernel invokes \p p thread blocks, each - * of which iteratively consumes a segment of n/p elements - * in tile-size increments. - */ - GRID_MAPPING_EVEN_SHARE, - - /** - * \brief A dynamic "queue-based" strategy for assigning input tiles to thread blocks. - * - * \par Overview - * The input is treated as a queue to be dynamically consumed by a grid of - * thread blocks. Work is atomically dequeued in tiles, where a tile is a - * unit of input to be processed to completion before the thread block - * terminates or obtains more work. The grid size \p p is constant, - * loosely corresponding to the number of thread blocks that may actively - * reside on the target device. - */ - GRID_MAPPING_DYNAMIC, -}; - - -/** @} */ // end group GridModule - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/grid/grid_queue.cuh b/ml-xgboost/cub/cub/grid/grid_queue.cuh deleted file mode 100644 index ff015ea..0000000 --- a/ml-xgboost/cub/cub/grid/grid_queue.cuh +++ /dev/null @@ -1,220 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::GridQueue is a descriptor utility for dynamic queue management. - */ - -#pragma once - -#include "../util_namespace.cuh" -#include "../util_debug.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \addtogroup GridModule - * @{ - */ - - -/** - * \brief GridQueue is a descriptor utility for dynamic queue management. - * - * \par Overview - * GridQueue descriptors provide abstractions for "filling" or - * "draining" globally-shared vectors. - * - * \par - * A "filling" GridQueue works by atomically-adding to a zero-initialized counter, - * returning a unique offset for the calling thread to write its items. - * The GridQueue maintains the total "fill-size". The fill counter must be reset - * using GridQueue::ResetFill by the host or kernel instance prior to the kernel instance that - * will be filling. - * - * \par - * Similarly, a "draining" GridQueue works by atomically-incrementing a - * zero-initialized counter, returning a unique offset for the calling thread to - * read its items. Threads can safely drain until the array's logical fill-size is - * exceeded. The drain counter must be reset using GridQueue::ResetDrain or - * GridQueue::FillAndResetDrain by the host or kernel instance prior to the kernel instance that - * will be draining. (For dynamic work distribution of existing data, the corresponding fill-size - * is simply the number of elements in the array.) - * - * \par - * Iterative work management can be implemented simply with a pair of flip-flopping - * work buffers, each with an associated set of fill and drain GridQueue descriptors.
- * - * \tparam OffsetT Signed integer type for global offsets - */ -template -class GridQueue -{ -private: - - /// Counter indices - enum - { - FILL = 0, - DRAIN = 1, - }; - - /// Pair of counters - OffsetT *d_counters; - -public: - - /// Returns the device allocation size in bytes needed to construct a GridQueue instance - __host__ __device__ __forceinline__ - static size_t AllocationSize() - { - return sizeof(OffsetT) * 2; - } - - - /// Constructs an invalid GridQueue descriptor - __host__ __device__ __forceinline__ GridQueue() - : - d_counters(NULL) - {} - - - /// Constructs a GridQueue descriptor around the device storage allocation - __host__ __device__ __forceinline__ GridQueue( - void *d_storage) ///< Device allocation to back the GridQueue. Must be at least as big as AllocationSize(). - : - d_counters((OffsetT*) d_storage) - {} - - - /// This operation sets the fill-size and resets the drain counter, preparing the GridQueue for draining in the next kernel instance. To be called by the host or by a kernel prior to that which will be draining. - __host__ __device__ __forceinline__ cudaError_t FillAndResetDrain( - OffsetT fill_size, - cudaStream_t stream = 0) - { -#if (CUB_PTX_ARCH > 0) - (void)stream; - d_counters[FILL] = fill_size; - d_counters[DRAIN] = 0; - return cudaSuccess; -#else - OffsetT counters[2]; - counters[FILL] = fill_size; - counters[DRAIN] = 0; - return CubDebug(cudaMemcpyAsync(d_counters, counters, sizeof(OffsetT) * 2, cudaMemcpyHostToDevice, stream)); -#endif - } - - - /// This operation resets the drain so that it may advance to meet the existing fill-size. To be called by the host or by a kernel prior to that which will be draining. - __host__ __device__ __forceinline__ cudaError_t ResetDrain(cudaStream_t stream = 0) - { -#if (CUB_PTX_ARCH > 0) - (void)stream; - d_counters[DRAIN] = 0; - return cudaSuccess; -#else - return CubDebug(cudaMemsetAsync(d_counters + DRAIN, 0, sizeof(OffsetT), stream)); -#endif - } - - - /// This operation resets the fill counter. To be called by the host or by a kernel prior to that which will be filling. - __host__ __device__ __forceinline__ cudaError_t ResetFill(cudaStream_t stream = 0) - { -#if (CUB_PTX_ARCH > 0) - (void)stream; - d_counters[FILL] = 0; - return cudaSuccess; -#else - return CubDebug(cudaMemsetAsync(d_counters + FILL, 0, sizeof(OffsetT), stream)); -#endif - } - - - /// Returns the fill-size established by the parent or by the previous kernel. - __host__ __device__ __forceinline__ cudaError_t FillSize( - OffsetT &fill_size, - cudaStream_t stream = 0) - { -#if (CUB_PTX_ARCH > 0) - (void)stream; - fill_size = d_counters[FILL]; - return cudaSuccess; -#else - return CubDebug(cudaMemcpyAsync(&fill_size, d_counters + FILL, sizeof(OffsetT), cudaMemcpyDeviceToHost, stream)); -#endif - } - - - /// Drain \p num_items from the queue. Returns offset from which to read items. To be called from CUDA kernel. - __device__ __forceinline__ OffsetT Drain(OffsetT num_items) - { - return atomicAdd(d_counters + DRAIN, num_items); - } - - - /// Fill \p num_items into the queue. Returns offset from which to write items. To be called from CUDA kernel. 
- __device__ __forceinline__ OffsetT Fill(OffsetT num_items) - { - return atomicAdd(d_counters + FILL, num_items); - } -}; - - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - - -/** - * Reset grid queue (call with 1 block of 1 thread) - */ -template -__global__ void FillAndResetDrainKernel( - GridQueue grid_queue, - OffsetT num_items) -{ - grid_queue.FillAndResetDrain(num_items); -} - - - -#endif // DOXYGEN_SHOULD_SKIP_THIS - - -/** @} */ // end group GridModule - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - - diff --git a/ml-xgboost/cub/cub/host/mutex.cuh b/ml-xgboost/cub/cub/host/mutex.cuh deleted file mode 100644 index 6a2aa6f..0000000 --- a/ml-xgboost/cub/cub/host/mutex.cuh +++ /dev/null @@ -1,167 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
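GridQueue above realizes the GRID_MAPPING_DYNAMIC strategy enumerated in grid_mapping.cuh: blocks repeatedly call Drain() to claim tile-sized chunks until the fill-size is exhausted. A sketch of that drain loop (kernel and names illustrative):

    #include <cub/grid/grid_queue.cuh>

    template <int TILE_ITEMS, typename OffsetT>
    __global__ void ConsumeDynamic(cub::GridQueue<OffsetT> queue, OffsetT num_items)
    {
        __shared__ OffsetT tile_offset;
        while (true)
        {
            if (threadIdx.x == 0)
                tile_offset = queue.Drain(TILE_ITEMS);   // atomically claim a tile
            __syncthreads();
            if (tile_offset >= num_items)
                break;                                   // queue exhausted
            // ... consume items [tile_offset, min(tile_offset + TILE_ITEMS, num_items)) ...
            __syncthreads();                             // protect tile_offset before reuse
        }
    }

    // Host side (illustrative): back the queue with device storage, then fill it
    //   cudaMalloc(&d_storage, cub::GridQueue<int>::AllocationSize());
    //   cub::GridQueue<int> queue(d_storage);
    //   queue.FillAndResetDrain(num_items);  // prepare for the draining kernel above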
- * - ******************************************************************************/ - -/** - * \file - * Simple portable mutex - */ - - -#pragma once - -#if __cplusplus > 199711L - #include -#else - #if defined(_WIN32) || defined(_WIN64) - #include - #include - #undef small // Windows is terrible for polluting macro namespace - - /** - * Compiler read/write barrier - */ - #pragma intrinsic(_ReadWriteBarrier) - - #endif -#endif - -#include "../util_namespace.cuh" - - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * Simple portable mutex - * - Wraps std::mutex when compiled with C++11 or newer (supported on all platforms) - * - Uses GNU/Windows spinlock mechanisms for pre C++11 (supported on x86/x64 when compiled with cl.exe or g++) - */ -struct Mutex -{ -#if __cplusplus > 199711L - - std::mutex mtx; - - void Lock() - { - mtx.lock(); - } - - void Unlock() - { - mtx.unlock(); - } - - void TryLock() - { - mtx.try_lock(); - } - -#else //__cplusplus > 199711L - - #if defined(_MSC_VER) - - // Microsoft VC++ - typedef long Spinlock; - - #else - - // GNU g++ - typedef int Spinlock; - - /** - * Compiler read/write barrier - */ - __forceinline__ void _ReadWriteBarrier() - { - __sync_synchronize(); - } - - /** - * Atomic exchange - */ - __forceinline__ long _InterlockedExchange(volatile int * const Target, const int Value) - { - // NOTE: __sync_lock_test_and_set would be an acquire barrier, so we force a full barrier - _ReadWriteBarrier(); - return __sync_lock_test_and_set(Target, Value); - } - - /** - * Pause instruction to prevent excess processor bus usage - */ - __forceinline__ void YieldProcessor() - { - } - - #endif // defined(_MSC_VER) - - /// Lock member - volatile Spinlock lock; - - /** - * Constructor - */ - Mutex() : lock(0) {} - - /** - * Return when the specified spinlock has been acquired - */ - __forceinline__ void Lock() - { - while (1) - { - if (!_InterlockedExchange(&lock, 1)) return; - while (lock) YieldProcessor(); - } - } - - - /** - * Release the specified spinlock - */ - __forceinline__ void Unlock() - { - _ReadWriteBarrier(); - lock = 0; - } - -#endif // __cplusplus > 199711L - -}; - - - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - diff --git a/ml-xgboost/cub/cub/iterator/arg_index_input_iterator.cuh b/ml-xgboost/cub/cub/iterator/arg_index_input_iterator.cuh deleted file mode 100644 index 4a7e852..0000000 --- a/ml-xgboost/cub/cub/iterator/arg_index_input_iterator.cuh +++ /dev/null @@ -1,259 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. 
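The pre-C++11 path above builds a spinlock from _InterlockedExchange plus compiler barriers; on C++11 toolchains the struct simply forwards to std::mutex. Host-side usage is the same either way; a minimal sketch (function name illustrative):

    #include <cub/host/mutex.cuh>

    static cub::Mutex g_mutex;   // guards some host-side shared state

    void UpdateSharedState()
    {
        g_mutex.Lock();
        // ... critical section: mutate state shared across host threads ...
        g_mutex.Unlock();
    }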
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- ******************************************************************************/
-
-/**
- * \file
- * Random-access iterator types
- */
-
-#pragma once
-
-#include <iterator>
-#include <iostream>
-
-#include "../thread/thread_load.cuh"
-#include "../thread/thread_store.cuh"
-#include "../util_device.cuh"
-#include "../util_namespace.cuh"
-
-#include <thrust/version.h>
-
-#if (THRUST_VERSION >= 100700)
-    // This iterator is compatible with Thrust API 1.7 and newer
-    #include <thrust/iterator/iterator_facade.h>
-    #include <thrust/iterator/iterator_traits.h>
-#endif // THRUST_VERSION
-
-/// Optional outer namespace(s)
-CUB_NS_PREFIX
-
-/// CUB namespace
-namespace cub {
-
-/**
- * \addtogroup UtilIterator
- * @{
- */
-
-
-/**
- * \brief A random-access input wrapper for pairing dereferenced values with their corresponding indices (forming \p KeyValuePair tuples).
- *
- * \par Overview
- * - ArgIndexInputIteratorT wraps a random access input iterator \p itr of type \p InputIteratorT.
- *   Dereferencing an ArgIndexInputIteratorT at offset \p i produces a \p KeyValuePair value whose
- *   \p key field is \p i and whose \p value field is itr[i].
- * - Can be used with any data type.
- * - Can be constructed, manipulated, and exchanged within and between host and device
- *   functions.  Wrapped host memory can only be dereferenced on the host, and wrapped
- *   device memory can only be dereferenced on the device.
- * - Compatible with Thrust API v1.7 or newer.
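The overview above is the whole contract: dereferencing at offset i yields a KeyValuePair whose key field is i and whose value field is itr[i]. A host-only sketch of that pairing in plain C++ follows; KVPair and ArgIndexSketch are illustrative stand-ins for the CUB types, not CUB API.

#include <cstddef>
#include <cstdio>

// Sketch: pair each dereferenced value with its index, as the wrapper
// described above does. KVPair mimics cub::KeyValuePair.
template <typename T, typename Offset = std::ptrdiff_t>
struct KVPair { Offset key; T value; };

template <typename T, typename Offset = std::ptrdiff_t>
struct ArgIndexSketch {
    const T* itr;     // wrapped iterator (here: a raw pointer)
    Offset   offset;  // current position

    KVPair<T, Offset> operator*() const { return {offset, itr[offset]}; }
    ArgIndexSketch    operator+(Offset n) const { return {itr, offset + n}; }
};

int main() {
    double in[] = {8.0, 6.0, 7.0, 5.0, 3.0, 0.0, 9.0};
    ArgIndexSketch<double> it{in, 0};
    KVPair<double> p = *it;
    std::printf("%f @ %ld\n", p.value, (long)p.key);   // 8.0 @ 0
    p = *(it + 6);
    std::printf("%f @ %ld\n", p.value, (long)p.key);   // 9.0 @ 6
}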
- * - * \par Snippet - * The code snippet below illustrates the use of \p ArgIndexInputIteratorTto - * dereference an array of doubles - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize a device array - * double *d_in; // e.g., [8.0, 6.0, 7.0, 5.0, 3.0, 0.0, 9.0] - * - * // Create an iterator wrapper - * cub::ArgIndexInputIterator itr(d_in); - * - * // Within device code: - * typedef typename cub::ArgIndexInputIterator::value_type Tuple; - * Tuple item_offset_pair.key = *itr; - * printf("%f @ %d\n", - * item_offset_pair.value, - * item_offset_pair.key); // 8.0 @ 0 - * - * itr = itr + 6; - * item_offset_pair.key = *itr; - * printf("%f @ %d\n", - * item_offset_pair.value, - * item_offset_pair.key); // 9.0 @ 6 - * - * \endcode - * - * \tparam InputIteratorT The value type of the wrapped input iterator - * \tparam OffsetT The difference type of this iterator (Default: \p ptrdiff_t) - * \tparam OutputValueT The paired value type of the tuple (Default: value type of input iterator) - */ -template < - typename InputIteratorT, - typename OffsetT = ptrdiff_t, - typename OutputValueT = typename std::iterator_traits::value_type> -class ArgIndexInputIterator -{ -public: - - // Required iterator traits - typedef ArgIndexInputIterator self_type; ///< My own type - typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another - typedef KeyValuePair value_type; ///< The type of the element the iterator can point to - typedef value_type* pointer; ///< The type of a pointer to an element the iterator can point to - typedef value_type reference; ///< The type of a reference to an element the iterator can point to - -#if (THRUST_VERSION >= 100700) - // Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods - typedef typename thrust::detail::iterator_facade_category< - thrust::any_system_tag, - thrust::random_access_traversal_tag, - value_type, - reference - >::type iterator_category; ///< The iterator category -#else - typedef std::random_access_iterator_tag iterator_category; ///< The iterator category -#endif // THRUST_VERSION - -private: - - InputIteratorT itr; - difference_type offset; - -public: - - /// Constructor - __host__ __device__ __forceinline__ ArgIndexInputIterator( - InputIteratorT itr, ///< Input iterator to wrap - difference_type offset = 0) ///< OffsetT (in items) from \p itr denoting the position of the iterator - : - itr(itr), - offset(offset) - {} - - /// Postfix increment - __host__ __device__ __forceinline__ self_type operator++(int) - { - self_type retval = *this; - offset++; - return retval; - } - - /// Prefix increment - __host__ __device__ __forceinline__ self_type operator++() - { - offset++; - return *this; - } - - /// Indirection - __host__ __device__ __forceinline__ reference operator*() const - { - value_type retval; - retval.value = itr[offset]; - retval.key = offset; - return retval; - } - - /// Addition - template - __host__ __device__ __forceinline__ self_type operator+(Distance n) const - { - self_type retval(itr, offset + n); - return retval; - } - - /// Addition assignment - template - __host__ __device__ __forceinline__ self_type& operator+=(Distance n) - { - offset += n; - return *this; - } - - /// Subtraction - template - __host__ __device__ __forceinline__ self_type operator-(Distance n) const - { - self_type retval(itr, offset - n); - return retval; - } - - /// Subtraction assignment - template - __host__ __device__ __forceinline__ 
self_type& operator-=(Distance n) - { - offset -= n; - return *this; - } - - /// Distance - __host__ __device__ __forceinline__ difference_type operator-(self_type other) const - { - return offset - other.offset; - } - - /// Array subscript - template - __host__ __device__ __forceinline__ reference operator[](Distance n) const - { - self_type offset = (*this) + n; - return *offset; - } - - /// Structure dereference - __host__ __device__ __forceinline__ pointer operator->() - { - return &(*(*this)); - } - - /// Equal to - __host__ __device__ __forceinline__ bool operator==(const self_type& rhs) - { - return ((itr == rhs.itr) && (offset == rhs.offset)); - } - - /// Not equal to - __host__ __device__ __forceinline__ bool operator!=(const self_type& rhs) - { - return ((itr != rhs.itr) || (offset != rhs.offset)); - } - - /// Normalize - __host__ __device__ __forceinline__ void normalize() - { - itr += offset; - offset = 0; - } - - /// ostream operator - friend std::ostream& operator<<(std::ostream& os, const self_type& /*itr*/) - { - return os; - } -}; - - - -/** @} */ // end group UtilIterator - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/iterator/cache_modified_input_iterator.cuh b/ml-xgboost/cub/cub/iterator/cache_modified_input_iterator.cuh deleted file mode 100644 index fb97e90..0000000 --- a/ml-xgboost/cub/cub/iterator/cache_modified_input_iterator.cuh +++ /dev/null @@ -1,240 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - ******************************************************************************/ - -/** - * \file - * Random-access iterator types - */ - -#pragma once - -#include -#include - -#include "../thread/thread_load.cuh" -#include "../thread/thread_store.cuh" -#include "../util_device.cuh" -#include "../util_namespace.cuh" - -#if (THRUST_VERSION >= 100700) - // This iterator is compatible with Thrust API 1.7 and newer - #include - #include -#endif // THRUST_VERSION - - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - - -/** - * \addtogroup UtilIterator - * @{ - */ - - -/** - * \brief A random-access input wrapper for dereferencing array values using a PTX cache load modifier. - * - * \par Overview - * - CacheModifiedInputIteratorTis a random-access input iterator that wraps a native - * device pointer of type ValueType*. \p ValueType references are - * made by reading \p ValueType values through loads modified by \p MODIFIER. - * - Can be used to load any data type from memory using PTX cache load modifiers (e.g., "LOAD_LDG", - * "LOAD_CG", "LOAD_CA", "LOAD_CS", "LOAD_CV", etc.). - * - Can be constructed, manipulated, and exchanged within and between host and device - * functions, but can only be dereferenced within device functions. - * - Compatible with Thrust API v1.7 or newer. - * - * \par Snippet - * The code snippet below illustrates the use of \p CacheModifiedInputIteratorTto - * dereference a device array of double using the "ldg" PTX load modifier - * (i.e., load values through texture cache). - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize a device array - * double *d_in; // e.g., [8.0, 6.0, 7.0, 5.0, 3.0, 0.0, 9.0] - * - * // Create an iterator wrapper - * cub::CacheModifiedInputIterator itr(d_in); - * - * // Within device code: - * printf("%f\n", itr[0]); // 8.0 - * printf("%f\n", itr[1]); // 6.0 - * printf("%f\n", itr[6]); // 9.0 - * - * \endcode - * - * \tparam CacheLoadModifier The cub::CacheLoadModifier to use when accessing data - * \tparam ValueType The value type of this iterator - * \tparam OffsetT The difference type of this iterator (Default: \p ptrdiff_t) - */ -template < - CacheLoadModifier MODIFIER, - typename ValueType, - typename OffsetT = ptrdiff_t> -class CacheModifiedInputIterator -{ -public: - - // Required iterator traits - typedef CacheModifiedInputIterator self_type; ///< My own type - typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another - typedef ValueType value_type; ///< The type of the element the iterator can point to - typedef ValueType* pointer; ///< The type of a pointer to an element the iterator can point to - typedef ValueType reference; ///< The type of a reference to an element the iterator can point to - -#if (THRUST_VERSION >= 100700) - // Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods - typedef typename thrust::detail::iterator_facade_category< - thrust::device_system_tag, - thrust::random_access_traversal_tag, - value_type, - reference - >::type iterator_category; ///< The iterator category -#else - typedef std::random_access_iterator_tag iterator_category; ///< The iterator category -#endif // THRUST_VERSION - - -public: - - /// Wrapped native pointer - ValueType* ptr; - - /// Constructor - template - __host__ __device__ __forceinline__ CacheModifiedInputIterator( - QualifiedValueType* ptr) ///< Native pointer to wrap - : - ptr(const_cast::Type *>(ptr)) 
- {} - - /// Postfix increment - __host__ __device__ __forceinline__ self_type operator++(int) - { - self_type retval = *this; - ptr++; - return retval; - } - - /// Prefix increment - __host__ __device__ __forceinline__ self_type operator++() - { - ptr++; - return *this; - } - - /// Indirection - __device__ __forceinline__ reference operator*() const - { - return ThreadLoad(ptr); - } - - /// Addition - template - __host__ __device__ __forceinline__ self_type operator+(Distance n) const - { - self_type retval(ptr + n); - return retval; - } - - /// Addition assignment - template - __host__ __device__ __forceinline__ self_type& operator+=(Distance n) - { - ptr += n; - return *this; - } - - /// Subtraction - template - __host__ __device__ __forceinline__ self_type operator-(Distance n) const - { - self_type retval(ptr - n); - return retval; - } - - /// Subtraction assignment - template - __host__ __device__ __forceinline__ self_type& operator-=(Distance n) - { - ptr -= n; - return *this; - } - - /// Distance - __host__ __device__ __forceinline__ difference_type operator-(self_type other) const - { - return ptr - other.ptr; - } - - /// Array subscript - template - __device__ __forceinline__ reference operator[](Distance n) const - { - return ThreadLoad(ptr + n); - } - - /// Structure dereference - __device__ __forceinline__ pointer operator->() - { - return &ThreadLoad(ptr); - } - - /// Equal to - __host__ __device__ __forceinline__ bool operator==(const self_type& rhs) - { - return (ptr == rhs.ptr); - } - - /// Not equal to - __host__ __device__ __forceinline__ bool operator!=(const self_type& rhs) - { - return (ptr != rhs.ptr); - } - - /// ostream operator - friend std::ostream& operator<<(std::ostream& os, const self_type& /*itr*/) - { - return os; - } -}; - - - -/** @} */ // end group UtilIterator - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/iterator/cache_modified_output_iterator.cuh b/ml-xgboost/cub/cub/iterator/cache_modified_output_iterator.cuh deleted file mode 100644 index 1fb0bf6..0000000 --- a/ml-xgboost/cub/cub/iterator/cache_modified_output_iterator.cuh +++ /dev/null @@ -1,254 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
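A design point in the class just closed: the PTX cache hint is part of the iterator's type, so every dereference lowers to ThreadLoad<MODIFIER>(ptr) without each call site repeating the modifier. A minimal consumption sketch, assuming the vendored CUB headers are on the include path and the file is compiled with nvcc for sm_35 or newer (LOAD_LDG needs the read-only data cache); SumKernel and the buffers are invented for illustration.

#include <cstdio>
#include <cub/iterator/cache_modified_input_iterator.cuh>

// The cache hint travels with the iterator type: each in[i] below
// compiles to a read-only (LDG-path) load, not a plain global load.
__global__ void SumKernel(
    cub::CacheModifiedInputIterator<cub::LOAD_LDG, double> in,
    double* out, int n)
{
    double acc = 0.0;
    for (int i = 0; i < n; ++i) acc += in[i];
    *out = acc;
}

int main() {
    double h[7] = {8, 6, 7, 5, 3, 0, 9}, *d_in, *d_out, h_out;
    cudaMalloc(&d_in, sizeof h);
    cudaMalloc(&d_out, sizeof(double));
    cudaMemcpy(d_in, h, sizeof h, cudaMemcpyHostToDevice);
    cub::CacheModifiedInputIterator<cub::LOAD_LDG, double> itr(d_in);
    SumKernel<<<1, 1>>>(itr, d_out, 7);
    cudaMemcpy(&h_out, d_out, sizeof(double), cudaMemcpyDeviceToHost);
    std::printf("%f\n", h_out);   // 38.0
}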
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * Random-access iterator types - */ - -#pragma once - -#include -#include - -#include "../thread/thread_load.cuh" -#include "../thread/thread_store.cuh" -#include "../util_device.cuh" -#include "../util_namespace.cuh" - -#if (THRUST_VERSION >= 100700) - // This iterator is compatible with Thrust API 1.7 and newer - #include - #include -#endif // THRUST_VERSION - - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \addtogroup UtilIterator - * @{ - */ - - -/** - * \brief A random-access output wrapper for storing array values using a PTX cache-modifier. - * - * \par Overview - * - CacheModifiedOutputIterator is a random-access output iterator that wraps a native - * device pointer of type ValueType*. \p ValueType references are - * made by writing \p ValueType values through stores modified by \p MODIFIER. - * - Can be used to store any data type to memory using PTX cache store modifiers (e.g., "STORE_WB", - * "STORE_CG", "STORE_CS", "STORE_WT", etc.). - * - Can be constructed, manipulated, and exchanged within and between host and device - * functions, but can only be dereferenced within device functions. - * - Compatible with Thrust API v1.7 or newer. - * - * \par Snippet - * The code snippet below illustrates the use of \p CacheModifiedOutputIterator to - * dereference a device array of doubles using the "wt" PTX load modifier - * (i.e., write-through to system memory). 
- * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize a device array - * double *d_out; // e.g., [, , , , , , ] - * - * // Create an iterator wrapper - * cub::CacheModifiedOutputIterator itr(d_out); - * - * // Within device code: - * itr[0] = 8.0; - * itr[1] = 66.0; - * itr[55] = 24.0; - * - * \endcode - * - * \par Usage Considerations - * - Can only be dereferenced within device code - * - * \tparam CacheStoreModifier The cub::CacheStoreModifier to use when accessing data - * \tparam ValueType The value type of this iterator - * \tparam OffsetT The difference type of this iterator (Default: \p ptrdiff_t) - */ -template < - CacheStoreModifier MODIFIER, - typename ValueType, - typename OffsetT = ptrdiff_t> -class CacheModifiedOutputIterator -{ -private: - - // Proxy object - struct Reference - { - ValueType* ptr; - - /// Constructor - __host__ __device__ __forceinline__ Reference(ValueType* ptr) : ptr(ptr) {} - - /// Assignment - __device__ __forceinline__ ValueType operator =(ValueType val) - { - ThreadStore(ptr, val); - return val; - } - }; - -public: - - // Required iterator traits - typedef CacheModifiedOutputIterator self_type; ///< My own type - typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another - typedef void value_type; ///< The type of the element the iterator can point to - typedef void pointer; ///< The type of a pointer to an element the iterator can point to - typedef Reference reference; ///< The type of a reference to an element the iterator can point to - -#if (THRUST_VERSION >= 100700) - // Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods - typedef typename thrust::detail::iterator_facade_category< - thrust::device_system_tag, - thrust::random_access_traversal_tag, - value_type, - reference - >::type iterator_category; ///< The iterator category -#else - typedef std::random_access_iterator_tag iterator_category; ///< The iterator category -#endif // THRUST_VERSION - -private: - - ValueType* ptr; - -public: - - /// Constructor - template - __host__ __device__ __forceinline__ CacheModifiedOutputIterator( - QualifiedValueType* ptr) ///< Native pointer to wrap - : - ptr(const_cast::Type *>(ptr)) - {} - - /// Postfix increment - __host__ __device__ __forceinline__ self_type operator++(int) - { - self_type retval = *this; - ptr++; - return retval; - } - - - /// Prefix increment - __host__ __device__ __forceinline__ self_type operator++() - { - ptr++; - return *this; - } - - /// Indirection - __host__ __device__ __forceinline__ reference operator*() const - { - return Reference(ptr); - } - - /// Addition - template - __host__ __device__ __forceinline__ self_type operator+(Distance n) const - { - self_type retval(ptr + n); - return retval; - } - - /// Addition assignment - template - __host__ __device__ __forceinline__ self_type& operator+=(Distance n) - { - ptr += n; - return *this; - } - - /// Subtraction - template - __host__ __device__ __forceinline__ self_type operator-(Distance n) const - { - self_type retval(ptr - n); - return retval; - } - - /// Subtraction assignment - template - __host__ __device__ __forceinline__ self_type& operator-=(Distance n) - { - ptr -= n; - return *this; - } - - /// Distance - __host__ __device__ __forceinline__ difference_type operator-(self_type other) const - { - return ptr - other.ptr; - } - - /// Array subscript - template - __host__ __device__ __forceinline__ reference operator[](Distance n) const - 
{ - return Reference(ptr + n); - } - - /// Equal to - __host__ __device__ __forceinline__ bool operator==(const self_type& rhs) - { - return (ptr == rhs.ptr); - } - - /// Not equal to - __host__ __device__ __forceinline__ bool operator!=(const self_type& rhs) - { - return (ptr != rhs.ptr); - } - - /// ostream operator - friend std::ostream& operator<<(std::ostream& os, const self_type& itr) - { - return os; - } -}; - - -/** @} */ // end group UtilIterator - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/iterator/constant_input_iterator.cuh b/ml-xgboost/cub/cub/iterator/constant_input_iterator.cuh deleted file mode 100644 index e205c43..0000000 --- a/ml-xgboost/cub/cub/iterator/constant_input_iterator.cuh +++ /dev/null @@ -1,235 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * Random-access iterator types - */ - -#pragma once - -#include -#include - -#include "../thread/thread_load.cuh" -#include "../thread/thread_store.cuh" -#include "../util_namespace.cuh" - -#if (THRUST_VERSION >= 100700) - // This iterator is compatible with Thrust API 1.7 and newer - #include - #include -#endif // THRUST_VERSION - - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \addtogroup UtilIterator - * @{ - */ - - -/** - * \brief A random-access input generator for dereferencing a sequence of homogeneous values - * - * \par Overview - * - Read references to a ConstantInputIteratorTiterator always return the supplied constant - * of type \p ValueType. - * - Can be used with any data type. - * - Can be constructed, manipulated, dereferenced, and exchanged within and between host and device - * functions. - * - Compatible with Thrust API v1.7 or newer. 
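One structural note before the constant iterator's own snippet below: the output wrapper closed out above (CacheModifiedOutputIterator) cannot return a plain ValueType& from operator*, because the store itself must go through ThreadStore<MODIFIER>; instead it returns a proxy Reference whose operator= performs the store, which is also why its value_type is void. A host-only sketch of that proxy pattern with the store replaced by a printf; LoggingOutputIt is a made-up name, not CUB API.

#include <cstddef>
#include <cstdio>

// Proxy-reference pattern: "*it = x" is intercepted by Reference's
// operator=, standing in here for ThreadStore<MODIFIER>(ptr, x).
struct LoggingOutputIt {
    double* ptr;

    struct Reference {
        double* p;
        double operator=(double v) const {
            std::printf("store %f -> %p\n", v, (void*)p);
            *p = v;
            return v;
        }
    };

    Reference operator*() const { return Reference{ptr}; }
    Reference operator[](std::ptrdiff_t n) const { return Reference{ptr + n}; }
};

int main() {
    double buf[3] = {0, 0, 0};
    LoggingOutputIt it{buf};
    *it = 8.0;       // goes through the proxy
    it[2] = 24.0;    // likewise, at an offset
    std::printf("%f %f\n", buf[0], buf[2]);   // 8.0 24.0
}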
- * - * \par Snippet - * The code snippet below illustrates the use of \p ConstantInputIteratorTto - * dereference a sequence of homogeneous doubles. - * \par - * \code - * #include // or equivalently - * - * cub::ConstantInputIterator itr(5.0); - * - * printf("%f\n", itr[0]); // 5.0 - * printf("%f\n", itr[1]); // 5.0 - * printf("%f\n", itr[2]); // 5.0 - * printf("%f\n", itr[50]); // 5.0 - * - * \endcode - * - * \tparam ValueType The value type of this iterator - * \tparam OffsetT The difference type of this iterator (Default: \p ptrdiff_t) - */ -template < - typename ValueType, - typename OffsetT = ptrdiff_t> -class ConstantInputIterator -{ -public: - - // Required iterator traits - typedef ConstantInputIterator self_type; ///< My own type - typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another - typedef ValueType value_type; ///< The type of the element the iterator can point to - typedef ValueType* pointer; ///< The type of a pointer to an element the iterator can point to - typedef ValueType reference; ///< The type of a reference to an element the iterator can point to - -#if (THRUST_VERSION >= 100700) - // Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods - typedef typename thrust::detail::iterator_facade_category< - thrust::any_system_tag, - thrust::random_access_traversal_tag, - value_type, - reference - >::type iterator_category; ///< The iterator category -#else - typedef std::random_access_iterator_tag iterator_category; ///< The iterator category -#endif // THRUST_VERSION - -private: - - ValueType val; - OffsetT offset; -#ifdef _WIN32 - OffsetT pad[CUB_MAX(1, (16 / sizeof(OffsetT) - 1))]; // Workaround for win32 parameter-passing bug (ulonglong2 argmin DeviceReduce) -#endif - -public: - - /// Constructor - __host__ __device__ __forceinline__ ConstantInputIterator( - ValueType val, ///< Starting value for the iterator instance to report - OffsetT offset = 0) ///< Base offset - : - val(val), - offset(offset) - {} - - /// Postfix increment - __host__ __device__ __forceinline__ self_type operator++(int) - { - self_type retval = *this; - offset++; - return retval; - } - - /// Prefix increment - __host__ __device__ __forceinline__ self_type operator++() - { - offset++; - return *this; - } - - /// Indirection - __host__ __device__ __forceinline__ reference operator*() const - { - return val; - } - - /// Addition - template - __host__ __device__ __forceinline__ self_type operator+(Distance n) const - { - self_type retval(val, offset + n); - return retval; - } - - /// Addition assignment - template - __host__ __device__ __forceinline__ self_type& operator+=(Distance n) - { - offset += n; - return *this; - } - - /// Subtraction - template - __host__ __device__ __forceinline__ self_type operator-(Distance n) const - { - self_type retval(val, offset - n); - return retval; - } - - /// Subtraction assignment - template - __host__ __device__ __forceinline__ self_type& operator-=(Distance n) - { - offset -= n; - return *this; - } - - /// Distance - __host__ __device__ __forceinline__ difference_type operator-(self_type other) const - { - return offset - other.offset; - } - - /// Array subscript - template - __host__ __device__ __forceinline__ reference operator[](Distance /*n*/) const - { - return val; - } - - /// Structure dereference - __host__ __device__ __forceinline__ pointer operator->() - { - return &val; - } - - /// Equal to - __host__ __device__ __forceinline__ bool operator==(const 
self_type& rhs) - { - return (offset == rhs.offset) && ((val == rhs.val)); - } - - /// Not equal to - __host__ __device__ __forceinline__ bool operator!=(const self_type& rhs) - { - return (offset != rhs.offset) || (val!= rhs.val); - } - - /// ostream operator - friend std::ostream& operator<<(std::ostream& os, const self_type& itr) - { - os << "[" << itr.val << "," << itr.offset << "]"; - return os; - } - -}; - - -/** @} */ // end group UtilIterator - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/iterator/counting_input_iterator.cuh b/ml-xgboost/cub/cub/iterator/counting_input_iterator.cuh deleted file mode 100644 index 224b664..0000000 --- a/ml-xgboost/cub/cub/iterator/counting_input_iterator.cuh +++ /dev/null @@ -1,228 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * Random-access iterator types - */ - -#pragma once - -#include -#include - -#include "../thread/thread_load.cuh" -#include "../thread/thread_store.cuh" -#include "../util_device.cuh" -#include "../util_namespace.cuh" - -#if (THRUST_VERSION >= 100700) - // This iterator is compatible with Thrust API 1.7 and newer - #include - #include -#endif // THRUST_VERSION - - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/** - * \addtogroup UtilIterator - * @{ - */ - -/** - * \brief A random-access input generator for dereferencing a sequence of incrementing integer values. - * - * \par Overview - * - After initializing a CountingInputIteratorTto a certain integer \p base, read references - * at \p offset will return the value \p base + \p offset. - * - Can be constructed, manipulated, dereferenced, and exchanged within and between host and device - * functions. 
- * - Compatible with Thrust API v1.7 or newer. - * - * \par Snippet - * The code snippet below illustrates the use of \p CountingInputIteratorTto - * dereference a sequence of incrementing integers. - * \par - * \code - * #include // or equivalently - * - * cub::CountingInputIterator itr(5); - * - * printf("%d\n", itr[0]); // 5 - * printf("%d\n", itr[1]); // 6 - * printf("%d\n", itr[2]); // 7 - * printf("%d\n", itr[50]); // 55 - * - * \endcode - * - * \tparam ValueType The value type of this iterator - * \tparam OffsetT The difference type of this iterator (Default: \p ptrdiff_t) - */ -template < - typename ValueType, - typename OffsetT = ptrdiff_t> -class CountingInputIterator -{ -public: - - // Required iterator traits - typedef CountingInputIterator self_type; ///< My own type - typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another - typedef ValueType value_type; ///< The type of the element the iterator can point to - typedef ValueType* pointer; ///< The type of a pointer to an element the iterator can point to - typedef ValueType reference; ///< The type of a reference to an element the iterator can point to - -#if (THRUST_VERSION >= 100700) - // Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods - typedef typename thrust::detail::iterator_facade_category< - thrust::any_system_tag, - thrust::random_access_traversal_tag, - value_type, - reference - >::type iterator_category; ///< The iterator category -#else - typedef std::random_access_iterator_tag iterator_category; ///< The iterator category -#endif // THRUST_VERSION - -private: - - ValueType val; - -public: - - /// Constructor - __host__ __device__ __forceinline__ CountingInputIterator( - const ValueType &val) ///< Starting value for the iterator instance to report - : - val(val) - {} - - /// Postfix increment - __host__ __device__ __forceinline__ self_type operator++(int) - { - self_type retval = *this; - val++; - return retval; - } - - /// Prefix increment - __host__ __device__ __forceinline__ self_type operator++() - { - val++; - return *this; - } - - /// Indirection - __host__ __device__ __forceinline__ reference operator*() const - { - return val; - } - - /// Addition - template - __host__ __device__ __forceinline__ self_type operator+(Distance n) const - { - self_type retval(val + (ValueType) n); - return retval; - } - - /// Addition assignment - template - __host__ __device__ __forceinline__ self_type& operator+=(Distance n) - { - val += (ValueType) n; - return *this; - } - - /// Subtraction - template - __host__ __device__ __forceinline__ self_type operator-(Distance n) const - { - self_type retval(val - (ValueType) n); - return retval; - } - - /// Subtraction assignment - template - __host__ __device__ __forceinline__ self_type& operator-=(Distance n) - { - val -= n; - return *this; - } - - /// Distance - __host__ __device__ __forceinline__ difference_type operator-(self_type other) const - { - return (difference_type) (val - other.val); - } - - /// Array subscript - template - __host__ __device__ __forceinline__ reference operator[](Distance n) const - { - return val + (ValueType) n; - } - - /// Structure dereference - __host__ __device__ __forceinline__ pointer operator->() - { - return &val; - } - - /// Equal to - __host__ __device__ __forceinline__ bool operator==(const self_type& rhs) - { - return (val == rhs.val); - } - - /// Not equal to - __host__ __device__ __forceinline__ bool operator!=(const self_type& rhs) - { - 
return (val != rhs.val); - } - - /// ostream operator - friend std::ostream& operator<<(std::ostream& os, const self_type& itr) - { - os << "[" << itr.val << "]"; - return os; - } - -}; - - - -/** @} */ // end group UtilIterator - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/iterator/discard_output_iterator.cuh b/ml-xgboost/cub/cub/iterator/discard_output_iterator.cuh deleted file mode 100644 index 9603a32..0000000 --- a/ml-xgboost/cub/cub/iterator/discard_output_iterator.cuh +++ /dev/null @@ -1,222 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
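CountingInputIterator, closed out just above, is dereferenceable on host as well as device, so its contract (itr[n] equals the starting value plus n, and iterator difference recovers the distance) can be checked from plain host code. A short sketch, assuming the vendored header is on the include path and the file is compiled with nvcc (the header relies on CUDA's __host__/__device__ qualifiers):

#include <cstdio>
#include <cub/iterator/counting_input_iterator.cuh>

int main() {
    cub::CountingInputIterator<int> itr(5);
    // Reads synthesize values on the fly; no backing array exists.
    std::printf("%d %d %d\n", itr[0], itr[2], itr[50]);   // 5 7 55
    cub::CountingInputIterator<int> jtr = itr + 10;
    std::printf("%ld\n", (long)(jtr - itr));              // 10
}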
- * - ******************************************************************************/ - -/** - * \file - * Random-access iterator types - */ - -#pragma once - -#include -#include - -#include - -#include "../util_namespace.cuh" -#include "../util_macro.cuh" - -#if (THRUST_VERSION >= 100700) - // This iterator is compatible with Thrust API 1.7 and newer - #include - #include -#endif // THRUST_VERSION - - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \addtogroup UtilIterator - * @{ - */ - - -/** - * \brief A discard iterator - */ -template -class DiscardOutputIterator -{ -public: - - // Required iterator traits - typedef DiscardOutputIterator self_type; ///< My own type - typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another - typedef void value_type; ///< The type of the element the iterator can point to - typedef void pointer; ///< The type of a pointer to an element the iterator can point to - typedef void reference; ///< The type of a reference to an element the iterator can point to - -#if (THRUST_VERSION >= 100700) - // Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods - typedef typename thrust::detail::iterator_facade_category< - thrust::any_system_tag, - thrust::random_access_traversal_tag, - value_type, - reference - >::type iterator_category; ///< The iterator category -#else - typedef std::random_access_iterator_tag iterator_category; ///< The iterator category -#endif // THRUST_VERSION - -private: - - OffsetT offset; - -#if defined(_WIN32) || !defined(_WIN64) - // Workaround for win32 parameter-passing bug (ulonglong2 argmin DeviceReduce) - OffsetT pad[CUB_MAX(1, (16 / sizeof(OffsetT) - 1))]; -#endif - -public: - - /// Constructor - __host__ __device__ __forceinline__ DiscardOutputIterator( - OffsetT offset = 0) ///< Base offset - : - offset(offset) - {} - - /// Postfix increment - __host__ __device__ __forceinline__ self_type operator++(int) - { - self_type retval = *this; - offset++; - return retval; - } - - /// Prefix increment - __host__ __device__ __forceinline__ self_type operator++() - { - offset++; - return *this; - } - - /// Indirection - __host__ __device__ __forceinline__ self_type& operator*() - { - // return self reference, which can be assigned to anything - return *this; - } - - /// Addition - template - __host__ __device__ __forceinline__ self_type operator+(Distance n) const - { - self_type retval(offset + n); - return retval; - } - - /// Addition assignment - template - __host__ __device__ __forceinline__ self_type& operator+=(Distance n) - { - offset += n; - return *this; - } - - /// Subtraction - template - __host__ __device__ __forceinline__ self_type operator-(Distance n) const - { - self_type retval(offset - n); - return retval; - } - - /// Subtraction assignment - template - __host__ __device__ __forceinline__ self_type& operator-=(Distance n) - { - offset -= n; - return *this; - } - - /// Distance - __host__ __device__ __forceinline__ difference_type operator-(self_type other) const - { - return offset - other.offset; - } - - /// Array subscript - template - __host__ __device__ __forceinline__ self_type& operator[](Distance n) - { - // return self reference, which can be assigned to anything - return *this; - } - - /// Structure dereference - __host__ __device__ __forceinline__ pointer operator->() - { - return; - } - - /// Assignment to self (no-op) - __host__ __device__ __forceinline__ void 
operator=(self_type const& other) - { - offset = other.offset; - } - - /// Assignment to anything else (no-op) - template - __host__ __device__ __forceinline__ void operator=(T const&) - {} - - /// Cast to void* operator - __host__ __device__ __forceinline__ operator void*() const { return NULL; } - - /// Equal to - __host__ __device__ __forceinline__ bool operator==(const self_type& rhs) - { - return (offset == rhs.offset); - } - - /// Not equal to - __host__ __device__ __forceinline__ bool operator!=(const self_type& rhs) - { - return (offset != rhs.offset); - } - - /// ostream operator - friend std::ostream& operator<<(std::ostream& os, const self_type& itr) - { - os << "[" << itr.offset << "]"; - return os; - } - -}; - - -/** @} */ // end group UtilIterator - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/iterator/tex_obj_input_iterator.cuh b/ml-xgboost/cub/cub/iterator/tex_obj_input_iterator.cuh deleted file mode 100644 index 7ae72ad..0000000 --- a/ml-xgboost/cub/cub/iterator/tex_obj_input_iterator.cuh +++ /dev/null @@ -1,310 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * Random-access iterator types - */ - -#pragma once - -#include -#include - -#include "../thread/thread_load.cuh" -#include "../thread/thread_store.cuh" -#include "../util_device.cuh" -#include "../util_debug.cuh" -#include "../util_namespace.cuh" - -#if (THRUST_VERSION >= 100700) - // This iterator is compatible with Thrust API 1.7 and newer - #include - #include -#endif // THRUST_VERSION - - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/** - * \addtogroup UtilIterator - * @{ - */ - - - -/** - * \brief A random-access input wrapper for dereferencing array values through texture cache. 
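Before the texture iterators, one remark on the discard iterator just removed: operator* and operator[] return the iterator itself, and its templated operator= accepts and drops a value of any type, making it a write-only sink for algorithms that insist on an output iterator for results the caller does not want. A tiny host sketch under the same assumptions as the earlier ones (nvcc, vendored header on the include path); the OffsetT argument is spelled out explicitly here rather than relying on a default:

#include <cstddef>
#include <cstdio>
#include <cub/iterator/discard_output_iterator.cuh>

int main() {
    cub::DiscardOutputIterator<std::ptrdiff_t> sink(0);
    *sink = 3.14;        // swallowed by the templated operator=
    sink[42] = "text";   // any type is accepted and discarded
    std::printf("ok\n");
}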
Uses newer Kepler-style texture objects. - * - * \par Overview - * - TexObjInputIteratorTwraps a native device pointer of type ValueType*. References - * to elements are to be loaded through texture cache. - * - Can be used to load any data type from memory through texture cache. - * - Can be manipulated and exchanged within and between host and device - * functions, can only be constructed within host functions, and can only be - * dereferenced within device functions. - * - With regard to nested/dynamic parallelism, TexObjInputIteratorTiterators may only be - * created by the host thread, but can be used by any descendant kernel. - * - Compatible with Thrust API v1.7 or newer. - * - * \par Snippet - * The code snippet below illustrates the use of \p TexRefInputIteratorTto - * dereference a device array of doubles through texture cache. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize a device array - * int num_items; // e.g., 7 - * double *d_in; // e.g., [8.0, 6.0, 7.0, 5.0, 3.0, 0.0, 9.0] - * - * // Create an iterator wrapper - * cub::TexObjInputIterator itr; - * itr.BindTexture(d_in, sizeof(double) * num_items); - * ... - * - * // Within device code: - * printf("%f\n", itr[0]); // 8.0 - * printf("%f\n", itr[1]); // 6.0 - * printf("%f\n", itr[6]); // 9.0 - * - * ... - * itr.UnbindTexture(); - * - * \endcode - * - * \tparam T The value type of this iterator - * \tparam OffsetT The difference type of this iterator (Default: \p ptrdiff_t) - */ -template < - typename T, - typename OffsetT = ptrdiff_t> -class TexObjInputIterator -{ -public: - - // Required iterator traits - typedef TexObjInputIterator self_type; ///< My own type - typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another - typedef T value_type; ///< The type of the element the iterator can point to - typedef T* pointer; ///< The type of a pointer to an element the iterator can point to - typedef T reference; ///< The type of a reference to an element the iterator can point to - -#if (THRUST_VERSION >= 100700) - // Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods - typedef typename thrust::detail::iterator_facade_category< - thrust::device_system_tag, - thrust::random_access_traversal_tag, - value_type, - reference - >::type iterator_category; ///< The iterator category -#else - typedef std::random_access_iterator_tag iterator_category; ///< The iterator category -#endif // THRUST_VERSION - -private: - - // Largest texture word we can use in device - typedef typename UnitWord::TextureWord TextureWord; - - // Number of texture words per T - enum { - TEXTURE_MULTIPLE = sizeof(T) / sizeof(TextureWord) - }; - -private: - - T* ptr; - difference_type tex_offset; - cudaTextureObject_t tex_obj; - -public: - - /// Constructor - __host__ __device__ __forceinline__ TexObjInputIterator() - : - ptr(NULL), - tex_offset(0), - tex_obj(0) - {} - - /// Use this iterator to bind \p ptr with a texture reference - template - cudaError_t BindTexture( - QualifiedT *ptr, ///< Native pointer to wrap that is aligned to cudaDeviceProp::textureAlignment - size_t bytes = size_t(-1), ///< Number of bytes in the range - size_t tex_offset = 0) ///< OffsetT (in items) from \p ptr denoting the position of the iterator - { - this->ptr = const_cast::Type *>(ptr); - this->tex_offset = tex_offset; - - cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc(); - cudaResourceDesc res_desc; - cudaTextureDesc tex_desc; - 
memset(&res_desc, 0, sizeof(cudaResourceDesc)); - memset(&tex_desc, 0, sizeof(cudaTextureDesc)); - res_desc.resType = cudaResourceTypeLinear; - res_desc.res.linear.devPtr = this->ptr; - res_desc.res.linear.desc = channel_desc; - res_desc.res.linear.sizeInBytes = bytes; - tex_desc.readMode = cudaReadModeElementType; - return cudaCreateTextureObject(&tex_obj, &res_desc, &tex_desc, NULL); - } - - /// Unbind this iterator from its texture reference - cudaError_t UnbindTexture() - { - return cudaDestroyTextureObject(tex_obj); - } - - /// Postfix increment - __host__ __device__ __forceinline__ self_type operator++(int) - { - self_type retval = *this; - tex_offset++; - return retval; - } - - /// Prefix increment - __host__ __device__ __forceinline__ self_type operator++() - { - tex_offset++; - return *this; - } - - /// Indirection - __host__ __device__ __forceinline__ reference operator*() const - { -#if (CUB_PTX_ARCH == 0) - // Simply dereference the pointer on the host - return ptr[tex_offset]; -#else - // Move array of uninitialized words, then alias and assign to return value - TextureWord words[TEXTURE_MULTIPLE]; - - #pragma unroll - for (int i = 0; i < TEXTURE_MULTIPLE; ++i) - { - words[i] = tex1Dfetch( - tex_obj, - (tex_offset * TEXTURE_MULTIPLE) + i); - } - - // Load from words - return *reinterpret_cast(words); -#endif - } - - /// Addition - template - __host__ __device__ __forceinline__ self_type operator+(Distance n) const - { - self_type retval; - retval.ptr = ptr; - retval.tex_obj = tex_obj; - retval.tex_offset = tex_offset + n; - return retval; - } - - /// Addition assignment - template - __host__ __device__ __forceinline__ self_type& operator+=(Distance n) - { - tex_offset += n; - return *this; - } - - /// Subtraction - template - __host__ __device__ __forceinline__ self_type operator-(Distance n) const - { - self_type retval; - retval.ptr = ptr; - retval.tex_obj = tex_obj; - retval.tex_offset = tex_offset - n; - return retval; - } - - /// Subtraction assignment - template - __host__ __device__ __forceinline__ self_type& operator-=(Distance n) - { - tex_offset -= n; - return *this; - } - - /// Distance - __host__ __device__ __forceinline__ difference_type operator-(self_type other) const - { - return tex_offset - other.tex_offset; - } - - /// Array subscript - template - __host__ __device__ __forceinline__ reference operator[](Distance n) const - { - self_type offset = (*this) + n; - return *offset; - } - - /// Structure dereference - __host__ __device__ __forceinline__ pointer operator->() - { - return &(*(*this)); - } - - /// Equal to - __host__ __device__ __forceinline__ bool operator==(const self_type& rhs) - { - return ((ptr == rhs.ptr) && (tex_offset == rhs.tex_offset) && (tex_obj == rhs.tex_obj)); - } - - /// Not equal to - __host__ __device__ __forceinline__ bool operator!=(const self_type& rhs) - { - return ((ptr != rhs.ptr) || (tex_offset != rhs.tex_offset) || (tex_obj != rhs.tex_obj)); - } - - /// ostream operator - friend std::ostream& operator<<(std::ostream& os, const self_type& itr) - { - return os; - } - -}; - - - -/** @} */ // end group UtilIterator - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/iterator/tex_ref_input_iterator.cuh b/ml-xgboost/cub/cub/iterator/tex_ref_input_iterator.cuh deleted file mode 100644 index f0fca2c..0000000 --- a/ml-xgboost/cub/cub/iterator/tex_ref_input_iterator.cuh +++ /dev/null @@ -1,374 +0,0 @@ -/****************************************************************************** - * 
Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * Random-access iterator types - */ - -#pragma once - -#include -#include - -#include "../thread/thread_load.cuh" -#include "../thread/thread_store.cuh" -#include "../util_device.cuh" -#include "../util_debug.cuh" -#include "../util_namespace.cuh" - -#if (CUDA_VERSION >= 5050) || defined(DOXYGEN_ACTIVE) // This iterator is compatible with CUDA 5.5 and newer - -#if (THRUST_VERSION >= 100700) // This iterator is compatible with Thrust API 1.7 and newer - #include - #include -#endif // THRUST_VERSION - - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/****************************************************************************** - * Static file-scope Tesla/Fermi-style texture references - *****************************************************************************/ - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - -// Anonymous namespace -namespace { - -/// Global texture reference specialized by type -template -struct IteratorTexRef -{ - /// And by unique ID - template - struct TexId - { - // Largest texture word we can use in device - typedef typename UnitWord::DeviceWord DeviceWord; - typedef typename UnitWord::TextureWord TextureWord; - - // Number of texture words per T - enum { - DEVICE_MULTIPLE = sizeof(T) / sizeof(DeviceWord), - TEXTURE_MULTIPLE = sizeof(T) / sizeof(TextureWord) - }; - - // Texture reference type - typedef texture TexRef; - - // Texture reference - static TexRef ref; - - /// Bind texture - static cudaError_t BindTexture(void *d_in, size_t &offset) - { - if (d_in) - { - cudaChannelFormatDesc tex_desc = cudaCreateChannelDesc(); - ref.channelDesc = tex_desc; - return (CubDebug(cudaBindTexture(&offset, ref, d_in))); - } - - return cudaSuccess; - } - - /// Unbind texture - static cudaError_t UnbindTexture() - { - return 
CubDebug(cudaUnbindTexture(ref)); - } - - /// Fetch element - template - static __device__ __forceinline__ T Fetch(Distance tex_offset) - { - DeviceWord temp[DEVICE_MULTIPLE]; - TextureWord *words = reinterpret_cast(temp); - - #pragma unroll - for (int i = 0; i < TEXTURE_MULTIPLE; ++i) - { - words[i] = tex1Dfetch(ref, (tex_offset * TEXTURE_MULTIPLE) + i); - } - - return reinterpret_cast(temp); - } - }; -}; - -// Texture reference definitions -template -template -typename IteratorTexRef::template TexId::TexRef IteratorTexRef::template TexId::ref = 0; - - -} // Anonymous namespace - - -#endif // DOXYGEN_SHOULD_SKIP_THIS - - - -/** - * \addtogroup UtilIterator - * @{ - */ - - - -/** - * \brief A random-access input wrapper for dereferencing array values through texture cache. Uses older Tesla/Fermi-style texture references. - * - * \par Overview - * - TexRefInputIteratorTwraps a native device pointer of type ValueType*. References - * to elements are to be loaded through texture cache. - * - Can be used to load any data type from memory through texture cache. - * - Can be manipulated and exchanged within and between host and device - * functions, can only be constructed within host functions, and can only be - * dereferenced within device functions. - * - The \p UNIQUE_ID template parameter is used to statically name the underlying texture - * reference. Only one TexRefInputIteratorTinstance can be bound at any given time for a - * specific combination of (1) data type \p T, (2) \p UNIQUE_ID, (3) host - * thread, and (4) compilation .o unit. - * - With regard to nested/dynamic parallelism, TexRefInputIteratorTiterators may only be - * created by the host thread and used by a top-level kernel (i.e. the one which is launched - * from the host). - * - Compatible with Thrust API v1.7 or newer. - * - Compatible with CUDA toolkit v5.5 or newer. - * - * \par Snippet - * The code snippet below illustrates the use of \p TexRefInputIteratorTto - * dereference a device array of doubles through texture cache. - * \par - * \code - * #include // or equivalently - * - * // Declare, allocate, and initialize a device array - * int num_items; // e.g., 7 - * double *d_in; // e.g., [8.0, 6.0, 7.0, 5.0, 3.0, 0.0, 9.0] - * - * // Create an iterator wrapper - * cub::TexRefInputIterator itr; - * itr.BindTexture(d_in, sizeof(double) * num_items); - * ... - * - * // Within device code: - * printf("%f\n", itr[0]); // 8.0 - * printf("%f\n", itr[1]); // 6.0 - * printf("%f\n", itr[6]); // 9.0 - * - * ... 
- * itr.UnbindTexture(); - * - * \endcode - * - * \tparam T The value type of this iterator - * \tparam UNIQUE_ID A globally-unique identifier (within the compilation unit) to name the underlying texture reference - * \tparam OffsetT The difference type of this iterator (Default: \p ptrdiff_t) - */ -template < - typename T, - int UNIQUE_ID, - typename OffsetT = ptrdiff_t> -class TexRefInputIterator -{ -public: - - // Required iterator traits - typedef TexRefInputIterator self_type; ///< My own type - typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another - typedef T value_type; ///< The type of the element the iterator can point to - typedef T* pointer; ///< The type of a pointer to an element the iterator can point to - typedef T reference; ///< The type of a reference to an element the iterator can point to - -#if (THRUST_VERSION >= 100700) - // Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods - typedef typename thrust::detail::iterator_facade_category< - thrust::device_system_tag, - thrust::random_access_traversal_tag, - value_type, - reference - >::type iterator_category; ///< The iterator category -#else - typedef std::random_access_iterator_tag iterator_category; ///< The iterator category -#endif // THRUST_VERSION - -private: - - T* ptr; - difference_type tex_offset; - - // Texture reference wrapper (old Tesla/Fermi-style textures) - typedef typename IteratorTexRef::template TexId TexId; - -public: -/* - /// Constructor - __host__ __device__ __forceinline__ TexRefInputIterator() - : - ptr(NULL), - tex_offset(0) - {} -*/ - /// Use this iterator to bind \p ptr with a texture reference - template - cudaError_t BindTexture( - QualifiedT *ptr, ///< Native pointer to wrap that is aligned to cudaDeviceProp::textureAlignment - size_t bytes = size_t(-1), ///< Number of bytes in the range - size_t tex_offset = 0) ///< OffsetT (in items) from \p ptr denoting the position of the iterator - { - this->ptr = const_cast::Type *>(ptr); - size_t offset; - cudaError_t retval = TexId::BindTexture(this->ptr + tex_offset, offset); - this->tex_offset = (difference_type) (offset / sizeof(QualifiedT)); - return retval; - } - - /// Unbind this iterator from its texture reference - cudaError_t UnbindTexture() - { - return TexId::UnbindTexture(); - } - - /// Postfix increment - __host__ __device__ __forceinline__ self_type operator++(int) - { - self_type retval = *this; - tex_offset++; - return retval; - } - - /// Prefix increment - __host__ __device__ __forceinline__ self_type operator++() - { - tex_offset++; - return *this; - } - - /// Indirection - __host__ __device__ __forceinline__ reference operator*() const - { -#if (CUB_PTX_ARCH == 0) - // Simply dereference the pointer on the host - return ptr[tex_offset]; -#else - // Use the texture reference - return TexId::Fetch(tex_offset); -#endif - } - - /// Addition - template - __host__ __device__ __forceinline__ self_type operator+(Distance n) const - { - self_type retval; - retval.ptr = ptr; - retval.tex_offset = tex_offset + n; - return retval; - } - - /// Addition assignment - template - __host__ __device__ __forceinline__ self_type& operator+=(Distance n) - { - tex_offset += n; - return *this; - } - - /// Subtraction - template - __host__ __device__ __forceinline__ self_type operator-(Distance n) const - { - self_type retval; - retval.ptr = ptr; - retval.tex_offset = tex_offset - n; - return retval; - } - - /// Subtraction assignment - template - __host__ 
__device__ __forceinline__ self_type& operator-=(Distance n) - { - tex_offset -= n; - return *this; - } - - /// Distance - __host__ __device__ __forceinline__ difference_type operator-(self_type other) const - { - return tex_offset - other.tex_offset; - } - - /// Array subscript - template - __host__ __device__ __forceinline__ reference operator[](Distance n) const - { - self_type offset = (*this) + n; - return *offset; - } - - /// Structure dereference - __host__ __device__ __forceinline__ pointer operator->() - { - return &(*(*this)); - } - - /// Equal to - __host__ __device__ __forceinline__ bool operator==(const self_type& rhs) - { - return ((ptr == rhs.ptr) && (tex_offset == rhs.tex_offset)); - } - - /// Not equal to - __host__ __device__ __forceinline__ bool operator!=(const self_type& rhs) - { - return ((ptr != rhs.ptr) || (tex_offset != rhs.tex_offset)); - } - - /// ostream operator - friend std::ostream& operator<<(std::ostream& os, const self_type& itr) - { - return os; - } - -}; - - - -/** @} */ // end group UtilIterator - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) - -#endif // CUDA_VERSION diff --git a/ml-xgboost/cub/cub/iterator/transform_input_iterator.cuh b/ml-xgboost/cub/cub/iterator/transform_input_iterator.cuh deleted file mode 100644 index bf961f7..0000000 --- a/ml-xgboost/cub/cub/iterator/transform_input_iterator.cuh +++ /dev/null @@ -1,252 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - ******************************************************************************/ - -/** - * \file - * Random-access iterator types - */ - -#pragma once - -#include -#include - -#include "../thread/thread_load.cuh" -#include "../thread/thread_store.cuh" -#include "../util_device.cuh" -#include "../util_namespace.cuh" - -#if (THRUST_VERSION >= 100700) - // This iterator is compatible with Thrust API 1.7 and newer - #include - #include -#endif // THRUST_VERSION - - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/** - * \addtogroup UtilIterator - * @{ - */ - - -/** - * \brief A random-access input wrapper for transforming dereferenced values. - * - * \par Overview - * - TransformInputIterator wraps a unary conversion functor of type \p - * ConversionOp and a random-access input iterator of type InputIteratorT, - * using the former to produce references of type \p ValueType from the latter. - * - Can be used with any data type. - * - Can be constructed, manipulated, and exchanged within and between host and device - * functions. Wrapped host memory can only be dereferenced on the host, and wrapped - * device memory can only be dereferenced on the device. - * - Compatible with Thrust API v1.7 or newer. - * - * \par Snippet - * The code snippet below illustrates the use of \p TransformInputIterator to - * dereference an array of integers, tripling the values and converting them to doubles. - * \par - * \code - * #include // or equivalently - * - * // Functor for tripling integer values and converting to doubles - * struct TripleDoubler - * { - * __host__ __device__ __forceinline__ - * double operator()(const int &a) const { - * return double(a * 3); - * } - * }; - * - * // Declare, allocate, and initialize a device array - * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] - * TripleDoubler conversion_op; - * - * // Create an iterator wrapper - * cub::TransformInputIterator<double, TripleDoubler, int*> itr(d_in, conversion_op); - * - * // Within device code: - * printf("%f\n", itr[0]); // 24.0 - * printf("%f\n", itr[1]); // 18.0 - * printf("%f\n", itr[6]); // 27.0 - * - * \endcode - * - * \tparam ValueType The value type of this iterator - * \tparam ConversionOp Unary functor type for mapping objects of type \p InputType to type \p ValueType. Must have member ValueType operator()(const InputType &datum).
- * \tparam InputIteratorT The type of the wrapped input iterator - * \tparam OffsetT The difference type of this iterator (Default: \p ptrdiff_t) - * - */ -template < - typename ValueType, - typename ConversionOp, - typename InputIteratorT, - typename OffsetT = ptrdiff_t> -class TransformInputIterator -{ -public: - - // Required iterator traits - typedef TransformInputIterator self_type; ///< My own type - typedef OffsetT difference_type; ///< Type to express the result of subtracting one iterator from another - typedef ValueType value_type; ///< The type of the element the iterator can point to - typedef ValueType* pointer; ///< The type of a pointer to an element the iterator can point to - typedef ValueType reference; ///< The type of a reference to an element the iterator can point to - -#if (THRUST_VERSION >= 100700) - // Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods - typedef typename thrust::detail::iterator_facade_category< - thrust::any_system_tag, - thrust::random_access_traversal_tag, - value_type, - reference - >::type iterator_category; ///< The iterator category -#else - typedef std::random_access_iterator_tag iterator_category; ///< The iterator category -#endif // THRUST_VERSION - -private: - - ConversionOp conversion_op; - InputIteratorT input_itr; - -public: - - /// Constructor - __host__ __device__ __forceinline__ TransformInputIterator( - InputIteratorT input_itr, ///< Input iterator to wrap - ConversionOp conversion_op) ///< Conversion functor to wrap - : - conversion_op(conversion_op), - input_itr(input_itr) - {} - - /// Postfix increment - __host__ __device__ __forceinline__ self_type operator++(int) - { - self_type retval = *this; - input_itr++; - return retval; - } - - /// Prefix increment - __host__ __device__ __forceinline__ self_type operator++() - { - input_itr++; - return *this; - } - - /// Indirection - __host__ __device__ __forceinline__ reference operator*() const - { - return conversion_op(*input_itr); - } - - /// Addition - template - __host__ __device__ __forceinline__ self_type operator+(Distance n) const - { - self_type retval(input_itr + n, conversion_op); - return retval; - } - - /// Addition assignment - template - __host__ __device__ __forceinline__ self_type& operator+=(Distance n) - { - input_itr += n; - return *this; - } - - /// Subtraction - template - __host__ __device__ __forceinline__ self_type operator-(Distance n) const - { - self_type retval(input_itr - n, conversion_op); - return retval; - } - - /// Subtraction assignment - template - __host__ __device__ __forceinline__ self_type& operator-=(Distance n) - { - input_itr -= n; - return *this; - } - - /// Distance - __host__ __device__ __forceinline__ difference_type operator-(self_type other) const - { - return input_itr - other.input_itr; - } - - /// Array subscript - template - __host__ __device__ __forceinline__ reference operator[](Distance n) const - { - return conversion_op(input_itr[n]); - } - - /// Structure dereference - __host__ __device__ __forceinline__ pointer operator->() - { - return &conversion_op(*input_itr); - } - - /// Equal to - __host__ __device__ __forceinline__ bool operator==(const self_type& rhs) - { - return (input_itr == rhs.input_itr); - } - - /// Not equal to - __host__ __device__ __forceinline__ bool operator!=(const self_type& rhs) - { - return (input_itr != rhs.input_itr); - } - - /// ostream operator - friend std::ostream& operator<<(std::ostream& os, const self_type& itr) - { - return os; - } -}; - - - 
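/**
 * \par Usage sketch
 * A minimal, self-contained illustration of composing this wrapper with a
 * Thrust algorithm. This is an added sketch, not part of the original header:
 * the \p Tripler functor, \p num_items, and the thrust::reduce call are
 * assumptions chosen for illustration.
 * \par
 * \code
 * #include <thrust/reduce.h>
 * #include <thrust/execution_policy.h>
 *
 * struct Tripler
 * {
 *     __host__ __device__ __forceinline__
 *     double operator()(const int &a) const { return double(a * 3); }
 * };
 *
 * int *d_in;       // device array of num_items ints, e.g., [8, 6, 7, 5, 3, 0, 9]
 * int num_items;   // e.g., 7 (allocation and initialization elided)
 *
 * // Wrap the raw pointer; values are transformed lazily on each dereference,
 * // so no intermediate array of doubles is ever materialized.
 * cub::TransformInputIterator<double, Tripler, int*> itr(d_in, Tripler());
 * double sum = thrust::reduce(thrust::device, itr, itr + num_items);  // 114.0
 * \endcode
 */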
-/** @} */ // end group UtilIterator - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/thread/thread_load.cuh b/ml-xgboost/cub/cub/thread/thread_load.cuh deleted file mode 100644 index 6921a05..0000000 --- a/ml-xgboost/cub/cub/thread/thread_load.cuh +++ /dev/null @@ -1,438 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * Thread utilities for reading memory using PTX cache modifiers. - */ - -#pragma once - -#include - -#include - -#include "../util_ptx.cuh" -#include "../util_type.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/** - * \addtogroup UtilIo - * @{ - */ - -//----------------------------------------------------------------------------- -// Tags and constants -//----------------------------------------------------------------------------- - -/** - * \brief Enumeration of cache modifiers for memory load operations. - */ -enum CacheLoadModifier -{ - LOAD_DEFAULT, ///< Default (no modifier) - LOAD_CA, ///< Cache at all levels - LOAD_CG, ///< Cache at global level - LOAD_CS, ///< Cache streaming (likely to be accessed once) - LOAD_CV, ///< Cache as volatile (including cached system lines) - LOAD_LDG, ///< Cache as texture - LOAD_VOLATILE, ///< Volatile (any memory space) -}; - - -/** - * \name Thread I/O (cache modified) - * @{ - */ - -/** - * \brief Thread utility for reading memory using cub::CacheLoadModifier cache modifiers. Can be used to load any data type. 
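 * - As a supplementary sketch (an addition here, not from the original
 *   documentation), a whole-kernel view of the same utility: each thread
 *   gathers through the read-only (LDG) path and accumulates locally. The
 *   kernel name and launch shape are assumptions for illustration.
 * \par
 * \code
 * // Block-strided sum of n ints through the texture/read-only cache path
 * __global__ void SumKernel(const int *d_in, int n, int *d_sum)
 * {
 *     int thread_sum = 0;
 *     for (int i = threadIdx.x; i < n; i += blockDim.x)
 *         thread_sum += cub::ThreadLoad<cub::LOAD_LDG>(d_in + i);
 *     atomicAdd(d_sum, thread_sum);    // combine per-thread partial sums
 * }
 * \endcode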
- * - * \par Example - * \code - * #include // or equivalently - * - * // 32-bit load using cache-global modifier: - * int *d_in; - * int val = cub::ThreadLoad(d_in + threadIdx.x); - * - * // 16-bit load using default modifier - * short *d_in; - * short val = cub::ThreadLoad(d_in + threadIdx.x); - * - * // 256-bit load using cache-volatile modifier - * double4 *d_in; - * double4 val = cub::ThreadLoad(d_in + threadIdx.x); - * - * // 96-bit load using cache-streaming modifier - * struct TestFoo { bool a; short b; }; - * TestFoo *d_struct; - * TestFoo val = cub::ThreadLoad(d_in + threadIdx.x); - * \endcode - * - * \tparam MODIFIER [inferred] CacheLoadModifier enumeration - * \tparam InputIteratorT [inferred] Input iterator type \iterator - */ -template < - CacheLoadModifier MODIFIER, - typename InputIteratorT> -__device__ __forceinline__ typename std::iterator_traits::value_type ThreadLoad(InputIteratorT itr); - - -//@} end member group - - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - - -/// Helper structure for templated load iteration (inductive case) -template -struct IterateThreadLoad -{ - template - static __device__ __forceinline__ void Load(T const *ptr, T *vals) - { - vals[COUNT] = ThreadLoad(ptr + COUNT); - IterateThreadLoad::template Load(ptr, vals); - } - - template - static __device__ __forceinline__ void Dereference(InputIteratorT itr, T *vals) - { - vals[COUNT] = itr[COUNT]; - IterateThreadLoad::Dereference(itr, vals); - } -}; - - -/// Helper structure for templated load iteration (termination case) -template -struct IterateThreadLoad -{ - template - static __device__ __forceinline__ void Load(T const * /*ptr*/, T * /*vals*/) {} - - template - static __device__ __forceinline__ void Dereference(InputIteratorT /*itr*/, T * /*vals*/) {} -}; - - -/** - * Define a uint4 (16B) ThreadLoad specialization for the given Cache load modifier - */ -#define _CUB_LOAD_16(cub_modifier, ptx_modifier) \ - template<> \ - __device__ __forceinline__ uint4 ThreadLoad(uint4 const *ptr) \ - { \ - uint4 retval; \ - asm volatile ("ld."#ptx_modifier".v4.u32 {%0, %1, %2, %3}, [%4];" : \ - "=r"(retval.x), \ - "=r"(retval.y), \ - "=r"(retval.z), \ - "=r"(retval.w) : \ - _CUB_ASM_PTR_(ptr)); \ - return retval; \ - } \ - template<> \ - __device__ __forceinline__ ulonglong2 ThreadLoad(ulonglong2 const *ptr) \ - { \ - ulonglong2 retval; \ - asm volatile ("ld."#ptx_modifier".v2.u64 {%0, %1}, [%2];" : \ - "=l"(retval.x), \ - "=l"(retval.y) : \ - _CUB_ASM_PTR_(ptr)); \ - return retval; \ - } - -/** - * Define a uint2 (8B) ThreadLoad specialization for the given Cache load modifier - */ -#define _CUB_LOAD_8(cub_modifier, ptx_modifier) \ - template<> \ - __device__ __forceinline__ ushort4 ThreadLoad(ushort4 const *ptr) \ - { \ - ushort4 retval; \ - asm volatile ("ld."#ptx_modifier".v4.u16 {%0, %1, %2, %3}, [%4];" : \ - "=h"(retval.x), \ - "=h"(retval.y), \ - "=h"(retval.z), \ - "=h"(retval.w) : \ - _CUB_ASM_PTR_(ptr)); \ - return retval; \ - } \ - template<> \ - __device__ __forceinline__ uint2 ThreadLoad(uint2 const *ptr) \ - { \ - uint2 retval; \ - asm volatile ("ld."#ptx_modifier".v2.u32 {%0, %1}, [%2];" : \ - "=r"(retval.x), \ - "=r"(retval.y) : \ - _CUB_ASM_PTR_(ptr)); \ - return retval; \ - } \ - template<> \ - __device__ __forceinline__ unsigned long long ThreadLoad(unsigned long long const *ptr) \ - { \ - unsigned long long retval; \ - asm volatile ("ld."#ptx_modifier".u64 %0, [%1];" : \ - "=l"(retval) : \ - _CUB_ASM_PTR_(ptr)); \ - return retval; \ - } - -/** - * Define a uint (4B) ThreadLoad 
specialization for the given Cache load modifier - */ -#define _CUB_LOAD_4(cub_modifier, ptx_modifier) \ - template<> \ - __device__ __forceinline__ unsigned int ThreadLoad(unsigned int const *ptr) \ - { \ - unsigned int retval; \ - asm volatile ("ld."#ptx_modifier".u32 %0, [%1];" : \ - "=r"(retval) : \ - _CUB_ASM_PTR_(ptr)); \ - return retval; \ - } - - -/** - * Define a unsigned short (2B) ThreadLoad specialization for the given Cache load modifier - */ -#define _CUB_LOAD_2(cub_modifier, ptx_modifier) \ - template<> \ - __device__ __forceinline__ unsigned short ThreadLoad(unsigned short const *ptr) \ - { \ - unsigned short retval; \ - asm volatile ("ld."#ptx_modifier".u16 %0, [%1];" : \ - "=h"(retval) : \ - _CUB_ASM_PTR_(ptr)); \ - return retval; \ - } - - -/** - * Define an unsigned char (1B) ThreadLoad specialization for the given Cache load modifier - */ -#define _CUB_LOAD_1(cub_modifier, ptx_modifier) \ - template<> \ - __device__ __forceinline__ unsigned char ThreadLoad(unsigned char const *ptr) \ - { \ - unsigned short retval; \ - asm volatile ( \ - "{" \ - " .reg .u8 datum;" \ - " ld."#ptx_modifier".u8 datum, [%1];" \ - " cvt.u16.u8 %0, datum;" \ - "}" : \ - "=h"(retval) : \ - _CUB_ASM_PTR_(ptr)); \ - return (unsigned char) retval; \ - } - - -/** - * Define powers-of-two ThreadLoad specializations for the given Cache load modifier - */ -#define _CUB_LOAD_ALL(cub_modifier, ptx_modifier) \ - _CUB_LOAD_16(cub_modifier, ptx_modifier) \ - _CUB_LOAD_8(cub_modifier, ptx_modifier) \ - _CUB_LOAD_4(cub_modifier, ptx_modifier) \ - _CUB_LOAD_2(cub_modifier, ptx_modifier) \ - _CUB_LOAD_1(cub_modifier, ptx_modifier) \ - - -/** - * Define powers-of-two ThreadLoad specializations for the various Cache load modifiers - */ -#if CUB_PTX_ARCH >= 200 - _CUB_LOAD_ALL(LOAD_CA, ca) - _CUB_LOAD_ALL(LOAD_CG, cg) - _CUB_LOAD_ALL(LOAD_CS, cs) - _CUB_LOAD_ALL(LOAD_CV, cv) -#else - _CUB_LOAD_ALL(LOAD_CA, global) - // Use volatile to ensure coherent reads when this PTX is JIT'd to run on newer architectures with L1 - _CUB_LOAD_ALL(LOAD_CG, volatile.global) - _CUB_LOAD_ALL(LOAD_CS, global) - _CUB_LOAD_ALL(LOAD_CV, volatile.global) -#endif - -#if CUB_PTX_ARCH >= 350 - _CUB_LOAD_ALL(LOAD_LDG, global.nc) -#else - _CUB_LOAD_ALL(LOAD_LDG, global) -#endif - - -// Macro cleanup -#undef _CUB_LOAD_ALL -#undef _CUB_LOAD_1 -#undef _CUB_LOAD_2 -#undef _CUB_LOAD_4 -#undef _CUB_LOAD_8 -#undef _CUB_LOAD_16 - - - -/** - * ThreadLoad definition for LOAD_DEFAULT modifier on iterator types - */ -template -__device__ __forceinline__ typename std::iterator_traits::value_type ThreadLoad( - InputIteratorT itr, - Int2Type /*modifier*/, - Int2Type /*is_pointer*/) -{ - return *itr; -} - - -/** - * ThreadLoad definition for LOAD_DEFAULT modifier on pointer types - */ -template -__device__ __forceinline__ T ThreadLoad( - T *ptr, - Int2Type /*modifier*/, - Int2Type /*is_pointer*/) -{ - return *ptr; -} - - -/** - * ThreadLoad definition for LOAD_VOLATILE modifier on primitive pointer types - */ -template -__device__ __forceinline__ T ThreadLoadVolatilePointer( - T *ptr, - Int2Type /*is_primitive*/) -{ - T retval = *reinterpret_cast(ptr); - return retval; -} - - -/** - * ThreadLoad definition for LOAD_VOLATILE modifier on non-primitive pointer types - */ -template -__device__ __forceinline__ T ThreadLoadVolatilePointer( - T *ptr, - Int2Type /*is_primitive*/) -{ - typedef typename UnitWord::VolatileWord VolatileWord; // Word type for memcopying - - const int VOLATILE_MULTIPLE = sizeof(T) / sizeof(VolatileWord); -/* - VolatileWord 
words[VOLATILE_MULTIPLE]; - - IterateThreadLoad<0, VOLATILE_MULTIPLE>::Dereference( - reinterpret_cast(ptr), - words); - - return *reinterpret_cast(words); -*/ - - T retval; - VolatileWord *words = reinterpret_cast(&retval); - IterateThreadLoad<0, VOLATILE_MULTIPLE>::Dereference( - reinterpret_cast(ptr), - words); - return retval; -} - - -/** - * ThreadLoad definition for LOAD_VOLATILE modifier on pointer types - */ -template -__device__ __forceinline__ T ThreadLoad( - T *ptr, - Int2Type /*modifier*/, - Int2Type /*is_pointer*/) -{ - // Apply tags for partial-specialization - return ThreadLoadVolatilePointer(ptr, Int2Type::PRIMITIVE>()); -} - - -/** - * ThreadLoad definition for generic modifiers on pointer types - */ -template -__device__ __forceinline__ T ThreadLoad( - T const *ptr, - Int2Type /*modifier*/, - Int2Type /*is_pointer*/) -{ - typedef typename UnitWord::DeviceWord DeviceWord; - - const int DEVICE_MULTIPLE = sizeof(T) / sizeof(DeviceWord); - - DeviceWord words[DEVICE_MULTIPLE]; - - IterateThreadLoad<0, DEVICE_MULTIPLE>::template Load( - reinterpret_cast(const_cast(ptr)), - words); - - return *reinterpret_cast(words); -} - - -/** - * ThreadLoad definition for generic modifiers - */ -template < - CacheLoadModifier MODIFIER, - typename InputIteratorT> -__device__ __forceinline__ typename std::iterator_traits::value_type ThreadLoad(InputIteratorT itr) -{ - // Apply tags for partial-specialization - return ThreadLoad( - itr, - Int2Type(), - Int2Type::VALUE>()); -} - - - -#endif // DOXYGEN_SHOULD_SKIP_THIS - - -/** @} */ // end group UtilIo - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/thread/thread_operators.cuh b/ml-xgboost/cub/cub/thread/thread_operators.cuh deleted file mode 100644 index 32e13f8..0000000 --- a/ml-xgboost/cub/cub/thread/thread_operators.cuh +++ /dev/null @@ -1,317 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * Simple binary operator functor types - */ - -/****************************************************************************** - * Simple functor operators - ******************************************************************************/ - -#pragma once - -#include "../util_macro.cuh" -#include "../util_type.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \addtogroup UtilModule - * @{ - */ - -/** - * \brief Default equality functor - */ -struct Equality -{ - /// Boolean equality operator, returns (a == b) - template - __host__ __device__ __forceinline__ bool operator()(const T &a, const T &b) const - { - return a == b; - } -}; - - -/** - * \brief Default inequality functor - */ -struct Inequality -{ - /// Boolean inequality operator, returns (a != b) - template - __host__ __device__ __forceinline__ bool operator()(const T &a, const T &b) const - { - return a != b; - } -}; - - -/** - * \brief Inequality functor (wraps equality functor) - */ -template -struct InequalityWrapper -{ - /// Wrapped equality operator - EqualityOp op; - - /// Constructor - __host__ __device__ __forceinline__ - InequalityWrapper(EqualityOp op) : op(op) {} - - /// Boolean inequality operator, returns (a != b) - template - __host__ __device__ __forceinline__ bool operator()(const T &a, const T &b) - { - return !op(a, b); - } -}; - - -/** - * \brief Default sum functor - */ -struct Sum -{ - /// Boolean sum operator, returns a + b - template - __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const - { - return a + b; - } -}; - - -/** - * \brief Default max functor - */ -struct Max -{ - /// Boolean max operator, returns (a > b) ? a : b - template - __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const - { - return CUB_MAX(a, b); - } -}; - - -/** - * \brief Arg max functor (keeps the value and offset of the first occurrence of the larger item) - */ -struct ArgMax -{ - /// Boolean max operator, preferring the item having the smaller offset in case of ties - template - __host__ __device__ __forceinline__ KeyValuePair operator()( - const KeyValuePair &a, - const KeyValuePair &b) const - { -// Mooch BUG (device reduce argmax gk110 3.2 million random fp32) -// return ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key))) ? b : a; - - if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key))) - return b; - return a; - } -}; - - -/** - * \brief Default min functor - */ -struct Min -{ - /// Boolean min operator, returns (a < b) ? 
a : b - template - __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const - { - return CUB_MIN(a, b); - } -}; - - -/** - * \brief Arg min functor (keeps the value and offset of the first occurrence of the smallest item) - */ -struct ArgMin -{ - /// Boolean min operator, preferring the item having the smaller offset in case of ties - template - __host__ __device__ __forceinline__ KeyValuePair operator()( - const KeyValuePair &a, - const KeyValuePair &b) const - { -// Mooch BUG (device reduce argmax gk110 3.2 million random fp32) -// return ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key))) ? b : a; - - if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key))) - return b; - return a; - } -}; - - -/** - * \brief Default cast functor - */ -template -struct Cast -{ - /// Cast operator, returns (B) a - template - __host__ __device__ __forceinline__ B operator()(const A &a) const - { - return (B) a; - } -}; - - -/** - * \brief Binary operator wrapper for switching non-commutative scan arguments - */ -template -class SwizzleScanOp -{ -private: - - /// Wrapped scan operator - ScanOp scan_op; - -public: - - /// Constructor - __host__ __device__ __forceinline__ - SwizzleScanOp(ScanOp scan_op) : scan_op(scan_op) {} - - /// Switch the scan arguments - template - __host__ __device__ __forceinline__ - T operator()(const T &a, const T &b) - { - T _a(a); - T _b(b); - - return scan_op(_b, _a); - } -}; - - -/** - * \brief Reduce-by-segment functor. - * - * Given two cub::KeyValuePair inputs \p a and \p b and a - * binary associative combining operator \p f(const T &x, const T &y), - * an instance of this functor returns a cub::KeyValuePair whose \p key - * field is a.key + b.key, and whose \p value field - * is either b.value if b.key is non-zero, or f(a.value, b.value) otherwise. - * - * ReduceBySegmentOp is an associative, non-commutative binary combining operator - * for input sequences of cub::KeyValuePair pairings. Such - * sequences are typically used to represent a segmented set of values to be reduced - * and a corresponding set of {0,1}-valued integer "head flags" demarcating the - * first value of each segment. - * - */ -template ///< Binary reduction operator to apply to values -struct ReduceBySegmentOp -{ - /// Wrapped reduction operator - ReductionOpT op; - - /// Constructor - __host__ __device__ __forceinline__ ReduceBySegmentOp() {} - - /// Constructor - __host__ __device__ __forceinline__ ReduceBySegmentOp(ReductionOpT op) : op(op) {} - - /// Scan operator - template ///< KeyValuePair pairing of T (value) and OffsetT (head flag) - __host__ __device__ __forceinline__ KeyValuePairT operator()( - const KeyValuePairT &first, ///< First partial reduction - const KeyValuePairT &second) ///< Second partial reduction - { - KeyValuePairT retval; - retval.key = first.key + second.key; - retval.value = (second.key) ? - second.value : // The second partial reduction spans a segment reset, so its value aggregate becomes the running aggregate - op(first.value, second.value); // The second partial reduction does not span a reset, so accumulate both into the running aggregate - return retval; - } -}; - - - -template ///< Binary reduction operator to apply to values -struct ReduceByKeyOp -{ - /// Wrapped reduction operator - ReductionOpT op; - - /// Constructor - __host__ __device__ __forceinline__ ReduceByKeyOp() {} - - /// Constructor - __host__ __device__ __forceinline__ ReduceByKeyOp(ReductionOpT op) : op(op) {} - - /// Scan operator - template - __host__ __device__ __forceinline__ KeyValuePairT operator()( - const KeyValuePairT &first, ///< First partial reduction - const KeyValuePairT &second) ///< Second partial reduction - { - KeyValuePairT retval = second; - - if (first.key == second.key) - retval.value = op(first.value, retval.value); - - return retval; - } -}; - - - - - - - -/** @} */ // end group UtilModule - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/thread/thread_reduce.cuh b/ml-xgboost/cub/cub/thread/thread_reduce.cuh deleted file mode 100644 index 525744a..0000000 --- a/ml-xgboost/cub/cub/thread/thread_reduce.cuh +++ /dev/null @@ -1,169 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * - ******************************************************************************/ - -/** - * \file - * Thread utilities for sequential reduction over statically-sized array types - */ - -#pragma once - -#include "../thread/thread_operators.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/** - * \addtogroup UtilModule - * @{ - */ - -/** - * \name Sequential reduction over statically-sized array types - * @{ - */ - - -template < - int LENGTH, - typename T, - typename ReductionOp> -__device__ __forceinline__ T ThreadReduce( - T* input, ///< [in] Input array - ReductionOp reduction_op, ///< [in] Binary reduction operator - T prefix, ///< [in] Prefix to seed reduction with - Int2Type /*length*/) -{ - T addend = *input; - prefix = reduction_op(prefix, addend); - - return ThreadReduce(input + 1, reduction_op, prefix, Int2Type()); -} - -template < - typename T, - typename ReductionOp> -__device__ __forceinline__ T ThreadReduce( - T* /*input*/, ///< [in] Input array - ReductionOp /*reduction_op*/, ///< [in] Binary reduction operator - T prefix, ///< [in] Prefix to seed reduction with - Int2Type<0> /*length*/) -{ - return prefix; -} - - -/** - * \brief Perform a sequential reduction over \p LENGTH elements of the \p input array, seeded with the specified \p prefix. The aggregate is returned. - * - * \tparam LENGTH LengthT of input array - * \tparam T [inferred] The data type to be reduced. - * \tparam ScanOp [inferred] Binary reduction operator type having member T operator()(const T &a, const T &b) - */ -template < - int LENGTH, - typename T, - typename ReductionOp> -__device__ __forceinline__ T ThreadReduce( - T* input, ///< [in] Input array - ReductionOp reduction_op, ///< [in] Binary reduction operator - T prefix) ///< [in] Prefix to seed reduction with -{ - return ThreadReduce(input, reduction_op, prefix, Int2Type()); -} - - -/** - * \brief Perform a sequential reduction over \p LENGTH elements of the \p input array. The aggregate is returned. - * - * \tparam LENGTH LengthT of input array - * \tparam T [inferred] The data type to be reduced. - * \tparam ScanOp [inferred] Binary reduction operator type having member T operator()(const T &a, const T &b) - */ -template < - int LENGTH, - typename T, - typename ReductionOp> -__device__ __forceinline__ T ThreadReduce( - T* input, ///< [in] Input array - ReductionOp reduction_op) ///< [in] Binary reduction operator -{ - T prefix = input[0]; - return ThreadReduce(input + 1, reduction_op, prefix); -} - - -/** - * \brief Perform a sequential reduction over the statically-sized \p input array, seeded with the specified \p prefix. The aggregate is returned. - * - * \tparam LENGTH [inferred] LengthT of \p input array - * \tparam T [inferred] The data type to be reduced. - * \tparam ScanOp [inferred] Binary reduction operator type having member T operator()(const T &a, const T &b) - */ -template < - int LENGTH, - typename T, - typename ReductionOp> -__device__ __forceinline__ T ThreadReduce( - T (&input)[LENGTH], ///< [in] Input array - ReductionOp reduction_op, ///< [in] Binary reduction operator - T prefix) ///< [in] Prefix to seed reduction with -{ - return ThreadReduce(input, reduction_op, prefix, Int2Type()); -} - - -/** - * \brief Serial reduction with the specified operator - * - * \tparam LENGTH [inferred] LengthT of \p input array - * \tparam T [inferred] The data type to be reduced. 
- * \tparam ScanOp [inferred] Binary reduction operator type having member T operator()(const T &a, const T &b) - */ -template < - int LENGTH, - typename T, - typename ReductionOp> -__device__ __forceinline__ T ThreadReduce( - T (&input)[LENGTH], ///< [in] Input array - ReductionOp reduction_op) ///< [in] Binary reduction operator -{ - return ThreadReduce((T*) input, reduction_op); -} - - -//@} end member group - -/** @} */ // end group UtilModule - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/thread/thread_scan.cuh b/ml-xgboost/cub/cub/thread/thread_scan.cuh deleted file mode 100644 index 312335a..0000000 --- a/ml-xgboost/cub/cub/thread/thread_scan.cuh +++ /dev/null @@ -1,283 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - ******************************************************************************/ - -/** - * \file - * Thread utilities for sequential prefix scan over statically-sized array types - */ - -#pragma once - -#include "../thread/thread_operators.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/** - * \addtogroup UtilModule - * @{ - */ - -/** - * \name Sequential prefix scan over statically-sized array types - * @{ - */ - -template < - int LENGTH, - typename T, - typename ScanOp> -__device__ __forceinline__ T ThreadScanExclusive( - T inclusive, - T exclusive, - T *input, ///< [in] Input array - T *output, ///< [out] Output array (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan operator - Int2Type /*length*/) -{ - T addend = *input; - inclusive = scan_op(exclusive, addend); - *output = exclusive; - exclusive = inclusive; - - return ThreadScanExclusive(inclusive, exclusive, input + 1, output + 1, scan_op, Int2Type()); -} - -template < - typename T, - typename ScanOp> -__device__ __forceinline__ T ThreadScanExclusive( - T inclusive, - T /*exclusive*/, - T * /*input*/, ///< [in] Input array - T * /*output*/, ///< [out] Output array (may be aliased to \p input) - ScanOp /*scan_op*/, ///< [in] Binary scan operator - Int2Type<0> /*length*/) -{ - return inclusive; -} - - -/** - * \brief Perform a sequential exclusive prefix scan over \p LENGTH elements of the \p input array, seeded with the specified \p prefix. The aggregate is returned. - * - * \tparam LENGTH LengthT of \p input and \p output arrays - * \tparam T [inferred] The data type to be scanned. - * \tparam ScanOp [inferred] Binary scan operator type having member T operator()(const T &a, const T &b) - */ -template < - int LENGTH, - typename T, - typename ScanOp> -__device__ __forceinline__ T ThreadScanExclusive( - T *input, ///< [in] Input array - T *output, ///< [out] Output array (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan operator - T prefix, ///< [in] Prefix to seed scan with - bool apply_prefix = true) ///< [in] Whether or not the calling thread should apply its prefix. If not, the first output element is undefined. (Handy for preventing thread-0 from applying a prefix.) -{ - T inclusive = input[0]; - if (apply_prefix) - { - inclusive = scan_op(prefix, inclusive); - } - output[0] = prefix; - T exclusive = inclusive; - - return ThreadScanExclusive(inclusive, exclusive, input + 1, output + 1, scan_op, Int2Type()); -} - - -/** - * \brief Perform a sequential exclusive prefix scan over the statically-sized \p input array, seeded with the specified \p prefix. The aggregate is returned. - * - * \tparam LENGTH [inferred] LengthT of \p input and \p output arrays - * \tparam T [inferred] The data type to be scanned. - * \tparam ScanOp [inferred] Binary scan operator type having member T operator()(const T &a, const T &b) - */ -template < - int LENGTH, - typename T, - typename ScanOp> -__device__ __forceinline__ T ThreadScanExclusive( - T (&input)[LENGTH], ///< [in] Input array - T (&output)[LENGTH], ///< [out] Output array (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan operator - T prefix, ///< [in] Prefix to seed scan with - bool apply_prefix = true) ///< [in] Whether or not the calling thread should apply its prefix. (Handy for preventing thread-0 from applying a prefix.) 
-{ - return ThreadScanExclusive((T*) input, (T*) output, scan_op, prefix, apply_prefix); -} - - - - - - - - - -template < - int LENGTH, - typename T, - typename ScanOp> -__device__ __forceinline__ T ThreadScanInclusive( - T inclusive, - T *input, ///< [in] Input array - T *output, ///< [out] Output array (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan operator - Int2Type /*length*/) -{ - T addend = *input; - inclusive = scan_op(inclusive, addend); - output[0] = inclusive; - - return ThreadScanInclusive(inclusive, input + 1, output + 1, scan_op, Int2Type()); -} - -template < - typename T, - typename ScanOp> -__device__ __forceinline__ T ThreadScanInclusive( - T inclusive, - T * /*input*/, ///< [in] Input array - T * /*output*/, ///< [out] Output array (may be aliased to \p input) - ScanOp /*scan_op*/, ///< [in] Binary scan operator - Int2Type<0> /*length*/) -{ - return inclusive; -} - - -/** - * \brief Perform a sequential inclusive prefix scan over \p LENGTH elements of the \p input array. The aggregate is returned. - * - * \tparam LENGTH LengthT of \p input and \p output arrays - * \tparam T [inferred] The data type to be scanned. - * \tparam ScanOp [inferred] Binary scan operator type having member T operator()(const T &a, const T &b) - */ -template < - int LENGTH, - typename T, - typename ScanOp> -__device__ __forceinline__ T ThreadScanInclusive( - T *input, ///< [in] Input array - T *output, ///< [out] Output array (may be aliased to \p input) - ScanOp scan_op) ///< [in] Binary scan operator -{ - T inclusive = input[0]; - output[0] = inclusive; - - // Continue scan - return ThreadScanInclusive(inclusive, input + 1, output + 1, scan_op, Int2Type()); -} - - -/** - * \brief Perform a sequential inclusive prefix scan over the statically-sized \p input array. The aggregate is returned. - * - * \tparam LENGTH [inferred] LengthT of \p input and \p output arrays - * \tparam T [inferred] The data type to be scanned. - * \tparam ScanOp [inferred] Binary scan operator type having member T operator()(const T &a, const T &b) - */ -template < - int LENGTH, - typename T, - typename ScanOp> -__device__ __forceinline__ T ThreadScanInclusive( - T (&input)[LENGTH], ///< [in] Input array - T (&output)[LENGTH], ///< [out] Output array (may be aliased to \p input) - ScanOp scan_op) ///< [in] Binary scan operator -{ - return ThreadScanInclusive((T*) input, (T*) output, scan_op); -} - - -/** - * \brief Perform a sequential inclusive prefix scan over \p LENGTH elements of the \p input array, seeded with the specified \p prefix. The aggregate is returned. - * - * \tparam LENGTH LengthT of \p input and \p output arrays - * \tparam T [inferred] The data type to be scanned. - * \tparam ScanOp [inferred] Binary scan operator type having member T operator()(const T &a, const T &b) - */ -template < - int LENGTH, - typename T, - typename ScanOp> -__device__ __forceinline__ T ThreadScanInclusive( - T *input, ///< [in] Input array - T *output, ///< [out] Output array (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan operator - T prefix, ///< [in] Prefix to seed scan with - bool apply_prefix = true) ///< [in] Whether or not the calling thread should apply its prefix. (Handy for preventing thread-0 from applying a prefix.) 
-{ - T inclusive = input[0]; - if (apply_prefix) - { - inclusive = scan_op(prefix, inclusive); - } - output[0] = inclusive; - - // Continue scan - return ThreadScanInclusive(inclusive, input + 1, output + 1, scan_op, Int2Type()); -} - - -/** - * \brief Perform a sequential inclusive prefix scan over the statically-sized \p input array, seeded with the specified \p prefix. The aggregate is returned. - * - * \tparam LENGTH [inferred] LengthT of \p input and \p output arrays - * \tparam T [inferred] The data type to be scanned. - * \tparam ScanOp [inferred] Binary scan operator type having member T operator()(const T &a, const T &b) - */ -template < - int LENGTH, - typename T, - typename ScanOp> -__device__ __forceinline__ T ThreadScanInclusive( - T (&input)[LENGTH], ///< [in] Input array - T (&output)[LENGTH], ///< [out] Output array (may be aliased to \p input) - ScanOp scan_op, ///< [in] Binary scan operator - T prefix, ///< [in] Prefix to seed scan with - bool apply_prefix = true) ///< [in] Whether or not the calling thread should apply its prefix. (Handy for preventing thread-0 from applying a prefix.) -{ - return ThreadScanInclusive((T*) input, (T*) output, scan_op, prefix, apply_prefix); -} - - -//@} end member group - -/** @} */ // end group UtilModule - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/thread/thread_search.cuh b/ml-xgboost/cub/cub/thread/thread_search.cuh deleted file mode 100644 index 6d2da00..0000000 --- a/ml-xgboost/cub/cub/thread/thread_search.cuh +++ /dev/null @@ -1,154 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - ******************************************************************************/ - -/** - * \file - * Thread utilities for sequential search - */ - -#pragma once - -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * Computes the begin offsets into A and B for the specific diagonal - */ -template < - typename AIteratorT, - typename BIteratorT, - typename OffsetT, - typename CoordinateT> -__host__ __device__ __forceinline__ void MergePathSearch( - OffsetT diagonal, - AIteratorT a, - BIteratorT b, - OffsetT a_len, - OffsetT b_len, - CoordinateT& path_coordinate) -{ - /// The value type of the input iterator - typedef typename std::iterator_traits::value_type T; - - OffsetT split_min = CUB_MAX(diagonal - b_len, 0); - OffsetT split_max = CUB_MIN(diagonal, a_len); - - while (split_min < split_max) - { - OffsetT split_pivot = (split_min + split_max) >> 1; - if (a[split_pivot] <= b[diagonal - split_pivot - 1]) - { - // Move candidate split range up A, down B - split_min = split_pivot + 1; - } - else - { - // Move candidate split range up B, down A - split_max = split_pivot; - } - } - - path_coordinate.x = CUB_MIN(split_min, a_len); - path_coordinate.y = diagonal - split_min; -} - - - -/** - * \brief Returns the offset of the first value within \p input which does not compare less than \p val - */ -template < - typename InputIteratorT, - typename OffsetT, - typename T> -__device__ __forceinline__ OffsetT LowerBound( - InputIteratorT input, ///< [in] Input sequence - OffsetT num_items, ///< [in] Input sequence length - T val) ///< [in] Search key -{ - OffsetT retval = 0; - while (num_items > 0) - { - OffsetT half = num_items >> 1; - if (input[retval + half] < val) - { - retval = retval + (half + 1); - num_items = num_items - (half + 1); - } - else - { - num_items = half; - } - } - - return retval; -} - - -/** - * \brief Returns the offset of the first value within \p input which compares greater than \p val - */ -template < - typename InputIteratorT, - typename OffsetT, - typename T> -__device__ __forceinline__ OffsetT UpperBound( - InputIteratorT input, ///< [in] Input sequence - OffsetT num_items, ///< [in] Input sequence length - T val) ///< [in] Search key -{ - OffsetT retval = 0; - while (num_items > 0) - { - OffsetT half = num_items >> 1; - if (val < input[retval + half]) - { - num_items = half; - } - else - { - retval = retval + (half + 1); - num_items = num_items - (half + 1); - } - } - - return retval; -} - - - - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/thread/thread_store.cuh b/ml-xgboost/cub/cub/thread/thread_store.cuh deleted file mode 100644 index ae37f9c..0000000 --- a/ml-xgboost/cub/cub/thread/thread_store.cuh +++ /dev/null @@ -1,422 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * Thread utilities for writing memory using PTX cache modifiers. - */ - -#pragma once - -#include - -#include "../util_ptx.cuh" -#include "../util_type.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/** - * \addtogroup UtilIo - * @{ - */ - - -//----------------------------------------------------------------------------- -// Tags and constants -//----------------------------------------------------------------------------- - -/** - * \brief Enumeration of cache modifiers for memory store operations. - */ -enum CacheStoreModifier -{ - STORE_DEFAULT, ///< Default (no modifier) - STORE_WB, ///< Cache write-back all coherent levels - STORE_CG, ///< Cache at global level - STORE_CS, ///< Cache streaming (likely to be accessed once) - STORE_WT, ///< Cache write-through (to system memory) - STORE_VOLATILE, ///< Volatile shared (any memory space) -}; - - -/** - * \name Thread I/O (cache modified) - * @{ - */ - -/** - * \brief Thread utility for writing memory using cub::CacheStoreModifier cache modifiers. Can be used to store any data type. 
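 * - As a supplementary sketch (an addition here, not from the original
 *   documentation), a streaming write: each output element is produced once
 *   and is unlikely to be re-read soon, so the cache-streaming modifier is a
 *   reasonable choice. The kernel and its parameters are illustrative
 *   assumptions.
 * \par
 * \code
 * // Elementwise scale with a cache-streaming store
 * __global__ void ScaleKernel(const float *d_in, float *d_out, int n, float s)
 * {
 *     int i = blockIdx.x * blockDim.x + threadIdx.x;
 *     if (i < n)
 *         cub::ThreadStore<cub::STORE_CS>(d_out + i, d_in[i] * s);
 * }
 * \endcode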
- * - * \par Example - * \code - * #include // or equivalently - * - * // 32-bit store using cache-global modifier: - * int *d_out; - * int val; - * cub::ThreadStore(d_out + threadIdx.x, val); - * - * // 16-bit store using default modifier - * short *d_out; - * short val; - * cub::ThreadStore(d_out + threadIdx.x, val); - * - * // 256-bit store using write-through modifier - * double4 *d_out; - * double4 val; - * cub::ThreadStore(d_out + threadIdx.x, val); - * - * // 96-bit store using cache-streaming cache modifier - * struct TestFoo { bool a; short b; }; - * TestFoo *d_struct; - * TestFoo val; - * cub::ThreadStore(d_out + threadIdx.x, val); - * \endcode - * - * \tparam MODIFIER [inferred] CacheStoreModifier enumeration - * \tparam InputIteratorT [inferred] Output iterator type \iterator - * \tparam T [inferred] Data type of output value - */ -template < - CacheStoreModifier MODIFIER, - typename OutputIteratorT, - typename T> -__device__ __forceinline__ void ThreadStore(OutputIteratorT itr, T val); - - -//@} end member group - - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - - -/// Helper structure for templated store iteration (inductive case) -template -struct IterateThreadStore -{ - template - static __device__ __forceinline__ void Store(T *ptr, T *vals) - { - ThreadStore(ptr + COUNT, vals[COUNT]); - IterateThreadStore::template Store(ptr, vals); - } - - template - static __device__ __forceinline__ void Dereference(OutputIteratorT ptr, T *vals) - { - ptr[COUNT] = vals[COUNT]; - IterateThreadStore::Dereference(ptr, vals); - } - -}; - -/// Helper structure for templated store iteration (termination case) -template -struct IterateThreadStore -{ - template - static __device__ __forceinline__ void Store(T * /*ptr*/, T * /*vals*/) {} - - template - static __device__ __forceinline__ void Dereference(OutputIteratorT /*ptr*/, T * /*vals*/) {} -}; - - -/** - * Define a uint4 (16B) ThreadStore specialization for the given Cache load modifier - */ -#define _CUB_STORE_16(cub_modifier, ptx_modifier) \ - template<> \ - __device__ __forceinline__ void ThreadStore(uint4* ptr, uint4 val) \ - { \ - asm volatile ("st."#ptx_modifier".v4.u32 [%0], {%1, %2, %3, %4};" : : \ - _CUB_ASM_PTR_(ptr), \ - "r"(val.x), \ - "r"(val.y), \ - "r"(val.z), \ - "r"(val.w)); \ - } \ - template<> \ - __device__ __forceinline__ void ThreadStore(ulonglong2* ptr, ulonglong2 val) \ - { \ - asm volatile ("st."#ptx_modifier".v2.u64 [%0], {%1, %2};" : : \ - _CUB_ASM_PTR_(ptr), \ - "l"(val.x), \ - "l"(val.y)); \ - } - - -/** - * Define a uint2 (8B) ThreadStore specialization for the given Cache load modifier - */ -#define _CUB_STORE_8(cub_modifier, ptx_modifier) \ - template<> \ - __device__ __forceinline__ void ThreadStore(ushort4* ptr, ushort4 val) \ - { \ - asm volatile ("st."#ptx_modifier".v4.u16 [%0], {%1, %2, %3, %4};" : : \ - _CUB_ASM_PTR_(ptr), \ - "h"(val.x), \ - "h"(val.y), \ - "h"(val.z), \ - "h"(val.w)); \ - } \ - template<> \ - __device__ __forceinline__ void ThreadStore(uint2* ptr, uint2 val) \ - { \ - asm volatile ("st."#ptx_modifier".v2.u32 [%0], {%1, %2};" : : \ - _CUB_ASM_PTR_(ptr), \ - "r"(val.x), \ - "r"(val.y)); \ - } \ - template<> \ - __device__ __forceinline__ void ThreadStore(unsigned long long* ptr, unsigned long long val) \ - { \ - asm volatile ("st."#ptx_modifier".u64 [%0], %1;" : : \ - _CUB_ASM_PTR_(ptr), \ - "l"(val)); \ - } - -/** - * Define a unsigned int (4B) ThreadStore specialization for the given Cache load modifier - */ -#define _CUB_STORE_4(cub_modifier, ptx_modifier) \ - template<> \ - 
__device__ __forceinline__ void ThreadStore(unsigned int* ptr, unsigned int val) \ - { \ - asm volatile ("st."#ptx_modifier".u32 [%0], %1;" : : \ - _CUB_ASM_PTR_(ptr), \ - "r"(val)); \ - } - - -/** - * Define a unsigned short (2B) ThreadStore specialization for the given Cache load modifier - */ -#define _CUB_STORE_2(cub_modifier, ptx_modifier) \ - template<> \ - __device__ __forceinline__ void ThreadStore(unsigned short* ptr, unsigned short val) \ - { \ - asm volatile ("st."#ptx_modifier".u16 [%0], %1;" : : \ - _CUB_ASM_PTR_(ptr), \ - "h"(val)); \ - } - - -/** - * Define a unsigned char (1B) ThreadStore specialization for the given Cache load modifier - */ -#define _CUB_STORE_1(cub_modifier, ptx_modifier) \ - template<> \ - __device__ __forceinline__ void ThreadStore(unsigned char* ptr, unsigned char val) \ - { \ - asm volatile ( \ - "{" \ - " .reg .u8 datum;" \ - " cvt.u8.u16 datum, %1;" \ - " st."#ptx_modifier".u8 [%0], datum;" \ - "}" : : \ - _CUB_ASM_PTR_(ptr), \ - "h"((unsigned short) val)); \ - } - -/** - * Define powers-of-two ThreadStore specializations for the given Cache load modifier - */ -#define _CUB_STORE_ALL(cub_modifier, ptx_modifier) \ - _CUB_STORE_16(cub_modifier, ptx_modifier) \ - _CUB_STORE_8(cub_modifier, ptx_modifier) \ - _CUB_STORE_4(cub_modifier, ptx_modifier) \ - _CUB_STORE_2(cub_modifier, ptx_modifier) \ - _CUB_STORE_1(cub_modifier, ptx_modifier) \ - - -/** - * Define ThreadStore specializations for the various Cache load modifiers - */ -#if CUB_PTX_ARCH >= 200 - _CUB_STORE_ALL(STORE_WB, wb) - _CUB_STORE_ALL(STORE_CG, cg) - _CUB_STORE_ALL(STORE_CS, cs) - _CUB_STORE_ALL(STORE_WT, wt) -#else - _CUB_STORE_ALL(STORE_WB, global) - _CUB_STORE_ALL(STORE_CG, global) - _CUB_STORE_ALL(STORE_CS, global) - _CUB_STORE_ALL(STORE_WT, volatile.global) -#endif - - -// Macro cleanup -#undef _CUB_STORE_ALL -#undef _CUB_STORE_1 -#undef _CUB_STORE_2 -#undef _CUB_STORE_4 -#undef _CUB_STORE_8 -#undef _CUB_STORE_16 - - -/** - * ThreadStore definition for STORE_DEFAULT modifier on iterator types - */ -template -__device__ __forceinline__ void ThreadStore( - OutputIteratorT itr, - T val, - Int2Type /*modifier*/, - Int2Type /*is_pointer*/) -{ - *itr = val; -} - - -/** - * ThreadStore definition for STORE_DEFAULT modifier on pointer types - */ -template -__device__ __forceinline__ void ThreadStore( - T *ptr, - T val, - Int2Type /*modifier*/, - Int2Type /*is_pointer*/) -{ - *ptr = val; -} - - -/** - * ThreadStore definition for STORE_VOLATILE modifier on primitive pointer types - */ -template -__device__ __forceinline__ void ThreadStoreVolatilePtr( - T *ptr, - T val, - Int2Type /*is_primitive*/) -{ - *reinterpret_cast(ptr) = val; -} - - -/** - * ThreadStore definition for STORE_VOLATILE modifier on non-primitive pointer types - */ -template -__device__ __forceinline__ void ThreadStoreVolatilePtr( - T *ptr, - T val, - Int2Type /*is_primitive*/) -{ - // Create a temporary using shuffle-words, then store using volatile-words - typedef typename UnitWord::VolatileWord VolatileWord; - typedef typename UnitWord::ShuffleWord ShuffleWord; - - const int VOLATILE_MULTIPLE = sizeof(T) / sizeof(VolatileWord); - const int SHUFFLE_MULTIPLE = sizeof(T) / sizeof(ShuffleWord); - - VolatileWord words[VOLATILE_MULTIPLE]; - - #pragma unroll - for (int i = 0; i < SHUFFLE_MULTIPLE; ++i) - reinterpret_cast(words)[i] = reinterpret_cast(&val)[i]; - - IterateThreadStore<0, VOLATILE_MULTIPLE>::template Dereference( - reinterpret_cast(ptr), - words); -} - - -/** - * ThreadStore definition for STORE_VOLATILE modifier on 
pointer types - */ -template -__device__ __forceinline__ void ThreadStore( - T *ptr, - T val, - Int2Type /*modifier*/, - Int2Type /*is_pointer*/) -{ - ThreadStoreVolatilePtr(ptr, val, Int2Type::PRIMITIVE>()); -} - - -/** - * ThreadStore definition for generic modifiers on pointer types - */ -template -__device__ __forceinline__ void ThreadStore( - T *ptr, - T val, - Int2Type /*modifier*/, - Int2Type /*is_pointer*/) -{ - // Create a temporary using shuffle-words, then store using device-words - typedef typename UnitWord::DeviceWord DeviceWord; - typedef typename UnitWord::ShuffleWord ShuffleWord; - - const int DEVICE_MULTIPLE = sizeof(T) / sizeof(DeviceWord); - const int SHUFFLE_MULTIPLE = sizeof(T) / sizeof(ShuffleWord); - - DeviceWord words[DEVICE_MULTIPLE]; - - #pragma unroll - for (int i = 0; i < SHUFFLE_MULTIPLE; ++i) - reinterpret_cast(words)[i] = reinterpret_cast(&val)[i]; - - IterateThreadStore<0, DEVICE_MULTIPLE>::template Store( - reinterpret_cast(ptr), - words); -} - - -/** - * ThreadStore definition for generic modifiers - */ -template -__device__ __forceinline__ void ThreadStore(OutputIteratorT itr, T val) -{ - ThreadStore( - itr, - val, - Int2Type(), - Int2Type::VALUE>()); -} - - - -#endif // DOXYGEN_SHOULD_SKIP_THIS - - -/** @} */ // end group UtilIo - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/util_allocator.cuh b/ml-xgboost/cub/cub/util_allocator.cuh deleted file mode 100644 index 50b5d5e..0000000 --- a/ml-xgboost/cub/cub/util_allocator.cuh +++ /dev/null @@ -1,708 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/****************************************************************************** - * Simple caching allocator for device memory allocations. 
The allocator is - * thread-safe and capable of managing device allocations on multiple devices. - ******************************************************************************/ - -#pragma once - -#include "util_namespace.cuh" -#include "util_debug.cuh" - -#include -#include - -#include "host/mutex.cuh" -#include - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \addtogroup UtilMgmt - * @{ - */ - - -/****************************************************************************** - * CachingDeviceAllocator (host use) - ******************************************************************************/ - -/** - * \brief A simple caching allocator for device memory allocations. - * - * \par Overview - * The allocator is thread-safe and stream-safe and is capable of managing cached - * device allocations on multiple devices. It behaves as follows: - * - * \par - * - Allocations from the allocator are associated with an \p active_stream. Once freed, - * the allocation becomes available immediately for reuse within the \p active_stream - * with which it was associated with during allocation, and it becomes available for - * reuse within other streams when all prior work submitted to \p active_stream has completed. - * - Allocations are categorized and cached by bin size. A new allocation request of - * a given size will only consider cached allocations within the corresponding bin. - * - Bin limits progress geometrically in accordance with the growth factor - * \p bin_growth provided during construction. Unused device allocations within - * a larger bin cache are not reused for allocation requests that categorize to - * smaller bin sizes. - * - Allocation requests below (\p bin_growth ^ \p min_bin) are rounded up to - * (\p bin_growth ^ \p min_bin). - * - Allocations above (\p bin_growth ^ \p max_bin) are not rounded up to the nearest - * bin and are simply freed when they are deallocated instead of being returned - * to a bin-cache. - * - %If the total storage of cached allocations on a given device will exceed - * \p max_cached_bytes, allocations for that device are simply freed when they are - * deallocated instead of being returned to their bin-cache. 
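- *
- * \par
- * A minimal host-side usage sketch (error checking elided; the size and
- * device pointer below are illustrative only):
- * \code
- * cub::CachingDeviceAllocator allocator;            // default configuration
- * void *d_data = NULL;
- * allocator.DeviceAllocate(&d_data, 1024 * 1024);   // rounded up to the enclosing bin
- * // ... launch kernels that read and write d_data ...
- * allocator.DeviceFree(d_data);                     // returned to the bin-cache for reuse
- * \endcode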
- * - * \par - * For example, the default-constructed CachingDeviceAllocator is configured with: - * - \p bin_growth = 8 - * - \p min_bin = 3 - * - \p max_bin = 7 - * - \p max_cached_bytes = 6MB - 1B - * - * \par - * which delineates five bin-sizes: 512B, 4KB, 32KB, 256KB, and 2MB - * and sets a maximum of 6,291,455 cached bytes per device - * - */ -struct CachingDeviceAllocator -{ - - //--------------------------------------------------------------------- - // Constants - //--------------------------------------------------------------------- - - /// Out-of-bounds bin - static const unsigned int INVALID_BIN = (unsigned int) -1; - - /// Invalid size - static const size_t INVALID_SIZE = (size_t) -1; - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - - /// Invalid device ordinal - static const int INVALID_DEVICE_ORDINAL = -1; - - //--------------------------------------------------------------------- - // Type definitions and helper types - //--------------------------------------------------------------------- - - /** - * Descriptor for device memory allocations - */ - struct BlockDescriptor - { - void* d_ptr; // Device pointer - size_t bytes; // Size of allocation in bytes - unsigned int bin; // Bin enumeration - int device; // device ordinal - cudaStream_t associated_stream; // Associated associated_stream - cudaEvent_t ready_event; // Signal when associated stream has run to the point at which this block was freed - - // Constructor (suitable for searching maps for a specific block, given its pointer and device) - BlockDescriptor(void *d_ptr, int device) : - d_ptr(d_ptr), - bytes(0), - bin(INVALID_BIN), - device(device), - associated_stream(0), - ready_event(0) - {} - - // Constructor (suitable for searching maps for a range of suitable blocks, given a device) - BlockDescriptor(int device) : - d_ptr(NULL), - bytes(0), - bin(INVALID_BIN), - device(device), - associated_stream(0), - ready_event(0) - {} - - // Comparison functor for comparing device pointers - static bool PtrCompare(const BlockDescriptor &a, const BlockDescriptor &b) - { - if (a.device == b.device) - return (a.d_ptr < b.d_ptr); - else - return (a.device < b.device); - } - - // Comparison functor for comparing allocation sizes - static bool SizeCompare(const BlockDescriptor &a, const BlockDescriptor &b) - { - if (a.device == b.device) - return (a.bytes < b.bytes); - else - return (a.device < b.device); - } - }; - - /// BlockDescriptor comparator function interface - typedef bool (*Compare)(const BlockDescriptor &, const BlockDescriptor &); - - class TotalBytes { - public: - size_t free; - size_t live; - TotalBytes() { free = live = 0; } - }; - - /// Set type for cached blocks (ordered by size) - typedef std::multiset CachedBlocks; - - /// Set type for live blocks (ordered by ptr) - typedef std::multiset BusyBlocks; - - /// Map type of device ordinals to the number of cached bytes cached by each device - typedef std::map GpuCachedBytes; - - - //--------------------------------------------------------------------- - // Utility functions - //--------------------------------------------------------------------- - - /** - * Integer pow function for unsigned base and exponent - */ - static unsigned int IntPow( - unsigned int base, - unsigned int exp) - { - unsigned int retval = 1; - while (exp > 0) - { - if (exp & 1) { - retval = retval * base; // multiply the result by the current base - } - base = base * base; // square the base - exp = exp >> 1; // divide the exponent in half - } - return retval; - } - - - /** - * Round 
up to the nearest power of \p base
-     */
-    void NearestPowerOf(
-        unsigned int    &power,
-        size_t          &rounded_bytes,
-        unsigned int    base,
-        size_t          value)
-    {
-        power = 0;
-        rounded_bytes = 1;
-
-        if (value * base < value)
-        {
-            // Overflow
-            power = sizeof(size_t) * 8;
-            rounded_bytes = size_t(0) - 1;
-            return;
-        }
-
-        while (rounded_bytes < value)
-        {
-            rounded_bytes *= base;
-            power++;
-        }
-    }
-
-
-    //---------------------------------------------------------------------
-    // Fields
-    //---------------------------------------------------------------------
-
-    cub::Mutex      mutex;              /// Mutex for thread-safety
-
-    unsigned int    bin_growth;         /// Geometric growth factor for bin-sizes
-    unsigned int    min_bin;            /// Minimum bin enumeration
-    unsigned int    max_bin;            /// Maximum bin enumeration
-
-    size_t          min_bin_bytes;      /// Minimum bin size
-    size_t          max_bin_bytes;      /// Maximum bin size
-    size_t          max_cached_bytes;   /// Maximum aggregate cached bytes per device
-
-    const bool      skip_cleanup;       /// Whether or not to skip a call to FreeAllCached() when the destructor is called.  (The CUDA runtime may have already shut down for statically declared allocators)
-    bool            debug;              /// Whether or not to print (de)allocation events to stdout
-
-    GpuCachedBytes  cached_bytes;       /// Map of device ordinal to aggregate cached bytes on that device
-    CachedBlocks    cached_blocks;      /// Set of cached device allocations available for reuse
-    BusyBlocks      live_blocks;        /// Set of live device allocations currently in use
-
-#endif // DOXYGEN_SHOULD_SKIP_THIS
-
-    //---------------------------------------------------------------------
-    // Methods
-    //---------------------------------------------------------------------
-
-    /**
-     * \brief Constructor.
-     */
-    CachingDeviceAllocator(
-        unsigned int    bin_growth,                          ///< Geometric growth factor for bin-sizes
-        unsigned int    min_bin          = 1,                ///< Minimum bin (default is bin_growth ^ 1)
-        unsigned int    max_bin          = INVALID_BIN,      ///< Maximum bin (default is no max bin)
-        size_t          max_cached_bytes = INVALID_SIZE,     ///< Maximum aggregate cached bytes per device (default is no limit)
-        bool            skip_cleanup     = false,            ///< Whether or not to skip a call to \p FreeAllCached() when the destructor is called (default is to deallocate)
-        bool            debug            = false)            ///< Whether or not to print (de)allocation events to stdout (default is no output)
-    :
-        bin_growth(bin_growth),
-        min_bin(min_bin),
-        max_bin(max_bin),
-        min_bin_bytes(IntPow(bin_growth, min_bin)),
-        max_bin_bytes(IntPow(bin_growth, max_bin)),
-        max_cached_bytes(max_cached_bytes),
-        skip_cleanup(skip_cleanup),
-        debug(debug),
-        cached_blocks(BlockDescriptor::SizeCompare),
-        live_blocks(BlockDescriptor::PtrCompare)
-    {}
-
-
-    /**
-     * \brief Default constructor.
-     *
-     * Configured with:
-     * \par
-     * - \p bin_growth       = 8
-     * - \p min_bin          = 3
-     * - \p max_bin          = 7
-     * - \p max_cached_bytes = ((\p bin_growth ^ \p max_bin) * 3) - 1 = 6,291,455 bytes
-     *
-     * which delineates five bin-sizes: 512B, 4KB, 32KB, 256KB, and 2MB and
-     * sets a maximum of 6,291,455 cached bytes per device.
-     */
-    CachingDeviceAllocator(
-        bool skip_cleanup = false,
-        bool debug = false)
-    :
-        bin_growth(8),
-        min_bin(3),
-        max_bin(7),
-        min_bin_bytes(IntPow(bin_growth, min_bin)),
-        max_bin_bytes(IntPow(bin_growth, max_bin)),
-        max_cached_bytes((max_bin_bytes * 3) - 1),
-        skip_cleanup(skip_cleanup),
-        debug(debug),
-        cached_blocks(BlockDescriptor::SizeCompare),
-        live_blocks(BlockDescriptor::PtrCompare)
-    {}
-
-
-    /**
-     * \brief Sets the limit on the number of bytes this allocator is allowed to cache per device.
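-     *
-     * \par
-     * For example, a minimal sketch (assuming an existing \p allocator instance):
-     * \code
-     * allocator.SetMaxCachedBytes(1024 * 1024);   // cap each device's bin-cache at 1MB
-     * \endcode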
- * - * Changing the ceiling of cached bytes does not cause any allocations (in-use or - * cached-in-reserve) to be freed. See \p FreeAllCached(). - */ - cudaError_t SetMaxCachedBytes( - size_t max_cached_bytes) - { - // Lock - mutex.Lock(); - - if (debug) _CubLog("Changing max_cached_bytes (%lld -> %lld)\n", (long long) this->max_cached_bytes, (long long) max_cached_bytes); - - this->max_cached_bytes = max_cached_bytes; - - // Unlock - mutex.Unlock(); - - return cudaSuccess; - } - - - /** - * \brief Provides a suitable allocation of device memory for the given size on the specified device. - * - * Once freed, the allocation becomes available immediately for reuse within the \p active_stream - * with which it was associated with during allocation, and it becomes available for reuse within other - * streams when all prior work submitted to \p active_stream has completed. - */ - cudaError_t DeviceAllocate( - int device, ///< [in] Device on which to place the allocation - void **d_ptr, ///< [out] Reference to pointer to the allocation - size_t bytes, ///< [in] Minimum number of bytes for the allocation - cudaStream_t active_stream = 0) ///< [in] The stream to be associated with this allocation - { - *d_ptr = NULL; - int entrypoint_device = INVALID_DEVICE_ORDINAL; - cudaError_t error = cudaSuccess; - - if (device == INVALID_DEVICE_ORDINAL) - { - if (CubDebug(error = cudaGetDevice(&entrypoint_device))) return error; - device = entrypoint_device; - } - - // Create a block descriptor for the requested allocation - bool found = false; - BlockDescriptor search_key(device); - search_key.associated_stream = active_stream; - NearestPowerOf(search_key.bin, search_key.bytes, bin_growth, bytes); - - if (search_key.bin > max_bin) - { - // Bin is greater than our maximum bin: allocate the request - // exactly and give out-of-bounds bin. It will not be cached - // for reuse when returned. - search_key.bin = INVALID_BIN; - search_key.bytes = bytes; - } - else - { - // Search for a suitable cached allocation: lock - mutex.Lock(); - - if (search_key.bin < min_bin) - { - // Bin is less than minimum bin: round up - search_key.bin = min_bin; - search_key.bytes = min_bin_bytes; - } - - // Iterate through the range of cached blocks on the same device in the same bin - CachedBlocks::iterator block_itr = cached_blocks.lower_bound(search_key); - while ((block_itr != cached_blocks.end()) - && (block_itr->device == device) - && (block_itr->bin == search_key.bin)) - { - // To prevent races with reusing blocks returned by the host but still - // in use by the device, only consider cached blocks that are - // either (from the active stream) or (from an idle stream) - if ((active_stream == block_itr->associated_stream) || - (cudaEventQuery(block_itr->ready_event) != cudaErrorNotReady)) - { - // Reuse existing cache block. Insert into live blocks. 
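-                    // (The reused block keeps its ready_event; that event is
-                    // re-recorded on the associated stream when the block is
-                    // next returned via DeviceFree.)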
- found = true; - search_key = *block_itr; - search_key.associated_stream = active_stream; - live_blocks.insert(search_key); - - // Remove from free blocks - cached_bytes[device].free -= search_key.bytes; - cached_bytes[device].live += search_key.bytes; - - if (debug) _CubLog("\tDevice %d reused cached block at %p (%lld bytes) for stream %lld (previously associated with stream %lld).\n", - device, search_key.d_ptr, (long long) search_key.bytes, (long long) search_key.associated_stream, (long long) block_itr->associated_stream); - - cached_blocks.erase(block_itr); - - break; - } - block_itr++; - } - - // Done searching: unlock - mutex.Unlock(); - } - - // Allocate the block if necessary - if (!found) - { - // Set runtime's current device to specified device (entrypoint may not be set) - if (device != entrypoint_device) - { - if (CubDebug(error = cudaGetDevice(&entrypoint_device))) return error; - if (CubDebug(error = cudaSetDevice(device))) return error; - } - - // Attempt to allocate - if (CubDebug(error = cudaMalloc(&search_key.d_ptr, search_key.bytes)) == cudaErrorMemoryAllocation) - { - // The allocation attempt failed: free all cached blocks on device and retry - if (debug) _CubLog("\tDevice %d failed to allocate %lld bytes for stream %lld, retrying after freeing cached allocations", - device, (long long) search_key.bytes, (long long) search_key.associated_stream); - - error = cudaSuccess; // Reset the error we will return - cudaGetLastError(); // Reset CUDART's error - - // Lock - mutex.Lock(); - - // Iterate the range of free blocks on the same device - BlockDescriptor free_key(device); - CachedBlocks::iterator block_itr = cached_blocks.lower_bound(free_key); - - while ((block_itr != cached_blocks.end()) && (block_itr->device == device)) - { - // No need to worry about synchronization with the device: cudaFree is - // blocking and will synchronize across all kernels executing - // on the current device - - // Free device memory and destroy stream event. 
- if (CubDebug(error = cudaFree(block_itr->d_ptr))) break; - if (CubDebug(error = cudaEventDestroy(block_itr->ready_event))) break; - - // Reduce balance and erase entry - cached_bytes[device].free -= block_itr->bytes; - - if (debug) _CubLog("\tDevice %d freed %lld bytes.\n\t\t %lld available blocks cached (%lld bytes), %lld live blocks (%lld bytes) outstanding.\n", - device, (long long) block_itr->bytes, (long long) cached_blocks.size(), (long long) cached_bytes[device].free, (long long) live_blocks.size(), (long long) cached_bytes[device].live); - - cached_blocks.erase(block_itr); - - block_itr++; - } - - // Unlock - mutex.Unlock(); - - // Return under error - if (error) return error; - - // Try to allocate again - if (CubDebug(error = cudaMalloc(&search_key.d_ptr, search_key.bytes))) return error; - } - - // Create ready event - if (CubDebug(error = cudaEventCreateWithFlags(&search_key.ready_event, cudaEventDisableTiming))) - return error; - - // Insert into live blocks - mutex.Lock(); - live_blocks.insert(search_key); - cached_bytes[device].live += search_key.bytes; - mutex.Unlock(); - - if (debug) _CubLog("\tDevice %d allocated new device block at %p (%lld bytes associated with stream %lld).\n", - device, search_key.d_ptr, (long long) search_key.bytes, (long long) search_key.associated_stream); - - // Attempt to revert back to previous device if necessary - if ((entrypoint_device != INVALID_DEVICE_ORDINAL) && (entrypoint_device != device)) - { - if (CubDebug(error = cudaSetDevice(entrypoint_device))) return error; - } - } - - // Copy device pointer to output parameter - *d_ptr = search_key.d_ptr; - - if (debug) _CubLog("\t\t%lld available blocks cached (%lld bytes), %lld live blocks outstanding(%lld bytes).\n", - (long long) cached_blocks.size(), (long long) cached_bytes[device].free, (long long) live_blocks.size(), (long long) cached_bytes[device].live); - - return error; - } - - - /** - * \brief Provides a suitable allocation of device memory for the given size on the current device. - * - * Once freed, the allocation becomes available immediately for reuse within the \p active_stream - * with which it was associated with during allocation, and it becomes available for reuse within other - * streams when all prior work submitted to \p active_stream has completed. - */ - cudaError_t DeviceAllocate( - void **d_ptr, ///< [out] Reference to pointer to the allocation - size_t bytes, ///< [in] Minimum number of bytes for the allocation - cudaStream_t active_stream = 0) ///< [in] The stream to be associated with this allocation - { - return DeviceAllocate(INVALID_DEVICE_ORDINAL, d_ptr, bytes, active_stream); - } - - - /** - * \brief Frees a live allocation of device memory on the specified device, returning it to the allocator. - * - * Once freed, the allocation becomes available immediately for reuse within the \p active_stream - * with which it was associated with during allocation, and it becomes available for reuse within other - * streams when all prior work submitted to \p active_stream has completed. 
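- *
- * \par
- * (Internally, a \p ready_event is recorded on the associated stream when a
- * block is recached; allocation requests from other streams query this event
- * before reusing the block.)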
- */ - cudaError_t DeviceFree( - int device, - void* d_ptr) - { - int entrypoint_device = INVALID_DEVICE_ORDINAL; - cudaError_t error = cudaSuccess; - - if (device == INVALID_DEVICE_ORDINAL) - { - if (CubDebug(error = cudaGetDevice(&entrypoint_device))) - return error; - device = entrypoint_device; - } - - // Lock - mutex.Lock(); - - // Find corresponding block descriptor - bool recached = false; - BlockDescriptor search_key(d_ptr, device); - BusyBlocks::iterator block_itr = live_blocks.find(search_key); - if (block_itr != live_blocks.end()) - { - // Remove from live blocks - search_key = *block_itr; - live_blocks.erase(block_itr); - cached_bytes[device].live -= search_key.bytes; - - // Keep the returned allocation if bin is valid and we won't exceed the max cached threshold - if ((search_key.bin != INVALID_BIN) && (cached_bytes[device].free + search_key.bytes <= max_cached_bytes)) - { - // Insert returned allocation into free blocks - recached = true; - cached_blocks.insert(search_key); - cached_bytes[device].free += search_key.bytes; - - if (debug) _CubLog("\tDevice %d returned %lld bytes from associated stream %lld.\n\t\t %lld available blocks cached (%lld bytes), %lld live blocks outstanding. (%lld bytes)\n", - device, (long long) search_key.bytes, (long long) search_key.associated_stream, (long long) cached_blocks.size(), - (long long) cached_bytes[device].free, (long long) live_blocks.size(), (long long) cached_bytes[device].live); - } - } - - // Unlock - mutex.Unlock(); - - // First set to specified device (entrypoint may not be set) - if (device != entrypoint_device) - { - if (CubDebug(error = cudaGetDevice(&entrypoint_device))) return error; - if (CubDebug(error = cudaSetDevice(device))) return error; - } - - if (recached) - { - // Insert the ready event in the associated stream (must have current device set properly) - if (CubDebug(error = cudaEventRecord(search_key.ready_event, search_key.associated_stream))) return error; - } - else - { - // Free the allocation from the runtime and cleanup the event. - if (CubDebug(error = cudaFree(d_ptr))) return error; - if (CubDebug(error = cudaEventDestroy(search_key.ready_event))) return error; - - if (debug) _CubLog("\tDevice %d freed %lld bytes from associated stream %lld.\n\t\t %lld available blocks cached (%lld bytes), %lld live blocks (%lld bytes) outstanding.\n", - device, (long long) search_key.bytes, (long long) search_key.associated_stream, (long long) cached_blocks.size(), (long long) cached_bytes[device].free, (long long) live_blocks.size(), (long long) cached_bytes[device].live); - } - - // Reset device - if ((entrypoint_device != INVALID_DEVICE_ORDINAL) && (entrypoint_device != device)) - { - if (CubDebug(error = cudaSetDevice(entrypoint_device))) return error; - } - - return error; - } - - - /** - * \brief Frees a live allocation of device memory on the current device, returning it to the allocator. - * - * Once freed, the allocation becomes available immediately for reuse within the \p active_stream - * with which it was associated with during allocation, and it becomes available for reuse within other - * streams when all prior work submitted to \p active_stream has completed. 
- */ - cudaError_t DeviceFree( - void* d_ptr) - { - return DeviceFree(INVALID_DEVICE_ORDINAL, d_ptr); - } - - - /** - * \brief Frees all cached device allocations on all devices - */ - cudaError_t FreeAllCached() - { - cudaError_t error = cudaSuccess; - int entrypoint_device = INVALID_DEVICE_ORDINAL; - int current_device = INVALID_DEVICE_ORDINAL; - - mutex.Lock(); - - while (!cached_blocks.empty()) - { - // Get first block - CachedBlocks::iterator begin = cached_blocks.begin(); - - // Get entry-point device ordinal if necessary - if (entrypoint_device == INVALID_DEVICE_ORDINAL) - { - if (CubDebug(error = cudaGetDevice(&entrypoint_device))) break; - } - - // Set current device ordinal if necessary - if (begin->device != current_device) - { - if (CubDebug(error = cudaSetDevice(begin->device))) break; - current_device = begin->device; - } - - // Free device memory - if (CubDebug(error = cudaFree(begin->d_ptr))) break; - if (CubDebug(error = cudaEventDestroy(begin->ready_event))) break; - - // Reduce balance and erase entry - cached_bytes[current_device].free -= begin->bytes; - - if (debug) _CubLog("\tDevice %d freed %lld bytes.\n\t\t %lld available blocks cached (%lld bytes), %lld live blocks (%lld bytes) outstanding.\n", - current_device, (long long) begin->bytes, (long long) cached_blocks.size(), (long long) cached_bytes[current_device].free, (long long) live_blocks.size(), (long long) cached_bytes[current_device].live); - - cached_blocks.erase(begin); - } - - mutex.Unlock(); - - // Attempt to revert back to entry-point device if necessary - if (entrypoint_device != INVALID_DEVICE_ORDINAL) - { - if (CubDebug(error = cudaSetDevice(entrypoint_device))) return error; - } - - return error; - } - - - /** - * \brief Destructor - */ - virtual ~CachingDeviceAllocator() - { - if (!skip_cleanup) - FreeAllCached(); - } - -}; - - - - -/** @} */ // end group UtilMgmt - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/util_arch.cuh b/ml-xgboost/cub/cub/util_arch.cuh deleted file mode 100644 index 2be8b8f..0000000 --- a/ml-xgboost/cub/cub/util_arch.cuh +++ /dev/null @@ -1,144 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * Static architectural properties by SM version. - */ - -#pragma once - -#include "util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - -#if (__CUDACC_VER_MAJOR__ >= 9) -#define CUB_USE_COOPERATIVE_GROUPS -#endif - -/// CUB_PTX_ARCH reflects the PTX version targeted by the active compiler pass (or zero during the host pass). -#ifndef CUB_PTX_ARCH - #ifndef __CUDA_ARCH__ - #define CUB_PTX_ARCH 0 - #else - #define CUB_PTX_ARCH __CUDA_ARCH__ - #endif -#endif - - -/// Whether or not the source targeted by the active compiler pass is allowed to invoke device kernels or methods from the CUDA runtime API. -#ifndef CUB_RUNTIME_FUNCTION - #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__>= 350 && defined(__CUDACC_RDC__)) - #define CUB_RUNTIME_ENABLED - #define CUB_RUNTIME_FUNCTION __host__ __device__ - #else - #define CUB_RUNTIME_FUNCTION __host__ - #endif -#endif - - -/// Number of threads per warp -#ifndef CUB_LOG_WARP_THREADS - #define CUB_LOG_WARP_THREADS(arch) \ - (5) - #define CUB_WARP_THREADS(arch) \ - (1 << CUB_LOG_WARP_THREADS(arch)) - - #define CUB_PTX_WARP_THREADS CUB_WARP_THREADS(CUB_PTX_ARCH) - #define CUB_PTX_LOG_WARP_THREADS CUB_LOG_WARP_THREADS(CUB_PTX_ARCH) -#endif - - -/// Number of smem banks -#ifndef CUB_LOG_SMEM_BANKS - #define CUB_LOG_SMEM_BANKS(arch) \ - ((arch >= 200) ? \ - (5) : \ - (4)) - #define CUB_SMEM_BANKS(arch) \ - (1 << CUB_LOG_SMEM_BANKS(arch)) - - #define CUB_PTX_LOG_SMEM_BANKS CUB_LOG_SMEM_BANKS(CUB_PTX_ARCH) - #define CUB_PTX_SMEM_BANKS CUB_SMEM_BANKS(CUB_PTX_ARCH) -#endif - - -/// Oversubscription factor -#ifndef CUB_SUBSCRIPTION_FACTOR - #define CUB_SUBSCRIPTION_FACTOR(arch) \ - ((arch >= 300) ? \ - (5) : \ - ((arch >= 200) ? \ - (3) : \ - (10))) - #define CUB_PTX_SUBSCRIPTION_FACTOR CUB_SUBSCRIPTION_FACTOR(CUB_PTX_ARCH) -#endif - - -/// Prefer padding overhead vs X-way conflicts greater than this threshold -#ifndef CUB_PREFER_CONFLICT_OVER_PADDING - #define CUB_PREFER_CONFLICT_OVER_PADDING(arch) \ - ((arch >= 300) ? \ - (1) : \ - (4)) - #define CUB_PTX_PREFER_CONFLICT_OVER_PADDING CUB_PREFER_CONFLICT_OVER_PADDING(CUB_PTX_ARCH) -#endif - - -/// Scale down the number of warps to keep same amount of "tile" storage as the nominal configuration for 4B data. Minimum of two warps. -#define CUB_BLOCK_THREADS(NOMINAL_4B_BLOCK_THREADS, T, PTX_ARCH) \ - (CUB_MIN( \ - NOMINAL_4B_BLOCK_THREADS * 2, \ - CUB_WARP_THREADS(PTX_ARCH) * CUB_MAX( \ - (NOMINAL_4B_BLOCK_THREADS / CUB_WARP_THREADS(PTX_ARCH)) * 3 / 4, \ - (NOMINAL_4B_BLOCK_THREADS / CUB_WARP_THREADS(PTX_ARCH)) * 4 / sizeof(T)))) - -/// Scale up/down number of items per thread to keep the same amount of "tile" storage as the nominal configuration for 4B data. 
Minimum 1 item per thread -#define CUB_ITEMS_PER_THREAD(NOMINAL_4B_ITEMS_PER_THREAD, NOMINAL_4B_BLOCK_THREADS, T, PTX_ARCH) \ - (CUB_MIN( \ - NOMINAL_4B_ITEMS_PER_THREAD * 2, \ - CUB_MAX( \ - 1, \ - (NOMINAL_4B_ITEMS_PER_THREAD * NOMINAL_4B_BLOCK_THREADS * 4 / sizeof(T)) / CUB_BLOCK_THREADS(NOMINAL_4B_BLOCK_THREADS, T, PTX_ARCH)))) - - -#define CUB_NOMINAL_CONFIG(NOMINAL_4B_BLOCK_THREADS, NOMINAL_4B_ITEMS_PER_THREAD, T) \ - CUB_BLOCK_THREADS(NOMINAL_4B_BLOCK_THREADS, T, 200), \ - CUB_ITEMS_PER_THREAD(NOMINAL_4B_ITEMS_PER_THREAD, NOMINAL_4B_BLOCK_THREADS, T, 200) - - -#endif // Do not document - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/util_debug.cuh b/ml-xgboost/cub/cub/util_debug.cuh deleted file mode 100644 index 168b433..0000000 --- a/ml-xgboost/cub/cub/util_debug.cuh +++ /dev/null @@ -1,145 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * Error and event logging routines. - * - * The following macros definitions are supported: - * - \p CUB_LOG. Simple event messages are printed to \p stdout. - */ - -#pragma once - -#include -#include "util_namespace.cuh" -#include "util_arch.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \addtogroup UtilMgmt - * @{ - */ - - -/// CUB error reporting macro (prints error messages to stderr) -#if (defined(DEBUG) || defined(_DEBUG)) && !defined(CUB_STDERR) - #define CUB_STDERR -#endif - - - -/** - * \brief %If \p CUB_STDERR is defined and \p error is not \p cudaSuccess, the corresponding error message is printed to \p stderr (or \p stdout in device code) along with the supplied source context. - * - * \return The CUDA error. 
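- *
- * \par
- * Typically invoked through the \p CubDebug macro defined below; a minimal
- * sketch (\p d_ptr and \p bytes are hypothetical):
- * \code
- * if (CubDebug(cudaMalloc(&d_ptr, bytes))) return;   // reports file and line on failure
- * \endcode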
- */
-__host__ __device__ __forceinline__ cudaError_t Debug(
-    cudaError_t     error,
-    const char*     filename,
-    int             line)
-{
-    (void)filename;
-    (void)line;
-#ifdef CUB_STDERR
-    if (error)
-    {
-    #if (CUB_PTX_ARCH == 0)
-        fprintf(stderr, "CUDA error %d [%s, %d]: %s\n", error, filename, line, cudaGetErrorString(error));
-        fflush(stderr);
-    #elif (CUB_PTX_ARCH >= 200)
-        printf("CUDA error %d [block (%d,%d,%d) thread (%d,%d,%d), %s, %d]\n", error, blockIdx.z, blockIdx.y, blockIdx.x, threadIdx.z, threadIdx.y, threadIdx.x, filename, line);
-    #endif
-    }
-#endif
-    return error;
-}
-
-
-/**
- * \brief Debug macro
- */
-#ifndef CubDebug
-    #define CubDebug(e) cub::Debug((e), __FILE__, __LINE__)
-#endif
-
-
-/**
- * \brief Debug macro with exit
- */
-#ifndef CubDebugExit
-    #define CubDebugExit(e) if (cub::Debug((e), __FILE__, __LINE__)) { exit(1); }
-#endif
-
-
-/**
- * \brief Log macro for printf statements.
- */
-#if !defined(_CubLog)
-    #if !(defined(__clang__) && defined(__CUDA__))
-        #if (CUB_PTX_ARCH == 0)
-            #define _CubLog(format, ...) printf(format,__VA_ARGS__);
-        #elif (CUB_PTX_ARCH >= 200)
-            #define _CubLog(format, ...) printf("[block (%d,%d,%d), thread (%d,%d,%d)]: " format, blockIdx.z, blockIdx.y, blockIdx.x, threadIdx.z, threadIdx.y, threadIdx.x, __VA_ARGS__);
-        #endif
-    #else
-        // XXX shameless hack for clang around variadic printf...
-        //     Compiles w/o supplying -std=c++11 but shows warnings,
-        //     so we silence them :)
-        #pragma clang diagnostic ignored "-Wc++11-extensions"
-        #pragma clang diagnostic ignored "-Wunnamed-type-template-args"
-        template <typename... Args>
-        inline __host__ __device__ void va_printf(char const* format, Args const&... args)
-        {
-            #ifdef __CUDA_ARCH__
-                printf(format, blockIdx.z, blockIdx.y, blockIdx.x, threadIdx.z, threadIdx.y, threadIdx.x, args...);
-            #else
-                printf(format, args...);
-            #endif
-        }
-        #ifndef __CUDA_ARCH__
-            #define _CubLog(format, ...) va_printf(format,__VA_ARGS__);
-        #else
-            #define _CubLog(format, ...) va_printf("[block (%d,%d,%d), thread (%d,%d,%d)]: " format, __VA_ARGS__);
-        #endif
-    #endif
-#endif
-
-
-
-
-/** @} */            // end group UtilMgmt
-
-}               // CUB namespace
-CUB_NS_POSTFIX  // Optional outer namespace(s)
diff --git a/ml-xgboost/cub/cub/util_device.cuh b/ml-xgboost/cub/cub/util_device.cuh
deleted file mode 100644
index 38ab5b0..0000000
--- a/ml-xgboost/cub/cub/util_device.cuh
+++ /dev/null
@@ -1,347 +0,0 @@
-/******************************************************************************
- * Copyright (c) 2011, Duane Merrill.  All rights reserved.
- * Copyright (c) 2011-2016, NVIDIA CORPORATION.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of the NVIDIA CORPORATION nor the
- *       names of its contributors may be used to endorse or promote products
- *       derived from this software without specific prior written permission.
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * Properties of a given CUDA device and the corresponding PTX bundle - */ - -#pragma once - -#include "util_type.cuh" -#include "util_arch.cuh" -#include "util_debug.cuh" -#include "util_namespace.cuh" -#include "util_macro.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \addtogroup UtilMgmt - * @{ - */ - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - - -/** - * Alias temporaries to externally-allocated device storage (or simply return the amount of storage needed). - */ -template -__host__ __device__ __forceinline__ -cudaError_t AliasTemporaries( - void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Size in bytes of \t d_temp_storage allocation - void* (&allocations)[ALLOCATIONS], ///< [in,out] Pointers to device allocations needed - size_t (&allocation_sizes)[ALLOCATIONS]) ///< [in] Sizes in bytes of device allocations needed -{ - const int ALIGN_BYTES = 256; - const int ALIGN_MASK = ~(ALIGN_BYTES - 1); - - // Compute exclusive prefix sum over allocation requests - size_t allocation_offsets[ALLOCATIONS]; - size_t bytes_needed = 0; - for (int i = 0; i < ALLOCATIONS; ++i) - { - size_t allocation_bytes = (allocation_sizes[i] + ALIGN_BYTES - 1) & ALIGN_MASK; - allocation_offsets[i] = bytes_needed; - bytes_needed += allocation_bytes; - } - bytes_needed += ALIGN_BYTES - 1; - - // Check if the caller is simply requesting the size of the storage allocation - if (!d_temp_storage) - { - temp_storage_bytes = bytes_needed; - return cudaSuccess; - } - - // Check if enough storage provided - if (temp_storage_bytes < bytes_needed) - { - return CubDebug(cudaErrorInvalidValue); - } - - // Alias - d_temp_storage = (void *) ((size_t(d_temp_storage) + ALIGN_BYTES - 1) & ALIGN_MASK); - for (int i = 0; i < ALLOCATIONS; ++i) - { - allocations[i] = static_cast(d_temp_storage) + allocation_offsets[i]; - } - - return cudaSuccess; -} - - -/** - * Empty kernel for querying PTX manifest metadata (e.g., version) for the current device - */ -template -__global__ void EmptyKernel(void) { } - - -#endif // DOXYGEN_SHOULD_SKIP_THIS - -/** - * \brief Retrieves the PTX version that will be used on the current device (major * 100 + minor * 10) - */ -CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t PtxVersion(int &ptx_version) -{ - struct Dummy - { - /// Type definition of the EmptyKernel kernel entry point - typedef void (*EmptyKernelPtr)(); - - /// Force EmptyKernel to be generated if this class is used - 
CUB_RUNTIME_FUNCTION __forceinline__ - EmptyKernelPtr Empty() - { - return EmptyKernel; - } - }; - - -#ifndef CUB_RUNTIME_ENABLED - (void)ptx_version; - - // CUDA API calls not supported from this device - return cudaErrorInvalidConfiguration; - -#elif (CUB_PTX_ARCH > 0) - - ptx_version = CUB_PTX_ARCH; - return cudaSuccess; - -#else - - cudaError_t error = cudaSuccess; - do - { - cudaFuncAttributes empty_kernel_attrs; - if (CubDebug(error = cudaFuncGetAttributes(&empty_kernel_attrs, EmptyKernel))) break; - ptx_version = empty_kernel_attrs.ptxVersion * 10; - } - while (0); - - return error; - -#endif -} - - -/** - * \brief Retrieves the SM version (major * 100 + minor * 10) - */ -CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t SmVersion(int &sm_version, int device_ordinal) -{ -#ifndef CUB_RUNTIME_ENABLED - (void)sm_version; - (void)device_ordinal; - - // CUDA API calls not supported from this device - return cudaErrorInvalidConfiguration; - -#else - - cudaError_t error = cudaSuccess; - do - { - // Fill in SM version - int major, minor; - if (CubDebug(error = cudaDeviceGetAttribute(&major, cudaDevAttrComputeCapabilityMajor, device_ordinal))) break; - if (CubDebug(error = cudaDeviceGetAttribute(&minor, cudaDevAttrComputeCapabilityMinor, device_ordinal))) break; - sm_version = major * 100 + minor * 10; - } - while (0); - - return error; - -#endif -} - - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - -/** - * Synchronize the stream if specified - */ -CUB_RUNTIME_FUNCTION __forceinline__ -static cudaError_t SyncStream(cudaStream_t stream) -{ -#if (CUB_PTX_ARCH == 0) - return cudaStreamSynchronize(stream); -#else - (void)stream; - // Device can't yet sync on a specific stream - return cudaDeviceSynchronize(); -#endif -} - - -/** - * \brief Computes maximum SM occupancy in thread blocks for executing the given kernel function pointer \p kernel_ptr on the current device with \p block_threads per thread block. - * - * \par Snippet - * The code snippet below illustrates the use of the MaxSmOccupancy function. - * \par - * \code - * #include // or equivalently - * - * template - * __global__ void ExampleKernel() - * { - * // Allocate shared memory for BlockScan - * __shared__ volatile T buffer[4096]; - * - * ... - * } - * - * ... 
- * - * // Determine SM occupancy for ExampleKernel specialized for unsigned char - * int max_sm_occupancy; - * MaxSmOccupancy(max_sm_occupancy, ExampleKernel, 64); - * - * // max_sm_occupancy <-- 4 on SM10 - * // max_sm_occupancy <-- 8 on SM20 - * // max_sm_occupancy <-- 12 on SM35 - * - * \endcode - * - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t MaxSmOccupancy( - int &max_sm_occupancy, ///< [out] maximum number of thread blocks that can reside on a single SM - KernelPtr kernel_ptr, ///< [in] Kernel pointer for which to compute SM occupancy - int block_threads, ///< [in] Number of threads per thread block - int dynamic_smem_bytes = 0) -{ -#ifndef CUB_RUNTIME_ENABLED - (void)dynamic_smem_bytes; - (void)block_threads; - (void)kernel_ptr; - (void)max_sm_occupancy; - - // CUDA API calls not supported from this device - return CubDebug(cudaErrorInvalidConfiguration); - -#else - - return cudaOccupancyMaxActiveBlocksPerMultiprocessor ( - &max_sm_occupancy, - kernel_ptr, - block_threads, - dynamic_smem_bytes); - -#endif // CUB_RUNTIME_ENABLED -} - - -/****************************************************************************** - * Policy management - ******************************************************************************/ - -/** - * Kernel dispatch configuration - */ -struct KernelConfig -{ - int block_threads; - int items_per_thread; - int tile_size; - int sm_occupancy; - - CUB_RUNTIME_FUNCTION __forceinline__ - KernelConfig() : block_threads(0), items_per_thread(0), tile_size(0), sm_occupancy(0) {} - - template - CUB_RUNTIME_FUNCTION __forceinline__ - cudaError_t Init(KernelPtrT kernel_ptr) - { - block_threads = AgentPolicyT::BLOCK_THREADS; - items_per_thread = AgentPolicyT::ITEMS_PER_THREAD; - tile_size = block_threads * items_per_thread; - cudaError_t retval = MaxSmOccupancy(sm_occupancy, kernel_ptr, block_threads); - return retval; - } -}; - - - -/// Helper for dispatching into a policy chain -template -struct ChainedPolicy -{ - /// The policy for the active compiler pass - typedef typename If<(CUB_PTX_ARCH < PTX_VERSION), typename PrevPolicyT::ActivePolicy, PolicyT>::Type ActivePolicy; - - /// Specializes and dispatches op in accordance to the first policy in the chain of adequate PTX version - template - CUB_RUNTIME_FUNCTION __forceinline__ - static cudaError_t Invoke(int ptx_version, FunctorT &op) - { - if (ptx_version < PTX_VERSION) { - return PrevPolicyT::Invoke(ptx_version, op); - } - return op.template Invoke(); - } -}; - -/// Helper for dispatching into a policy chain (end-of-chain specialization) -template -struct ChainedPolicy -{ - /// The policy for the active compiler pass - typedef PolicyT ActivePolicy; - - /// Specializes and dispatches op in accordance to the first policy in the chain of adequate PTX version - template - CUB_RUNTIME_FUNCTION __forceinline__ - static cudaError_t Invoke(int /*ptx_version*/, FunctorT &op) { - return op.template Invoke(); - } -}; - - - - -#endif // Do not document - - - - -/** @} */ // end group UtilMgmt - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/util_macro.cuh b/ml-xgboost/cub/cub/util_macro.cuh deleted file mode 100644 index 8c7756d..0000000 --- a/ml-xgboost/cub/cub/util_macro.cuh +++ /dev/null @@ -1,103 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/****************************************************************************** - * Common C/C++ macro utilities - ******************************************************************************/ - -#pragma once - -#include "util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \addtogroup UtilModule - * @{ - */ - -#ifndef CUB_ALIGN - #if defined(_WIN32) || defined(_WIN64) - /// Align struct - #define CUB_ALIGN(bytes) __declspec(align(32)) - #else - /// Align struct - #define CUB_ALIGN(bytes) __attribute__((aligned(bytes))) - #endif -#endif - -#ifndef CUB_MAX - /// Select maximum(a, b) - #define CUB_MAX(a, b) (((b) > (a)) ? (b) : (a)) -#endif - -#ifndef CUB_MIN - /// Select minimum(a, b) - #define CUB_MIN(a, b) (((b) < (a)) ? (b) : (a)) -#endif - -#ifndef CUB_QUOTIENT_FLOOR - /// Quotient of x/y rounded down to nearest integer - #define CUB_QUOTIENT_FLOOR(x, y) ((x) / (y)) -#endif - -#ifndef CUB_QUOTIENT_CEILING - /// Quotient of x/y rounded up to nearest integer - #define CUB_QUOTIENT_CEILING(x, y) (((x) + (y) - 1) / (y)) -#endif - -#ifndef CUB_ROUND_UP_NEAREST - /// x rounded up to the nearest multiple of y - #define CUB_ROUND_UP_NEAREST(x, y) ((((x) + (y) - 1) / (y)) * y) -#endif - -#ifndef CUB_ROUND_DOWN_NEAREST - /// x rounded down to the nearest multiple of y - #define CUB_ROUND_DOWN_NEAREST(x, y) (((x) / (y)) * y) -#endif - - -#ifndef CUB_STATIC_ASSERT - #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - #define CUB_CAT_(a, b) a ## b - #define CUB_CAT(a, b) CUB_CAT_(a, b) - #endif // DOXYGEN_SHOULD_SKIP_THIS - - /// Static assert - #define CUB_STATIC_ASSERT(cond, msg) typedef int CUB_CAT(cub_static_assert, __LINE__)[(cond) ? 
1 : -1] -#endif - -/** @} */ // end group UtilModule - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/util_namespace.cuh b/ml-xgboost/cub/cub/util_namespace.cuh deleted file mode 100644 index 8c05718..0000000 --- a/ml-xgboost/cub/cub/util_namespace.cuh +++ /dev/null @@ -1,46 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * Place-holder for prefixing the cub namespace - */ - -#pragma once - -// For example: -//#define CUB_NS_PREFIX namespace thrust{ namespace detail { -//#define CUB_NS_POSTFIX } } - -#ifndef CUB_NS_PREFIX -#define CUB_NS_PREFIX -#endif - -#ifndef CUB_NS_POSTFIX -#define CUB_NS_POSTFIX -#endif diff --git a/ml-xgboost/cub/cub/util_ptx.cuh b/ml-xgboost/cub/cub/util_ptx.cuh deleted file mode 100644 index a15ce66..0000000 --- a/ml-xgboost/cub/cub/util_ptx.cuh +++ /dev/null @@ -1,673 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
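The arithmetic macros in util_macro.cuh are small, but two details in the deleted source above are worth flagging in review: the Windows branch of CUB_ALIGN hardcodes align(32) and ignores its bytes argument, and the trailing * y in CUB_ROUND_UP_NEAREST is unparenthesized. Both are faithful to the source being removed, not transcription errors. A host-side sketch pinning down the intended arithmetic (values illustrative):

#include <cassert>

int main()
{
    // CUB_QUOTIENT_CEILING(7, 4): quotient rounded up.
    assert((7 + 4 - 1) / 4 == 2);
    // CUB_ROUND_UP_NEAREST(7, 4): next multiple of 4 at or above 7.
    assert(((7 + 4 - 1) / 4) * 4 == 8);
    // CUB_ROUND_DOWN_NEAREST(7, 4): previous multiple of 4 at or below 7.
    assert((7 / 4) * 4 == 4);
    return 0;
}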
- * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * PTX intrinsics - */ - - -#pragma once - -#include "util_type.cuh" -#include "util_arch.cuh" -#include "util_namespace.cuh" -#include "util_debug.cuh" - - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \addtogroup UtilPtx - * @{ - */ - - -/****************************************************************************** - * PTX helper macros - ******************************************************************************/ - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - -/** - * Register modifier for pointer-types (for inlining PTX assembly) - */ -#if defined(_WIN64) || defined(__LP64__) - #define __CUB_LP64__ 1 - // 64-bit register modifier for inlined asm - #define _CUB_ASM_PTR_ "l" - #define _CUB_ASM_PTR_SIZE_ "u64" -#else - #define __CUB_LP64__ 0 - // 32-bit register modifier for inlined asm - #define _CUB_ASM_PTR_ "r" - #define _CUB_ASM_PTR_SIZE_ "u32" -#endif - -#endif // DOXYGEN_SHOULD_SKIP_THIS - - -/****************************************************************************** - * Inlined PTX intrinsics - ******************************************************************************/ - -/** - * \brief Shift-right then add. Returns (\p x >> \p shift) + \p addend. - */ -__device__ __forceinline__ unsigned int SHR_ADD( - unsigned int x, - unsigned int shift, - unsigned int addend) -{ - unsigned int ret; -#if CUB_PTX_ARCH >= 200 - asm volatile("vshr.u32.u32.u32.clamp.add %0, %1, %2, %3;" : - "=r"(ret) : "r"(x), "r"(shift), "r"(addend)); -#else - ret = (x >> shift) + addend; -#endif - return ret; -} - - -/** - * \brief Shift-left then add. Returns (\p x << \p shift) + \p addend. - */ -__device__ __forceinline__ unsigned int SHL_ADD( - unsigned int x, - unsigned int shift, - unsigned int addend) -{ - unsigned int ret; -#if CUB_PTX_ARCH >= 200 - asm volatile("vshl.u32.u32.u32.clamp.add %0, %1, %2, %3;" : - "=r"(ret) : "r"(x), "r"(shift), "r"(addend)); -#else - ret = (x << shift) + addend; -#endif - return ret; -} - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - -/** - * Bitfield-extract. 
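SHR_ADD and SHL_ADD fuse a shift with an add via the vshr/vshl video instructions on sm_20+; the pre-sm_20 fallback spells out the exact semantics. A trivial host-side check of those semantics (illustrative values):

#include <cassert>

int main()
{
    unsigned int x = 0xF0u, shift = 4, addend = 3;
    assert((x >> shift) + addend == 0x12u);   // SHR_ADD: (x >> shift) + addend
    assert((x << shift) + addend == 0xF03u);  // SHL_ADD: (x << shift) + addend
    return 0;
}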
- */ -template -__device__ __forceinline__ unsigned int BFE( - UnsignedBits source, - unsigned int bit_start, - unsigned int num_bits, - Int2Type /*byte_len*/) -{ - unsigned int bits; -#if CUB_PTX_ARCH >= 200 - asm volatile("bfe.u32 %0, %1, %2, %3;" : "=r"(bits) : "r"((unsigned int) source), "r"(bit_start), "r"(num_bits)); -#else - const unsigned int MASK = (1 << num_bits) - 1; - bits = (source >> bit_start) & MASK; -#endif - return bits; -} - - -/** - * Bitfield-extract for 64-bit types. - */ -template -__device__ __forceinline__ unsigned int BFE( - UnsignedBits source, - unsigned int bit_start, - unsigned int num_bits, - Int2Type<8> /*byte_len*/) -{ - const unsigned long long MASK = (1ull << num_bits) - 1; - return (source >> bit_start) & MASK; -} - -#endif // DOXYGEN_SHOULD_SKIP_THIS - -/** - * \brief Bitfield-extract. Extracts \p num_bits from \p source starting at bit-offset \p bit_start. The input \p source may be an 8b, 16b, 32b, or 64b unsigned integer type. - */ -template -__device__ __forceinline__ unsigned int BFE( - UnsignedBits source, - unsigned int bit_start, - unsigned int num_bits) -{ - return BFE(source, bit_start, num_bits, Int2Type()); -} - - -/** - * \brief Bitfield insert. Inserts the \p num_bits least significant bits of \p y into \p x at bit-offset \p bit_start. - */ -__device__ __forceinline__ void BFI( - unsigned int &ret, - unsigned int x, - unsigned int y, - unsigned int bit_start, - unsigned int num_bits) -{ -#if CUB_PTX_ARCH >= 200 - asm volatile("bfi.b32 %0, %1, %2, %3, %4;" : - "=r"(ret) : "r"(y), "r"(x), "r"(bit_start), "r"(num_bits)); -#else - x <<= bit_start; - unsigned int MASK_X = ((1 << num_bits) - 1) << bit_start; - unsigned int MASK_Y = ~MASK_X; - ret = (y & MASK_Y) | (x & MASK_X); -#endif -} - - -/** - * \brief Three-operand add. Returns \p x + \p y + \p z. - */ -__device__ __forceinline__ unsigned int IADD3(unsigned int x, unsigned int y, unsigned int z) -{ -#if CUB_PTX_ARCH >= 200 - asm volatile("vadd.u32.u32.u32.add %0, %1, %2, %3;" : "=r"(x) : "r"(x), "r"(y), "r"(z)); -#else - x = x + y + z; -#endif - return x; -} - - -/** - * \brief Byte-permute. Pick four arbitrary bytes from two 32-bit registers, and reassemble them into a 32-bit destination register. For SM2.0 or later. - * - * \par - * The bytes in the two source registers \p a and \p b are numbered from 0 to 7: - * {\p b, \p a} = {{b7, b6, b5, b4}, {b3, b2, b1, b0}}. For each of the four bytes - * {b3, b2, b1, b0} selected in the return value, a 4-bit selector is defined within - * the four lower "nibbles" of \p index: {\p index } = {n7, n6, n5, n4, n3, n2, n1, n0} - * - * \par Snippet - * The code snippet below illustrates byte-permute. - * \par - * \code - * #include - * - * __global__ void ExampleKernel(...) - * { - * int a = 0x03020100; - * int b = 0x07060504; - * int index = 0x00007531; - * - * int selected = PRMT(a, b, index); // 0x07050301 - * - * \endcode - * - */ -__device__ __forceinline__ int PRMT(unsigned int a, unsigned int b, unsigned int index) -{ - int ret; - asm volatile("prmt.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(a), "r"(b), "r"(index)); - return ret; -} - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - -/** - * Sync-threads barrier. 
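One subtlety in the bitfield helpers above is worth a review note: the PTX path of BFI passes operands as bfi.b32 ret, y, x, ..., which inserts the low bits of y into x as documented, while the pre-sm_20 fallback composes (y & ~mask) | ((x << bit_start) & mask) and so appears to swap the roles of x and y. Left as-is here; flagged only as an observation. A host-side sketch of the documented semantics (values illustrative):

#include <cassert>

int main()
{
    // BFE: extract 8 bits of source starting at bit 8.
    unsigned int source = 0xABCD1234u;
    assert(((source >> 8) & 0xFFu) == 0x12u);

    // BFI as documented: insert the 8 LSBs of y into x at bit offset 8.
    unsigned int x = 0xFFFF0000u, y = 0xABu;
    unsigned int mask = 0xFFu << 8;
    assert(((x & ~mask) | ((y << 8) & mask)) == 0xFFFFAB00u);
    return 0;
}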
- */ -__device__ __forceinline__ void BAR(int count) -{ - asm volatile("bar.sync 1, %0;" : : "r"(count)); -} - -/** - * CTA barrier - */ -__device__ __forceinline__ void CTA_SYNC() -{ -#ifdef CUB_USE_COOPERATIVE_GROUPS - __barrier_sync(0); -#else - __syncthreads(); -#endif -} - - -/** - * CTA barrier with predicate - */ -__device__ __forceinline__ int CTA_SYNC_AND(int p) -{ - return __syncthreads_and(p); -} - - -/** - * Warp barrier - */ -__device__ __forceinline__ void WARP_SYNC(unsigned int member_mask) -{ -#ifdef CUB_USE_COOPERATIVE_GROUPS - __syncwarp(member_mask); -#endif -} - - -/** - * Warp any - */ -__device__ __forceinline__ int WARP_ANY(int predicate, unsigned int member_mask) -{ -#ifdef CUB_USE_COOPERATIVE_GROUPS - return __any_sync(member_mask, predicate); -#else - return ::__any(predicate); -#endif -} - - -/** - * Warp any - */ -__device__ __forceinline__ int WARP_ALL(int predicate, unsigned int member_mask) -{ -#ifdef CUB_USE_COOPERATIVE_GROUPS - return __all_sync(member_mask, predicate); -#else - return ::__all(predicate); -#endif -} - - -/** - * Warp ballot - */ -__device__ __forceinline__ int WARP_BALLOT(int predicate, unsigned int member_mask) -{ -#ifdef CUB_USE_COOPERATIVE_GROUPS - return __ballot_sync(member_mask, predicate); -#else - return __ballot(predicate); -#endif -} - -/** - * Warp synchronous shfl_up - */ -__device__ __forceinline__ -unsigned int SHFL_UP_SYNC(unsigned int word, int src_offset, int first_lane, unsigned int member_mask) -{ -#ifdef CUB_USE_COOPERATIVE_GROUPS - asm volatile("shfl.sync.up.b32 %0, %1, %2, %3, %4;" - : "=r"(word) : "r"(word), "r"(src_offset), "r"(first_lane), "r"(member_mask)); -#else - asm volatile("shfl.up.b32 %0, %1, %2, %3;" - : "=r"(word) : "r"(word), "r"(src_offset), "r"(first_lane)); -#endif - return word; -} - -/** - * Warp synchronous shfl_down - */ -__device__ __forceinline__ -unsigned int SHFL_DOWN_SYNC(unsigned int word, int src_offset, int last_lane, unsigned int member_mask) -{ -#ifdef CUB_USE_COOPERATIVE_GROUPS - asm volatile("shfl.sync.down.b32 %0, %1, %2, %3, %4;" - : "=r"(word) : "r"(word), "r"(src_offset), "r"(last_lane), "r"(member_mask)); -#else - asm volatile("shfl.down.b32 %0, %1, %2, %3;" - : "=r"(word) : "r"(word), "r"(src_offset), "r"(last_lane)); -#endif - return word; -} - -/** - * Warp synchronous shfl_idx - */ -__device__ __forceinline__ -unsigned int SHFL_IDX_SYNC(unsigned int word, int src_lane, int last_lane, unsigned int member_mask) -{ -#ifdef CUB_USE_COOPERATIVE_GROUPS - asm volatile("shfl.sync.idx.b32 %0, %1, %2, %3, %4;" - : "=r"(word) : "r"(word), "r"(src_lane), "r"(last_lane), "r"(member_mask)); -#else - asm volatile("shfl.idx.b32 %0, %1, %2, %3;" - : "=r"(word) : "r"(word), "r"(src_lane), "r"(last_lane)); -#endif - return word; -} - -/** - * Floating point multiply. (Mantissa LSB rounds towards zero.) - */ -__device__ __forceinline__ float FMUL_RZ(float a, float b) -{ - float d; - asm volatile("mul.rz.f32 %0, %1, %2;" : "=f"(d) : "f"(a), "f"(b)); - return d; -} - - -/** - * Floating point multiply-add. (Mantissa LSB rounds towards zero.) 
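The SHFL_*_SYNC wrappers above emit shfl.sync PTX when CUB_USE_COOPERATIVE_GROUPS is defined and legacy shfl otherwise. The modern CUDA intrinsics express the same operation directly; a minimal sketch (hypothetical kernel, full-warp mask, single 32-thread block assumed):

#include <cuda_runtime.h>
#include <cstdio>

// Hypothetical kernel: each lane pulls the value held two lanes above it,
// the operation SHFL_DOWN_SYNC wraps.
__global__ void ShflDownDemo()
{
    unsigned int lane  = threadIdx.x & 31;
    unsigned int value = lane;
    unsigned int peer  = __shfl_down_sync(0xffffffffu, value, 2);
    if (lane < 4)
        printf("lane %u received %u\n", lane, peer);  // lanes 0..3 print 2..5
}

int main()
{
    ShflDownDemo<<<1, 32>>>();
    cudaDeviceSynchronize();
    return 0;
}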
- */ -__device__ __forceinline__ float FFMA_RZ(float a, float b, float c) -{ - float d; - asm volatile("fma.rz.f32 %0, %1, %2, %3;" : "=f"(d) : "f"(a), "f"(b), "f"(c)); - return d; -} - -#endif // DOXYGEN_SHOULD_SKIP_THIS - -/** - * \brief Terminates the calling thread - */ -__device__ __forceinline__ void ThreadExit() { - asm volatile("exit;"); -} - - -/** - * \brief Abort execution and generate an interrupt to the host CPU - */ -__device__ __forceinline__ void ThreadTrap() { - asm volatile("trap;"); -} - - -/** - * \brief Returns the row-major linear thread identifier for a multidimensional threadblock - */ -__device__ __forceinline__ int RowMajorTid(int block_dim_x, int block_dim_y, int block_dim_z) -{ - return ((block_dim_z == 1) ? 0 : (threadIdx.z * block_dim_x * block_dim_y)) + - ((block_dim_y == 1) ? 0 : (threadIdx.y * block_dim_x)) + - threadIdx.x; -} - - -/** - * \brief Returns the warp lane ID of the calling thread - */ -__device__ __forceinline__ unsigned int LaneId() -{ - unsigned int ret; - asm volatile("mov.u32 %0, %%laneid;" : "=r"(ret) ); - return ret; -} - - -/** - * \brief Returns the warp ID of the calling thread. Warp ID is guaranteed to be unique among warps, but may not correspond to a zero-based ranking within the thread block. - */ -__device__ __forceinline__ unsigned int WarpId() -{ - unsigned int ret; - asm volatile("mov.u32 %0, %%warpid;" : "=r"(ret) ); - return ret; -} - -/** - * \brief Returns the warp lane mask of all lanes less than the calling thread - */ -__device__ __forceinline__ unsigned int LaneMaskLt() -{ - unsigned int ret; - asm volatile("mov.u32 %0, %%lanemask_lt;" : "=r"(ret) ); - return ret; -} - -/** - * \brief Returns the warp lane mask of all lanes less than or equal to the calling thread - */ -__device__ __forceinline__ unsigned int LaneMaskLe() -{ - unsigned int ret; - asm volatile("mov.u32 %0, %%lanemask_le;" : "=r"(ret) ); - return ret; -} - -/** - * \brief Returns the warp lane mask of all lanes greater than the calling thread - */ -__device__ __forceinline__ unsigned int LaneMaskGt() -{ - unsigned int ret; - asm volatile("mov.u32 %0, %%lanemask_gt;" : "=r"(ret) ); - return ret; -} - -/** - * \brief Returns the warp lane mask of all lanes greater than or equal to the calling thread - */ -__device__ __forceinline__ unsigned int LaneMaskGe() -{ - unsigned int ret; - asm volatile("mov.u32 %0, %%lanemask_ge;" : "=r"(ret) ); - return ret; -} - -/** @} */ // end group UtilPtx - - - -/** - * \brief Shuffle-up for any data type. Each warp-lanei obtains the value \p input contributed by warp-lanei-src_offset. For thread lanes \e i < src_offset, the thread's own \p input is returned to the thread. ![](shfl_up_logo.png) - * \ingroup WarpModule - * - * \par - * - Available only for SM3.0 or newer - * - * \par Snippet - * The code snippet below illustrates each thread obtaining a \p double value from the - * predecessor of its predecessor. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Obtain one input item per thread - * double thread_data = ... - * - * // Obtain item from two ranks below - * double peer_data = ShuffleUp(thread_data, 2, 0, 0xffffffff); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the first warp of threads is {1.0, 2.0, 3.0, 4.0, 5.0, ..., 32.0}. - * The corresponding output \p peer_data will be {1.0, 2.0, 1.0, 2.0, 3.0, ..., 30.0}. 
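LaneId() and the LaneMask* helpers read PTX special registers; LaneMaskLt in particular is the standard ingredient for warp-aggregated ranking. A sketch of that idiom with plain intrinsics (hypothetical kernel; (1u << lane) - 1 equals the %lanemask_lt register that LaneMaskLt() returns):

#include <cuda_runtime.h>
#include <cstdio>

// Hypothetical kernel: rank the lanes whose predicate is set by counting
// set ballot bits below the calling lane.
__global__ void WarpRankDemo()
{
    unsigned int lane = threadIdx.x & 31;
    int predicate = (lane % 3 == 0);                 // arbitrary predicate
    unsigned int ballot  = __ballot_sync(0xffffffffu, predicate);
    unsigned int lt_mask = (1u << lane) - 1;         // == %lanemask_lt
    if (predicate)
        printf("lane %2u has rank %d\n", lane, __popc(ballot & lt_mask));
}

int main()
{
    WarpRankDemo<<<1, 32>>>();
    cudaDeviceSynchronize();
    return 0;
}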
- * - */ -template -__device__ __forceinline__ T ShuffleUp( - T input, ///< [in] The value to broadcast - int src_offset, ///< [in] The relative down-offset of the peer to read from - int first_lane, ///< [in] Index of first lane in segment (typically 0) - unsigned int member_mask) ///< [in] 32-bit mask of participating warp lanes -{ - typedef typename UnitWord::ShuffleWord ShuffleWord; - - const int WORDS = (sizeof(T) + sizeof(ShuffleWord) - 1) / sizeof(ShuffleWord); - - T output; - ShuffleWord *output_alias = reinterpret_cast(&output); - ShuffleWord *input_alias = reinterpret_cast(&input); - - unsigned int shuffle_word; - shuffle_word = SHFL_UP_SYNC((unsigned int)input_alias[0], src_offset, first_lane, member_mask); - output_alias[0] = shuffle_word; - - #pragma unroll - for (int WORD = 1; WORD < WORDS; ++WORD) - { - shuffle_word = SHFL_UP_SYNC((unsigned int)input_alias[WORD], src_offset, first_lane, member_mask); - output_alias[WORD] = shuffle_word; - } - - return output; -} - - -/** - * \brief Shuffle-down for any data type. Each warp-lanei obtains the value \p input contributed by warp-lanei+src_offset. For thread lanes \e i >= WARP_THREADS, the thread's own \p input is returned to the thread. ![](shfl_down_logo.png) - * \ingroup WarpModule - * - * \par - * - Available only for SM3.0 or newer - * - * \par Snippet - * The code snippet below illustrates each thread obtaining a \p double value from the - * successor of its successor. - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) - * { - * // Obtain one input item per thread - * double thread_data = ... - * - * // Obtain item from two ranks below - * double peer_data = ShuffleDown(thread_data, 2, 31, 0xffffffff); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the first warp of threads is {1.0, 2.0, 3.0, 4.0, 5.0, ..., 32.0}. - * The corresponding output \p peer_data will be {3.0, 4.0, 5.0, 6.0, 7.0, ..., 32.0}. - * - */ -template -__device__ __forceinline__ T ShuffleDown( - T input, ///< [in] The value to broadcast - int src_offset, ///< [in] The relative up-offset of the peer to read from - int last_lane, ///< [in] Index of first lane in segment (typically 31) - unsigned int member_mask) ///< [in] 32-bit mask of participating warp lanes -{ - typedef typename UnitWord::ShuffleWord ShuffleWord; - - const int WORDS = (sizeof(T) + sizeof(ShuffleWord) - 1) / sizeof(ShuffleWord); - - T output; - ShuffleWord *output_alias = reinterpret_cast(&output); - ShuffleWord *input_alias = reinterpret_cast(&input); - - unsigned int shuffle_word; - shuffle_word = SHFL_DOWN_SYNC((unsigned int)input_alias[0], src_offset, last_lane, member_mask); - output_alias[0] = shuffle_word; - - #pragma unroll - for (int WORD = 1; WORD < WORDS; ++WORD) - { - shuffle_word = SHFL_DOWN_SYNC((unsigned int)input_alias[WORD], src_offset, last_lane, member_mask); - output_alias[WORD] = shuffle_word; - } - - return output; -} - - -/** - * \brief Shuffle-broadcast for any data type. Each warp-lanei obtains the value \p input - * contributed by warp-lanesrc_lane. For \p src_lane < 0 or \p src_lane >= WARP_THREADS, - * then the thread's own \p input is returned to the thread. ![](shfl_broadcast_logo.png) - * - * \ingroup WarpModule - * - * \par - * - Available only for SM3.0 or newer - * - * \par Snippet - * The code snippet below illustrates each thread obtaining a \p double value from warp-lane0. - * - * \par - * \code - * #include // or equivalently - * - * __global__ void ExampleKernel(...) 
- * { - * // Obtain one input item per thread - * double thread_data = ... - * - * // Obtain item from thread 0 - * double peer_data = ShuffleIndex(thread_data, 0, 32, 0xffffffff); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the first warp of threads is {1.0, 2.0, 3.0, 4.0, 5.0, ..., 32.0}. - * The corresponding output \p peer_data will be {1.0, 1.0, 1.0, 1.0, 1.0, ..., 1.0}. - * - */ -template -__device__ __forceinline__ T ShuffleIndex( - T input, ///< [in] The value to broadcast - int src_lane, ///< [in] Which warp lane is to do the broadcasting - int logical_warp_threads, ///< [in] Number of threads per logical warp - unsigned int member_mask) ///< [in] 32-bit mask of participating warp lanes -{ - typedef typename UnitWord::ShuffleWord ShuffleWord; - - const int WORDS = (sizeof(T) + sizeof(ShuffleWord) - 1) / sizeof(ShuffleWord); - - T output; - ShuffleWord *output_alias = reinterpret_cast(&output); - ShuffleWord *input_alias = reinterpret_cast(&input); - - unsigned int shuffle_word; - shuffle_word = SHFL_IDX_SYNC((unsigned int)input_alias[0], - src_lane, - logical_warp_threads - 1, - member_mask); - - output_alias[0] = shuffle_word; - - #pragma unroll - for (int WORD = 1; WORD < WORDS; ++WORD) - { - shuffle_word = SHFL_IDX_SYNC((unsigned int)input_alias[WORD], - src_lane, - logical_warp_threads - 1, - member_mask); - - output_alias[WORD] = shuffle_word; - } - - return output; -} - - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/util_type.cuh b/ml-xgboost/cub/cub/util_type.cuh deleted file mode 100644 index 6f822b3..0000000 --- a/ml-xgboost/cub/cub/util_type.cuh +++ /dev/null @@ -1,1141 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
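ShuffleUp/ShuffleDown/ShuffleIndex above generalize the 32-bit shuffle to arbitrary T by aliasing it as an array of ShuffleWords and exchanging word by word. The sketch below mirrors that loop for a double using the raw intrinsic (hypothetical kernel; note that __shfl_sync handles double natively, so the word-wise form only matters for types the intrinsics do not cover):

#include <cuda_runtime.h>
#include <cstdio>
#include <cstring>

// Hypothetical kernel: broadcast a double from lane 0 as two 32-bit words,
// the way ShuffleIndex's unrolled loop does.
__global__ void BroadcastDoubleDemo()
{
    double value = (threadIdx.x == 0) ? 3.14159 : 0.0;

    unsigned int words[2];
    memcpy(words, &value, sizeof(value));
    words[0] = __shfl_sync(0xffffffffu, words[0], 0);
    words[1] = __shfl_sync(0xffffffffu, words[1], 0);
    memcpy(&value, words, sizeof(value));

    if (threadIdx.x == 31)
        printf("lane 31 sees %f\n", value);   // prints 3.141590
}

int main()
{
    BroadcastDoubleDemo<<<1, 32>>>();
    cudaDeviceSynchronize();
    return 0;
}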
- * - ******************************************************************************/ - -/** - * \file - * Common type manipulation (metaprogramming) utilities - */ - -#pragma once - -#include -#include -#include - -#include "util_macro.cuh" -#include "util_arch.cuh" -#include "util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \addtogroup UtilModule - * @{ - */ - - - -/****************************************************************************** - * Type equality - ******************************************************************************/ - -/** - * \brief Type selection (IF ? ThenType : ElseType) - */ -template -struct If -{ - /// Conditional type result - typedef ThenType Type; // true -}; - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - -template -struct If -{ - typedef ElseType Type; // false -}; - -#endif // DOXYGEN_SHOULD_SKIP_THIS - - - -/****************************************************************************** - * Conditional types - ******************************************************************************/ - -/** - * \brief Type equality test - */ -template -struct Equals -{ - enum { - VALUE = 0, - NEGATE = 1 - }; -}; - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - -template -struct Equals -{ - enum { - VALUE = 1, - NEGATE = 0 - }; -}; - -#endif // DOXYGEN_SHOULD_SKIP_THIS - - -/****************************************************************************** - * Static math - ******************************************************************************/ - -/** - * \brief Statically determine log2(N), rounded up. - * - * For example: - * Log2<8>::VALUE // 3 - * Log2<3>::VALUE // 2 - */ -template -struct Log2 -{ - /// Static logarithm value - enum { VALUE = Log2> 1), COUNT + 1>::VALUE }; // Inductive case -}; - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - -template -struct Log2 -{ - enum {VALUE = (1 << (COUNT - 1) < N) ? // Base case - COUNT : - COUNT - 1 }; -}; - -#endif // DOXYGEN_SHOULD_SKIP_THIS - - -/** - * \brief Statically determine if N is a power-of-two - */ -template -struct PowerOfTwo -{ - enum { VALUE = ((N & (N - 1)) == 0) }; -}; - - - -/****************************************************************************** - * Pointer vs. iterator detection - ******************************************************************************/ - -/** - * \brief Pointer vs. iterator - */ -template -struct IsPointer -{ - enum { VALUE = 0 }; -}; - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - -template -struct IsPointer -{ - enum { VALUE = 1 }; -}; - -#endif // DOXYGEN_SHOULD_SKIP_THIS - - - -/****************************************************************************** - * Qualifier detection - ******************************************************************************/ - -/** - * \brief Volatile modifier test - */ -template -struct IsVolatile -{ - enum { VALUE = 0 }; -}; - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - -template -struct IsVolatile -{ - enum { VALUE = 1 }; -}; - -#endif // DOXYGEN_SHOULD_SKIP_THIS - - -/****************************************************************************** - * Qualifier removal - ******************************************************************************/ - -/** - * \brief Removes \p const and \p volatile qualifiers from type \p Tp. 
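If/Equals/Log2/PowerOfTwo above are pre-C++11 metafunctions. Their contracts are easy to pin down; the sketch below restates them with constexpr (modern C++ for brevity, not part of the deleted source):

// Log2<N>::VALUE rounds up: the smallest c with (1 << c) >= N.
constexpr int Log2RoundedUp(int n, int count = 0, int cur = 1)
{
    return cur >= n ? count : Log2RoundedUp(n, count + 1, cur * 2);
}
static_assert(Log2RoundedUp(8) == 3, "Log2<8>::VALUE");
static_assert(Log2RoundedUp(3) == 2, "Log2<3>::VALUE");

// PowerOfTwo<N>::VALUE: the usual n & (n - 1) test.
constexpr bool IsPowerOfTwo(unsigned n) { return (n & (n - 1)) == 0; }
static_assert(IsPowerOfTwo(32) && !IsPowerOfTwo(24), "PowerOfTwo");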
- * - * For example: - * typename RemoveQualifiers::Type // int; - */ -template -struct RemoveQualifiers -{ - /// Type without \p const and \p volatile qualifiers - typedef Up Type; -}; - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - -template -struct RemoveQualifiers -{ - typedef Up Type; -}; - -template -struct RemoveQualifiers -{ - typedef Up Type; -}; - -template -struct RemoveQualifiers -{ - typedef Up Type; -}; - - -/****************************************************************************** - * Marker types - ******************************************************************************/ - -/** - * \brief A simple "NULL" marker type - */ -struct NullType -{ -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - - template - __host__ __device__ __forceinline__ NullType& operator =(const T&) { return *this; } - - __host__ __device__ __forceinline__ bool operator ==(const NullType&) { return true; } - - __host__ __device__ __forceinline__ bool operator !=(const NullType&) { return false; } - -#endif // DOXYGEN_SHOULD_SKIP_THIS -}; - - -/** - * \brief Allows for the treatment of an integral constant as a type at compile-time (e.g., to achieve static call dispatch based on constant integral values) - */ -template -struct Int2Type -{ - enum {VALUE = A}; -}; - - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - - -/****************************************************************************** - * Size and alignment - ******************************************************************************/ - -/// Structure alignment -template -struct AlignBytes -{ - struct Pad - { - T val; - char byte; - }; - - enum - { - /// The "true CUDA" alignment of T in bytes - ALIGN_BYTES = sizeof(Pad) - sizeof(T) - }; - - /// The "truly aligned" type - typedef T Type; -}; - -// Specializations where host C++ compilers (e.g., 32-bit Windows) may disagree -// with device C++ compilers (EDG) on types passed as template parameters through -// kernel functions - -#define __CUB_ALIGN_BYTES(t, b) \ - template <> struct AlignBytes \ - { enum { ALIGN_BYTES = b }; typedef __align__(b) t Type; }; - -__CUB_ALIGN_BYTES(short4, 8) -__CUB_ALIGN_BYTES(ushort4, 8) -__CUB_ALIGN_BYTES(int2, 8) -__CUB_ALIGN_BYTES(uint2, 8) -__CUB_ALIGN_BYTES(long long, 8) -__CUB_ALIGN_BYTES(unsigned long long, 8) -__CUB_ALIGN_BYTES(float2, 8) -__CUB_ALIGN_BYTES(double, 8) -#ifdef _WIN32 - __CUB_ALIGN_BYTES(long2, 8) - __CUB_ALIGN_BYTES(ulong2, 8) -#else - __CUB_ALIGN_BYTES(long2, 16) - __CUB_ALIGN_BYTES(ulong2, 16) -#endif -__CUB_ALIGN_BYTES(int4, 16) -__CUB_ALIGN_BYTES(uint4, 16) -__CUB_ALIGN_BYTES(float4, 16) -__CUB_ALIGN_BYTES(long4, 16) -__CUB_ALIGN_BYTES(ulong4, 16) -__CUB_ALIGN_BYTES(longlong2, 16) -__CUB_ALIGN_BYTES(ulonglong2, 16) -__CUB_ALIGN_BYTES(double2, 16) -__CUB_ALIGN_BYTES(longlong4, 16) -__CUB_ALIGN_BYTES(ulonglong4, 16) -__CUB_ALIGN_BYTES(double4, 16) - -template struct AlignBytes : AlignBytes {}; -template struct AlignBytes : AlignBytes {}; -template struct AlignBytes : AlignBytes {}; - - -/// Unit-words of data movement -template -struct UnitWord -{ - enum { - ALIGN_BYTES = AlignBytes::ALIGN_BYTES - }; - - template - struct IsMultiple - { - enum { - UNIT_ALIGN_BYTES = AlignBytes::ALIGN_BYTES, - IS_MULTIPLE = (sizeof(T) % sizeof(Unit) == 0) && (ALIGN_BYTES % UNIT_ALIGN_BYTES == 0) - }; - }; - - /// Biggest shuffle word that T is a whole multiple of and is not larger than the alignment of T - typedef typename If::IS_MULTIPLE, - unsigned int, - typename If::IS_MULTIPLE, - unsigned short, - unsigned 
char>::Type>::Type ShuffleWord; - - /// Biggest volatile word that T is a whole multiple of and is not larger than the alignment of T - typedef typename If::IS_MULTIPLE, - unsigned long long, - ShuffleWord>::Type VolatileWord; - - /// Biggest memory-access word that T is a whole multiple of and is not larger than the alignment of T - typedef typename If::IS_MULTIPLE, - ulonglong2, - VolatileWord>::Type DeviceWord; - - /// Biggest texture reference word that T is a whole multiple of and is not larger than the alignment of T - typedef typename If::IS_MULTIPLE, - uint4, - typename If::IS_MULTIPLE, - uint2, - ShuffleWord>::Type>::Type TextureWord; -}; - - -// float2 specialization workaround (for SM10-SM13) -template <> -struct UnitWord -{ - typedef int ShuffleWord; -#if (CUB_PTX_ARCH > 0) && (CUB_PTX_ARCH <= 130) - typedef float VolatileWord; - typedef uint2 DeviceWord; -#else - typedef unsigned long long VolatileWord; - typedef unsigned long long DeviceWord; -#endif - typedef float2 TextureWord; -}; - -// float4 specialization workaround (for SM10-SM13) -template <> -struct UnitWord -{ - typedef int ShuffleWord; -#if (CUB_PTX_ARCH > 0) && (CUB_PTX_ARCH <= 130) - typedef float VolatileWord; - typedef uint4 DeviceWord; -#else - typedef unsigned long long VolatileWord; - typedef ulonglong2 DeviceWord; -#endif - typedef float4 TextureWord; -}; - - -// char2 specialization workaround (for SM10-SM13) -template <> -struct UnitWord -{ - typedef unsigned short ShuffleWord; -#if (CUB_PTX_ARCH > 0) && (CUB_PTX_ARCH <= 130) - typedef unsigned short VolatileWord; - typedef short DeviceWord; -#else - typedef unsigned short VolatileWord; - typedef unsigned short DeviceWord; -#endif - typedef unsigned short TextureWord; -}; - - -template struct UnitWord : UnitWord {}; -template struct UnitWord : UnitWord {}; -template struct UnitWord : UnitWord {}; - - -#endif // DOXYGEN_SHOULD_SKIP_THIS - - - -/****************************************************************************** - * Vector type inference utilities. - ******************************************************************************/ - -/** - * \brief Exposes a member typedef \p Type that names the corresponding CUDA vector type if one exists. Otherwise \p Type refers to the CubVector structure itself, which will wrap the corresponding \p x, \p y, etc. vector fields. 
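UnitWord above chooses, for each T, the widest shuffle/volatile/device word such that sizeof(T) is a whole multiple of the word and the alignments are compatible; that choice is what lets the generic shuffle and load/store paths move arbitrary T safely. A compile-time sketch of the qualification rule (modern C++, illustrative only):

#include <cstddef>

// Sketch of UnitWord's IsMultiple test: a unit word qualifies when it
// divides both the size and the alignment of T.
template <typename T, typename Unit>
constexpr bool qualifies =
    (sizeof(T) % sizeof(Unit) == 0) && (alignof(T) % alignof(Unit) == 0);

struct alignas(8) TwoFloats { float x, y; };

static_assert(qualifies<TwoFloats, unsigned long long>, "8B word fits");
static_assert(qualifies<TwoFloats, unsigned int>,       "4B word fits");
static_assert(!qualifies<char[3], unsigned int>,        "3B payload does not");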
- */ -template struct CubVector; - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - -enum -{ - /// The maximum number of elements in CUDA vector types - MAX_VEC_ELEMENTS = 4, -}; - - -/** - * Generic vector-1 type - */ -template -struct CubVector -{ - T x; - - typedef T BaseType; - typedef CubVector Type; -}; - -/** - * Generic vector-2 type - */ -template -struct CubVector -{ - T x; - T y; - - typedef T BaseType; - typedef CubVector Type; -}; - -/** - * Generic vector-3 type - */ -template -struct CubVector -{ - T x; - T y; - T z; - - typedef T BaseType; - typedef CubVector Type; -}; - -/** - * Generic vector-4 type - */ -template -struct CubVector -{ - T x; - T y; - T z; - T w; - - typedef T BaseType; - typedef CubVector Type; -}; - - -/** - * Macro for expanding partially-specialized built-in vector types - */ -#define CUB_DEFINE_VECTOR_TYPE(base_type,short_type) \ - \ - template<> struct CubVector : short_type##1 \ - { \ - typedef base_type BaseType; \ - typedef short_type##1 Type; \ - __host__ __device__ __forceinline__ CubVector operator+(const CubVector &other) const { \ - CubVector retval; \ - retval.x = x + other.x; \ - return retval; \ - } \ - __host__ __device__ __forceinline__ CubVector operator-(const CubVector &other) const { \ - CubVector retval; \ - retval.x = x - other.x; \ - return retval; \ - } \ - }; \ - \ - template<> struct CubVector : short_type##2 \ - { \ - typedef base_type BaseType; \ - typedef short_type##2 Type; \ - __host__ __device__ __forceinline__ CubVector operator+(const CubVector &other) const { \ - CubVector retval; \ - retval.x = x + other.x; \ - retval.y = y + other.y; \ - return retval; \ - } \ - __host__ __device__ __forceinline__ CubVector operator-(const CubVector &other) const { \ - CubVector retval; \ - retval.x = x - other.x; \ - retval.y = y - other.y; \ - return retval; \ - } \ - }; \ - \ - template<> struct CubVector : short_type##3 \ - { \ - typedef base_type BaseType; \ - typedef short_type##3 Type; \ - __host__ __device__ __forceinline__ CubVector operator+(const CubVector &other) const { \ - CubVector retval; \ - retval.x = x + other.x; \ - retval.y = y + other.y; \ - retval.z = z + other.z; \ - return retval; \ - } \ - __host__ __device__ __forceinline__ CubVector operator-(const CubVector &other) const { \ - CubVector retval; \ - retval.x = x - other.x; \ - retval.y = y - other.y; \ - retval.z = z - other.z; \ - return retval; \ - } \ - }; \ - \ - template<> struct CubVector : short_type##4 \ - { \ - typedef base_type BaseType; \ - typedef short_type##4 Type; \ - __host__ __device__ __forceinline__ CubVector operator+(const CubVector &other) const { \ - CubVector retval; \ - retval.x = x + other.x; \ - retval.y = y + other.y; \ - retval.z = z + other.z; \ - retval.w = w + other.w; \ - return retval; \ - } \ - __host__ __device__ __forceinline__ CubVector operator-(const CubVector &other) const { \ - CubVector retval; \ - retval.x = x - other.x; \ - retval.y = y - other.y; \ - retval.z = z - other.z; \ - retval.w = w - other.w; \ - return retval; \ - } \ - }; - - - -// Expand CUDA vector types for built-in primitives -CUB_DEFINE_VECTOR_TYPE(char, char) -CUB_DEFINE_VECTOR_TYPE(signed char, char) -CUB_DEFINE_VECTOR_TYPE(short, short) -CUB_DEFINE_VECTOR_TYPE(int, int) -CUB_DEFINE_VECTOR_TYPE(long, long) -CUB_DEFINE_VECTOR_TYPE(long long, longlong) -CUB_DEFINE_VECTOR_TYPE(unsigned char, uchar) -CUB_DEFINE_VECTOR_TYPE(unsigned short, ushort) -CUB_DEFINE_VECTOR_TYPE(unsigned int, uint) -CUB_DEFINE_VECTOR_TYPE(unsigned long, ulong) 
-CUB_DEFINE_VECTOR_TYPE(unsigned long long, ulonglong) -CUB_DEFINE_VECTOR_TYPE(float, float) -CUB_DEFINE_VECTOR_TYPE(double, double) -CUB_DEFINE_VECTOR_TYPE(bool, uchar) - -// Undefine macros -#undef CUB_DEFINE_VECTOR_TYPE - -#endif // DOXYGEN_SHOULD_SKIP_THIS - - - -/****************************************************************************** - * Wrapper types - ******************************************************************************/ - -/** - * \brief A storage-backing wrapper that allows types with non-trivial constructors to be aliased in unions - */ -template -struct Uninitialized -{ - /// Biggest memory-access word that T is a whole multiple of and is not larger than the alignment of T - typedef typename UnitWord::DeviceWord DeviceWord; - - enum - { - WORDS = sizeof(T) / sizeof(DeviceWord) - }; - - /// Backing storage - DeviceWord storage[WORDS]; - - /// Alias - __host__ __device__ __forceinline__ T& Alias() - { - return reinterpret_cast(*this); - } -}; - - -/** - * \brief A key identifier paired with a corresponding value - */ -template < - typename _Key, - typename _Value -#if defined(_WIN32) && !defined(_WIN64) - , bool KeyIsLT = (AlignBytes<_Key>::ALIGN_BYTES < AlignBytes<_Value>::ALIGN_BYTES) - , bool ValIsLT = (AlignBytes<_Value>::ALIGN_BYTES < AlignBytes<_Key>::ALIGN_BYTES) -#endif // #if defined(_WIN32) && !defined(_WIN64) - > -struct KeyValuePair -{ - typedef _Key Key; ///< Key data type - typedef _Value Value; ///< Value data type - - Key key; ///< Item key - Value value; ///< Item value - - /// Constructor - __host__ __device__ __forceinline__ - KeyValuePair() {} - - /// Constructor - __host__ __device__ __forceinline__ - KeyValuePair(Key const& key, Value const& value) : key(key), value(value) {} - - /// Inequality operator - __host__ __device__ __forceinline__ bool operator !=(const KeyValuePair &b) - { - return (value != b.value) || (key != b.key); - } -}; - -#if defined(_WIN32) && !defined(_WIN64) - -/** - * Win32 won't do 16B alignment. 
This can present two problems for - * should-be-16B-aligned (but actually 8B aligned) built-in and intrinsics members: - * 1) If a smaller-aligned item were to be listed first, the host compiler places the - * should-be-16B item at too early an offset (and disagrees with device compiler) - * 2) Or, if a smaller-aligned item lists second, the host compiler gets the size - * of the struct wrong (and disagrees with device compiler) - * - * So we put the larger-should-be-aligned item first, and explicitly pad the - * end of the struct - */ - -/// Smaller key specialization -template -struct KeyValuePair -{ - typedef K Key; - typedef V Value; - - typedef char Pad[AlignBytes::ALIGN_BYTES - AlignBytes::ALIGN_BYTES]; - - Value value; // Value has larger would-be alignment and goes first - Key key; - Pad pad; - - /// Constructor - __host__ __device__ __forceinline__ - KeyValuePair() {} - - /// Constructor - __host__ __device__ __forceinline__ - KeyValuePair(Key const& key, Value const& value) : key(key), value(value) {} - - /// Inequality operator - __host__ __device__ __forceinline__ bool operator !=(const KeyValuePair &b) - { - return (value != b.value) || (key != b.key); - } -}; - - -/// Smaller value specialization -template -struct KeyValuePair -{ - typedef K Key; - typedef V Value; - - typedef char Pad[AlignBytes::ALIGN_BYTES - AlignBytes::ALIGN_BYTES]; - - Key key; // Key has larger would-be alignment and goes first - Value value; - Pad pad; - - /// Constructor - __host__ __device__ __forceinline__ - KeyValuePair() {} - - /// Constructor - __host__ __device__ __forceinline__ - KeyValuePair(Key const& key, Value const& value) : key(key), value(value) {} - - /// Inequality operator - __host__ __device__ __forceinline__ bool operator !=(const KeyValuePair &b) - { - return (value != b.value) || (key != b.key); - } -}; - -#endif // #if defined(_WIN32) && !defined(_WIN64) - - -#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - - -/** - * \brief A wrapper for passing simple static arrays as kernel parameters - */ -template -struct ArrayWrapper -{ - - /// Statically-sized array of type \p T - T array[COUNT]; - - /// Constructor - __host__ __device__ __forceinline__ ArrayWrapper() {} -}; - -#endif // DOXYGEN_SHOULD_SKIP_THIS - -/** - * \brief Double-buffer storage wrapper for multi-pass stream transformations that require more than one storage array for streaming intermediate results back and forth. - * - * Many multi-pass computations require a pair of "ping-pong" storage - * buffers (e.g., one for reading from and the other for writing to, and then - * vice-versa for the subsequent pass). This structure wraps a set of device - * buffers and a "selector" member to track which is "current". 
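DoubleBuffer is the ping-pong wrapper that CUB's multi-pass algorithms mutate in place: after a call such as DeviceRadixSort::SortKeys, selector indicates which of the two buffers holds valid output. A usage sketch against the public CUB API (the two device allocations are assumed to exist and hold num_items keys; error handling omitted):

#include <cub/cub.cuh>
#include <cuda_runtime.h>

void SortWithDoubleBuffer(int *d_key_buf, int *d_key_alt_buf, int num_items)
{
    cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf);

    // First call only sizes the temporary storage; second call sorts.
    void  *d_temp_storage     = nullptr;
    size_t temp_storage_bytes = 0;
    cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes,
                                   d_keys, num_items);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes,
                                   d_keys, num_items);

    // d_keys.Current() now points at the sorted keys; Alternate() is scratch.
    cudaFree(d_temp_storage);
}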
- */ -template -struct DoubleBuffer -{ - /// Pair of device buffer pointers - T *d_buffers[2]; - - /// Selector into \p d_buffers (i.e., the active/valid buffer) - int selector; - - /// \brief Constructor - __host__ __device__ __forceinline__ DoubleBuffer() - { - selector = 0; - d_buffers[0] = NULL; - d_buffers[1] = NULL; - } - - /// \brief Constructor - __host__ __device__ __forceinline__ DoubleBuffer( - T *d_current, ///< The currently valid buffer - T *d_alternate) ///< Alternate storage buffer of the same size as \p d_current - { - selector = 0; - d_buffers[0] = d_current; - d_buffers[1] = d_alternate; - } - - /// \brief Return pointer to the currently valid buffer - __host__ __device__ __forceinline__ T* Current() { return d_buffers[selector]; } - - /// \brief Return pointer to the currently invalid buffer - __host__ __device__ __forceinline__ T* Alternate() { return d_buffers[selector ^ 1]; } - -}; - - - -/****************************************************************************** - * Typedef-detection - ******************************************************************************/ - - -/** - * \brief Defines a structure \p detector_name that is templated on type \p T. The \p detector_name struct exposes a constant member \p VALUE indicating whether or not parameter \p T exposes a nested type \p nested_type_name - */ -#define CUB_DEFINE_DETECT_NESTED_TYPE(detector_name, nested_type_name) \ - template \ - struct detector_name \ - { \ - template \ - static char& test(typename C::nested_type_name*); \ - template \ - static int& test(...); \ - enum \ - { \ - VALUE = sizeof(test(0)) < sizeof(int) \ - }; \ - }; - - - -/****************************************************************************** - * Simple enable-if (similar to Boost) - ******************************************************************************/ - -/** - * \brief Simple enable-if (similar to Boost) - */ -template -struct EnableIf -{ - /// Enable-if type for SFINAE dummy variables - typedef T Type; -}; - - -template -struct EnableIf {}; - - - -/****************************************************************************** - * Typedef-detection - ******************************************************************************/ - -/** - * \brief Determine whether or not BinaryOp's functor is of the form bool operator()(const T& a, const T&b) or bool operator()(const T& a, const T&b, unsigned int idx) - */ -template -struct BinaryOpHasIdxParam -{ -private: -/* - template struct SFINAE1 {}; - template struct SFINAE2 {}; - template struct SFINAE3 {}; - template struct SFINAE4 {}; -*/ - template struct SFINAE5 {}; - template struct SFINAE6 {}; - template struct SFINAE7 {}; - template struct SFINAE8 {}; -/* - template static char Test(SFINAE1 *); - template static char Test(SFINAE2 *); - template static char Test(SFINAE3 *); - template static char Test(SFINAE4 *); -*/ - template static char Test(SFINAE5 *); - template static char Test(SFINAE6 *); - template static char Test(SFINAE7 *); - template static char Test(SFINAE8 *); - - template static int Test(...); - -public: - - /// Whether the functor BinaryOp has a third unsigned int index param - static const bool HAS_PARAM = sizeof(Test(NULL)) == sizeof(char); -}; - - - - -/****************************************************************************** - * Simple type traits utilities. 
- * - * For example: - * Traits::CATEGORY // SIGNED_INTEGER - * Traits::NULL_TYPE // true - * Traits::CATEGORY // NOT_A_NUMBER - * Traits::PRIMITIVE; // false - * - ******************************************************************************/ - -/** - * \brief Basic type traits categories - */ -enum Category -{ - NOT_A_NUMBER, - SIGNED_INTEGER, - UNSIGNED_INTEGER, - FLOATING_POINT -}; - - -/** - * \brief Basic type traits - */ -template -struct BaseTraits -{ - /// Category - static const Category CATEGORY = _CATEGORY; - enum - { - PRIMITIVE = _PRIMITIVE, - NULL_TYPE = _NULL_TYPE, - }; -}; - - -/** - * Basic type traits (unsigned primitive specialization) - */ -template -struct BaseTraits -{ - typedef _UnsignedBits UnsignedBits; - - static const Category CATEGORY = UNSIGNED_INTEGER; - static const UnsignedBits LOWEST_KEY = UnsignedBits(0); - static const UnsignedBits MAX_KEY = UnsignedBits(-1); - - enum - { - PRIMITIVE = true, - NULL_TYPE = false, - }; - - - static __device__ __forceinline__ UnsignedBits TwiddleIn(UnsignedBits key) - { - return key; - } - - static __device__ __forceinline__ UnsignedBits TwiddleOut(UnsignedBits key) - { - return key; - } - - static __host__ __device__ __forceinline__ T Max() - { - UnsignedBits retval = MAX_KEY; - return reinterpret_cast(retval); - } - - static __host__ __device__ __forceinline__ T Lowest() - { - UnsignedBits retval = LOWEST_KEY; - return reinterpret_cast(retval); - } -}; - - -/** - * Basic type traits (signed primitive specialization) - */ -template -struct BaseTraits -{ - typedef _UnsignedBits UnsignedBits; - - static const Category CATEGORY = SIGNED_INTEGER; - static const UnsignedBits HIGH_BIT = UnsignedBits(1) << ((sizeof(UnsignedBits) * 8) - 1); - static const UnsignedBits LOWEST_KEY = HIGH_BIT; - static const UnsignedBits MAX_KEY = UnsignedBits(-1) ^ HIGH_BIT; - - enum - { - PRIMITIVE = true, - NULL_TYPE = false, - }; - - static __device__ __forceinline__ UnsignedBits TwiddleIn(UnsignedBits key) - { - return key ^ HIGH_BIT; - }; - - static __device__ __forceinline__ UnsignedBits TwiddleOut(UnsignedBits key) - { - return key ^ HIGH_BIT; - }; - - static __host__ __device__ __forceinline__ T Max() - { - UnsignedBits retval = MAX_KEY; - return reinterpret_cast(retval); - } - - static __host__ __device__ __forceinline__ T Lowest() - { - UnsignedBits retval = LOWEST_KEY; - return reinterpret_cast(retval); - } -}; - -template -struct FpLimits; - -template <> -struct FpLimits -{ - static __host__ __device__ __forceinline__ float Max() { - return FLT_MAX; - } - - static __host__ __device__ __forceinline__ float Lowest() { - return FLT_MAX * float(-1); - } -}; - -template <> -struct FpLimits -{ - static __host__ __device__ __forceinline__ double Max() { - return DBL_MAX; - } - - static __host__ __device__ __forceinline__ double Lowest() { - return DBL_MAX * double(-1); - } -}; - - -/** - * Basic type traits (fp primitive specialization) - */ -template -struct BaseTraits -{ - typedef _UnsignedBits UnsignedBits; - - static const Category CATEGORY = FLOATING_POINT; - static const UnsignedBits HIGH_BIT = UnsignedBits(1) << ((sizeof(UnsignedBits) * 8) - 1); - static const UnsignedBits LOWEST_KEY = UnsignedBits(-1); - static const UnsignedBits MAX_KEY = UnsignedBits(-1) ^ HIGH_BIT; - - enum - { - PRIMITIVE = true, - NULL_TYPE = false, - }; - - static __device__ __forceinline__ UnsignedBits TwiddleIn(UnsignedBits key) - { - UnsignedBits mask = (key & HIGH_BIT) ? 
UnsignedBits(-1) : HIGH_BIT; - return key ^ mask; - }; - - static __device__ __forceinline__ UnsignedBits TwiddleOut(UnsignedBits key) - { - UnsignedBits mask = (key & HIGH_BIT) ? HIGH_BIT : UnsignedBits(-1); - return key ^ mask; - }; - - static __host__ __device__ __forceinline__ T Max() { - return FpLimits::Max(); - } - - static __host__ __device__ __forceinline__ T Lowest() { - return FpLimits::Lowest(); - } -}; - - -/** - * \brief Numeric type traits - */ -template struct NumericTraits : BaseTraits {}; - -template <> struct NumericTraits : BaseTraits {}; - -template <> struct NumericTraits : BaseTraits<(std::numeric_limits::is_signed) ? SIGNED_INTEGER : UNSIGNED_INTEGER, true, false, unsigned char, char> {}; -template <> struct NumericTraits : BaseTraits {}; -template <> struct NumericTraits : BaseTraits {}; -template <> struct NumericTraits : BaseTraits {}; -template <> struct NumericTraits : BaseTraits {}; -template <> struct NumericTraits : BaseTraits {}; - -template <> struct NumericTraits : BaseTraits {}; -template <> struct NumericTraits : BaseTraits {}; -template <> struct NumericTraits : BaseTraits {}; -template <> struct NumericTraits : BaseTraits {}; -template <> struct NumericTraits : BaseTraits {}; - -template <> struct NumericTraits : BaseTraits {}; -template <> struct NumericTraits : BaseTraits {}; - -template <> struct NumericTraits : BaseTraits::VolatileWord, bool> {}; - - - -/** - * \brief Type traits - */ -template -struct Traits : NumericTraits::Type> {}; - - -#endif // DOXYGEN_SHOULD_SKIP_THIS - - -/** @} */ // end group UtilModule - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/warp/specializations/warp_reduce_shfl.cuh b/ml-xgboost/cub/cub/warp/specializations/warp_reduce_shfl.cuh deleted file mode 100644 index 71a3bf0..0000000 --- a/ml-xgboost/cub/cub/warp/specializations/warp_reduce_shfl.cuh +++ /dev/null @@ -1,549 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
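TwiddleIn/TwiddleOut above implement the order-preserving bit transforms radix sort needs: unsigned keys pass through, signed keys flip the sign bit, and floating-point keys flip every bit when negative but only the sign bit otherwise. A host-side sketch of the float case (mirrors the FLOATING_POINT specialization; illustrative values):

#include <cassert>
#include <cstdint>
#include <cstring>

// Order-preserving float -> uint32 twiddle: after this, unsigned integer
// comparison agrees with IEEE float ordering.
static uint32_t TwiddleInFloat(float f)
{
    uint32_t bits;
    std::memcpy(&bits, &f, sizeof(bits));
    uint32_t mask = (bits & 0x80000000u) ? 0xffffffffu : 0x80000000u;
    return bits ^ mask;
}

int main()
{
    assert(TwiddleInFloat(-2.0f) < TwiddleInFloat(-1.0f));
    assert(TwiddleInFloat(-1.0f) < TwiddleInFloat( 0.0f));
    assert(TwiddleInFloat( 0.0f) < TwiddleInFloat( 1.0f));
    return 0;
}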
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::WarpReduceShfl provides SHFL-based variants of parallel reduction of items partitioned across a CUDA thread warp. - */ - -#pragma once - -#include "../../thread/thread_operators.cuh" -#include "../../util_ptx.cuh" -#include "../../util_type.cuh" -#include "../../util_macro.cuh" -#include "../../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \brief WarpReduceShfl provides SHFL-based variants of parallel reduction of items partitioned across a CUDA thread warp. - * - * LOGICAL_WARP_THREADS must be a power-of-two - */ -template < - typename T, ///< Data type being reduced - int LOGICAL_WARP_THREADS, ///< Number of threads per logical warp - int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective -struct WarpReduceShfl -{ - //--------------------------------------------------------------------- - // Constants and type definitions - //--------------------------------------------------------------------- - - enum - { - /// Whether the logical warp size and the PTX warp size coincide - IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(PTX_ARCH)), - - /// The number of warp reduction steps - STEPS = Log2::VALUE, - - /// Number of logical warps in a PTX warp - LOGICAL_WARPS = CUB_WARP_THREADS(PTX_ARCH) / LOGICAL_WARP_THREADS, - }; - - template - struct IsInteger - { - enum { - ///Whether the data type is a small (32b or less) integer for which we can use a single SFHL instruction per exchange - IS_SMALL_UNSIGNED = (Traits::CATEGORY == UNSIGNED_INTEGER) && (sizeof(S) <= sizeof(unsigned int)) - }; - }; - - - // Creates a mask where the last thread in each logical warp is set - template - struct LastLaneMask - { - enum { - BASE_MASK = 1 << (LOGICAL_WARP_THREADS - 1), - MASK = (LastLaneMask::MASK << LOGICAL_WARP_THREADS) | BASE_MASK, - }; - }; - - // Creates a mask where the last thread in each logical warp is set - template - struct LastLaneMask - { - enum { - MASK = 1 << (LOGICAL_WARP_THREADS - 1), - }; - }; - - - - /// Shared memory storage layout type - typedef NullType TempStorage; - - - //--------------------------------------------------------------------- - // Thread fields - //--------------------------------------------------------------------- - - int lane_id; - - int member_mask; - - //--------------------------------------------------------------------- - // Construction - //--------------------------------------------------------------------- - - /// Constructor - __device__ __forceinline__ WarpReduceShfl( - TempStorage &/*temp_storage*/) - : - lane_id(LaneId()), - - member_mask(IS_ARCH_WARP ? 
- 0xffffffff : - (0xffffffff >> (32 - LOGICAL_WARP_THREADS)) << (LaneId() / LOGICAL_WARP_THREADS)) - {} - - - //--------------------------------------------------------------------- - // Reduction steps - //--------------------------------------------------------------------- - - /// Reduction (specialized for summation across uint32 types) - __device__ __forceinline__ unsigned int ReduceStep( - unsigned int input, ///< [in] Calling thread's input item. - cub::Sum /*reduction_op*/, ///< [in] Binary reduction operator - int last_lane, ///< [in] Index of last lane in segment - int offset) ///< [in] Up-offset to pull from - { - unsigned int output; - - // Use predicate set from SHFL to guard against invalid peers -#ifdef CUB_USE_COOPERATIVE_GROUPS - asm volatile( - "{" - " .reg .u32 r0;" - " .reg .pred p;" - " shfl.sync.down.b32 r0|p, %1, %2, %3, %5;" - " @p add.u32 r0, r0, %4;" - " mov.u32 %0, r0;" - "}" - : "=r"(output) : "r"(input), "r"(offset), "r"(last_lane), "r"(input), "r"(member_mask)); -#else - asm volatile( - "{" - " .reg .u32 r0;" - " .reg .pred p;" - " shfl.down.b32 r0|p, %1, %2, %3;" - " @p add.u32 r0, r0, %4;" - " mov.u32 %0, r0;" - "}" - : "=r"(output) : "r"(input), "r"(offset), "r"(last_lane), "r"(input)); -#endif - - return output; - } - - - /// Reduction (specialized for summation across fp32 types) - __device__ __forceinline__ float ReduceStep( - float input, ///< [in] Calling thread's input item. - cub::Sum /*reduction_op*/, ///< [in] Binary reduction operator - int last_lane, ///< [in] Index of last lane in segment - int offset) ///< [in] Up-offset to pull from - { - float output; - - // Use predicate set from SHFL to guard against invalid peers -#ifdef CUB_USE_COOPERATIVE_GROUPS - asm volatile( - "{" - " .reg .f32 r0;" - " .reg .pred p;" - " shfl.sync.down.b32 r0|p, %1, %2, %3, %5;" - " @p add.f32 r0, r0, %4;" - " mov.f32 %0, r0;" - "}" - : "=f"(output) : "f"(input), "r"(offset), "r"(last_lane), "f"(input), "r"(member_mask)); -#else - asm volatile( - "{" - " .reg .f32 r0;" - " .reg .pred p;" - " shfl.down.b32 r0|p, %1, %2, %3;" - " @p add.f32 r0, r0, %4;" - " mov.f32 %0, r0;" - "}" - : "=f"(output) : "f"(input), "r"(offset), "r"(last_lane), "f"(input)); -#endif - - return output; - } - - - /// Reduction (specialized for summation across unsigned long long types) - __device__ __forceinline__ unsigned long long ReduceStep( - unsigned long long input, ///< [in] Calling thread's input item. - cub::Sum /*reduction_op*/, ///< [in] Binary reduction operator - int last_lane, ///< [in] Index of last lane in segment - int offset) ///< [in] Up-offset to pull from - { - unsigned long long output; - -#ifdef CUB_USE_COOPERATIVE_GROUPS - asm volatile( - "{" - " .reg .u32 lo;" - " .reg .u32 hi;" - " .reg .pred p;" - " mov.b64 {lo, hi}, %1;" - " shfl.sync.down.b32 lo|p, lo, %2, %3, %4;" - " shfl.sync.down.b32 hi|p, hi, %2, %3, %4;" - " mov.b64 %0, {lo, hi};" - " @p add.u64 %0, %0, %1;" - "}" - : "=l"(output) : "l"(input), "r"(offset), "r"(last_lane), "r"(member_mask)); -#else - asm volatile( - "{" - " .reg .u32 lo;" - " .reg .u32 hi;" - " .reg .pred p;" - " mov.b64 {lo, hi}, %1;" - " shfl.down.b32 lo|p, lo, %2, %3" - " shfl.down.b32 hi|p, hi, %2, %3;" - " mov.b64 %0, {lo, hi};" - " @p add.u64 %0, %0, %1;" - "}" - : "=l"(output) : "l"(input), "r"(offset), "r"(last_lane)); -#endif - - return output; - } - - - /// Reduction (specialized for summation across long long types) - __device__ __forceinline__ long long ReduceStep( - long long input, ///< [in] Calling thread's input item. 
- cub::Sum /*reduction_op*/, ///< [in] Binary reduction operator - int last_lane, ///< [in] Index of last lane in segment - int offset) ///< [in] Up-offset to pull from - { - long long output; - - // Use predicate set from SHFL to guard against invalid peers -#ifdef CUB_USE_COOPERATIVE_GROUPS - asm volatile( - "{" - " .reg .u32 lo;" - " .reg .u32 hi;" - " .reg .pred p;" - " mov.b64 {lo, hi}, %1;" - " shfl.sync.down.b32 lo|p, lo, %2, %3, %4;" - " shfl.sync.down.b32 hi|p, hi, %2, %3, %4;" - " mov.b64 %0, {lo, hi};" - " @p add.s64 %0, %0, %1;" - "}" - : "=l"(output) : "l"(input), "r"(offset), "r"(last_lane), "r"(member_mask)); -#else - asm volatile( - "{" - " .reg .u32 lo;" - " .reg .u32 hi;" - " .reg .pred p;" - " mov.b64 {lo, hi}, %1;" - " shfl.down.b32 lo|p, lo, %2, %3;" - " shfl.down.b32 hi|p, hi, %2, %3;" - " mov.b64 %0, {lo, hi};" - " @p add.s64 %0, %0, %1;" - "}" - : "=l"(output) : "l"(input), "r"(offset), "r"(last_lane)); -#endif - - return output; - } - - - /// Reduction (specialized for summation across double types) - __device__ __forceinline__ double ReduceStep( - double input, ///< [in] Calling thread's input item. - cub::Sum /*reduction_op*/, ///< [in] Binary reduction operator - int last_lane, ///< [in] Index of last lane in segment - int offset) ///< [in] Up-offset to pull from - { - double output; - - // Use predicate set from SHFL to guard against invalid peers -#ifdef CUB_USE_COOPERATIVE_GROUPS - asm volatile( - "{" - " .reg .u32 lo;" - " .reg .u32 hi;" - " .reg .pred p;" - " .reg .f64 r0;" - " mov.b64 %0, %1;" - " mov.b64 {lo, hi}, %1;" - " shfl.sync.down.b32 lo|p, lo, %2, %3, %4;" - " shfl.sync.down.b32 hi|p, hi, %2, %3, %4;" - " mov.b64 r0, {lo, hi};" - " @p add.f64 %0, %0, r0;" - "}" - : "=d"(output) : "d"(input), "r"(offset), "r"(last_lane), "r"(member_mask)); -#else - asm volatile( - "{" - " .reg .u32 lo;" - " .reg .u32 hi;" - " .reg .pred p;" - " .reg .f64 r0;" - " mov.b64 %0, %1;" - " mov.b64 {lo, hi}, %1;" - " shfl.down.b32 lo|p, lo, %2, %3;" - " shfl.down.b32 hi|p, hi, %2, %3;" - " mov.b64 r0, {lo, hi};" - " @p add.f64 %0, %0, r0;" - "}" - : "=d"(output) : "d"(input), "r"(offset), "r"(last_lane)); -#endif - - return output; - } - - - /// Reduction (specialized for swizzled ReduceByKeyOp across KeyValuePair types) - template - __device__ __forceinline__ KeyValuePair ReduceStep( - KeyValuePair input, ///< [in] Calling thread's input item. - SwizzleScanOp > /*reduction_op*/, ///< [in] Binary reduction operator - int last_lane, ///< [in] Index of last lane in segment - int offset) ///< [in] Up-offset to pull from - { - KeyValuePair output; - - KeyT other_key = ShuffleDown(input.key, offset, last_lane, member_mask); - - output.key = input.key; - output.value = ReduceStep( - input.value, - cub::Sum(), - last_lane, - offset, - Int2Type::IS_SMALL_UNSIGNED>()); - - if (input.key != other_key) - output.value = input.value; - - return output; - } - - - - /// Reduction (specialized for swizzled ReduceBySegmentOp across KeyValuePair types) - template - __device__ __forceinline__ KeyValuePair ReduceStep( - KeyValuePair input, ///< [in] Calling thread's input item. 
- SwizzleScanOp > /*reduction_op*/, ///< [in] Binary reduction operator - int last_lane, ///< [in] Index of last lane in segment - int offset) ///< [in] Up-offset to pull from - { - KeyValuePair output; - - output.value = ReduceStep(input.value, cub::Sum(), last_lane, offset, Int2Type::IS_SMALL_UNSIGNED>()); - output.key = ReduceStep(input.key, cub::Sum(), last_lane, offset, Int2Type::IS_SMALL_UNSIGNED>()); - - if (input.key > 0) - output.value = input.value; - - return output; - } - - - /// Reduction step (generic) - template - __device__ __forceinline__ _T ReduceStep( - _T input, ///< [in] Calling thread's input item. - ReductionOp reduction_op, ///< [in] Binary reduction operator - int last_lane, ///< [in] Index of last lane in segment - int offset) ///< [in] Up-offset to pull from - { - _T output = input; - - _T temp = ShuffleDown(output, offset, last_lane, member_mask); - - // Perform reduction op if valid - if (offset + lane_id <= last_lane) - output = reduction_op(input, temp); - - return output; - } - - - /// Reduction step (specialized for small unsigned integers size 32b or less) - template - __device__ __forceinline__ _T ReduceStep( - _T input, ///< [in] Calling thread's input item. - ReductionOp reduction_op, ///< [in] Binary reduction operator - int last_lane, ///< [in] Index of last lane in segment - int offset, ///< [in] Up-offset to pull from - Int2Type /*is_small_unsigned*/) ///< [in] Marker type indicating whether T is a small unsigned integer - { - return ReduceStep(input, reduction_op, last_lane, offset); - } - - - /// Reduction step (specialized for types other than small unsigned integers size 32b or less) - template - __device__ __forceinline__ _T ReduceStep( - _T input, ///< [in] Calling thread's input item. - ReductionOp reduction_op, ///< [in] Binary reduction operator - int last_lane, ///< [in] Index of last lane in segment - int offset, ///< [in] Up-offset to pull from - Int2Type /*is_small_unsigned*/) ///< [in] Marker type indicating whether T is a small unsigned integer - { - return ReduceStep(input, reduction_op, last_lane, offset); - } - - - //--------------------------------------------------------------------- - // Templated inclusive scan iteration - //--------------------------------------------------------------------- - - template - __device__ __forceinline__ void ReduceStep( - T& input, ///< [in] Calling thread's input item. - ReductionOp reduction_op, ///< [in] Binary reduction operator - int last_lane, ///< [in] Index of last lane in segment - Int2Type /*step*/) - { - input = ReduceStep(input, reduction_op, last_lane, 1 << STEP, Int2Type::IS_SMALL_UNSIGNED>()); - - ReduceStep(input, reduction_op, last_lane, Int2Type()); - } - - template - __device__ __forceinline__ void ReduceStep( - T& /*input*/, ///< [in] Calling thread's input item. 
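/*
 * The template recursion above unrolls the same loop this sketch writes
 * directly (assumption: a full 32-lane warp with every lane contributing):
 *
 *   template <typename T, typename Op>
 *   __device__ T WarpReduceSketch(T val, Op op)
 *   {
 *       for (int offset = 1; offset < 32; offset <<= 1)   // STEPS = Log2(32)
 *       {
 *           T peer = __shfl_down_sync(0xffffffffu, val, offset);
 *           val = op(val, peer);
 *       }
 *       return val;   // lane 0 holds the warp-wide reduction
 *   }
 *
 * Only lane 0's result is meaningful in this simplified form; the guarded
 * steps above keep every lane's partial valid for shorter segments.
 */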
- ReductionOp /*reduction_op*/, ///< [in] Binary reduction operator - int /*last_lane*/, ///< [in] Index of last lane in segment - Int2Type /*step*/) - {} - - - //--------------------------------------------------------------------- - // Reduction operations - //--------------------------------------------------------------------- - - /// Reduction - template < - bool ALL_LANES_VALID, ///< Whether all lanes in each warp are contributing a valid fold of items - int FOLDED_ITEMS_PER_LANE, ///< Number of items folded into each lane - typename ReductionOp> - __device__ __forceinline__ T Reduce( - T input, ///< [in] Calling thread's input - int folded_items_per_warp, ///< [in] Total number of valid items folded into each logical warp - ReductionOp reduction_op) ///< [in] Binary reduction operator - { - // Get the last thread in the logical warp - int first_warp_thread = 0; - int last_warp_thread = LOGICAL_WARP_THREADS - 1; - if (!IS_ARCH_WARP) - { - first_warp_thread = lane_id & (~(LOGICAL_WARP_THREADS - 1)); - last_warp_thread |= lane_id; - } - - // Common case is FOLDED_ITEMS_PER_LANE = 1 (or a multiple of 32) - int lanes_with_valid_data = (folded_items_per_warp - 1) / FOLDED_ITEMS_PER_LANE; - - // Get the last valid lane - int last_lane = (ALL_LANES_VALID) ? - last_warp_thread : - CUB_MIN(last_warp_thread, first_warp_thread + lanes_with_valid_data); - - T output = input; - -// // Iterate reduction steps -// #pragma unroll -// for (int STEP = 0; STEP < STEPS; STEP++) -// { -// output = ReduceStep(output, reduction_op, last_lane, 1 << STEP, Int2Type::IS_SMALL_UNSIGNED>()); -// } - - // Template-iterate reduction steps - ReduceStep(output, reduction_op, last_lane, Int2Type<0>()); - - return output; - } - - - /// Segmented reduction - template < - bool HEAD_SEGMENTED, ///< Whether flags indicate a segment-head or a segment-tail - typename FlagT, - typename ReductionOp> - __device__ __forceinline__ T SegmentedReduce( - T input, ///< [in] Calling thread's input - FlagT flag, ///< [in] Whether or not the current lane is a segment head/tail - ReductionOp reduction_op) ///< [in] Binary reduction operator - { - // Get the start flags for each thread in the warp. - int warp_flags = WARP_BALLOT(flag, member_mask); - - if (HEAD_SEGMENTED) - warp_flags >>= 1; - - // Mask in the last lanes of each logical warp - warp_flags |= LastLaneMask<1, LOGICAL_WARPS>::MASK; - - // Mask out the bits below the current thread - warp_flags &= LaneMaskGe(); - - // Find the next set flag - int last_lane = __clz(__brev(warp_flags)); - - T output = input; - -// // Iterate reduction steps -// #pragma unroll -// for (int STEP = 0; STEP < STEPS; STEP++) -// { -// output = ReduceStep(output, reduction_op, last_lane, 1 << STEP, Int2Type::IS_SMALL_UNSIGNED>()); -// } - - // Template-iterate reduction steps - ReduceStep(output, reduction_op, last_lane, Int2Type<0>()); - - return output; - } -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/warp/specializations/warp_reduce_smem.cuh b/ml-xgboost/cub/cub/warp/specializations/warp_reduce_smem.cuh deleted file mode 100644 index 871a256..0000000 --- a/ml-xgboost/cub/cub/warp/specializations/warp_reduce_smem.cuh +++ /dev/null @@ -1,373 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::WarpReduceSmem provides smem-based variants of parallel reduction of items partitioned across a CUDA thread warp. - */ - -#pragma once - -#include "../../thread/thread_operators.cuh" -#include "../../thread/thread_load.cuh" -#include "../../thread/thread_store.cuh" -#include "../../util_type.cuh" -#include "../../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/** - * \brief WarpReduceSmem provides smem-based variants of parallel reduction of items partitioned across a CUDA thread warp. 
- */ -template < - typename T, ///< Data type being reduced - int LOGICAL_WARP_THREADS, ///< Number of threads per logical warp - int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective -struct WarpReduceSmem -{ - /****************************************************************************** - * Constants and type definitions - ******************************************************************************/ - - enum - { - /// Whether the logical warp size and the PTX warp size coincide - IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(PTX_ARCH)), - - /// Whether the logical warp size is a power-of-two - IS_POW_OF_TWO = PowerOfTwo::VALUE, - - /// The number of warp scan steps - STEPS = Log2::VALUE, - - /// The number of threads in half a warp - HALF_WARP_THREADS = 1 << (STEPS - 1), - - /// The number of shared memory elements per warp - WARP_SMEM_ELEMENTS = LOGICAL_WARP_THREADS + HALF_WARP_THREADS, - - /// FlagT status (when not using ballot) - UNSET = 0x0, // Is initially unset - SET = 0x1, // Is initially set - SEEN = 0x2, // Has seen another head flag from a successor peer - }; - - /// Shared memory flag type - typedef unsigned char SmemFlag; - - /// Shared memory storage layout type (1.5 warps-worth of elements for each warp) - struct _TempStorage - { - T reduce[WARP_SMEM_ELEMENTS]; - SmemFlag flags[WARP_SMEM_ELEMENTS]; - }; - - // Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - - /****************************************************************************** - * Thread fields - ******************************************************************************/ - - _TempStorage &temp_storage; - unsigned int lane_id; - unsigned int member_mask; - - - /****************************************************************************** - * Construction - ******************************************************************************/ - - /// Constructor - __device__ __forceinline__ WarpReduceSmem( - TempStorage &temp_storage) - : - temp_storage(temp_storage.Alias()), - lane_id(IS_ARCH_WARP ? - LaneId() : - LaneId() % LOGICAL_WARP_THREADS), - member_mask(!IS_POW_OF_TWO ? 
- (0xffffffff >> (32 - LOGICAL_WARP_THREADS)) : // non-power-of-two subwarps cannot be tiled - (0xffffffff >> (32 - LOGICAL_WARP_THREADS)) << (LaneId() / LOGICAL_WARP_THREADS)) - {} - - /****************************************************************************** - * Utility methods - ******************************************************************************/ - - //--------------------------------------------------------------------- - // Regular reduction - //--------------------------------------------------------------------- - - /** - * Reduction step - */ - template < - bool ALL_LANES_VALID, ///< Whether all lanes in each warp are contributing a valid fold of items - int FOLDED_ITEMS_PER_LANE, ///< Number of items folded into each lane - typename ReductionOp, - int STEP> - __device__ __forceinline__ T ReduceStep( - T input, ///< [in] Calling thread's input - int folded_items_per_warp, ///< [in] Total number of valid items folded into each logical warp - ReductionOp reduction_op, ///< [in] Reduction operator - Int2Type /*step*/) - { - const int OFFSET = 1 << STEP; - - // Share input through buffer - ThreadStore(&temp_storage.reduce[lane_id], input); - - WARP_SYNC(member_mask); - - // Update input if peer_addend is in range - if ((ALL_LANES_VALID && IS_POW_OF_TWO) || ((lane_id + OFFSET) * FOLDED_ITEMS_PER_LANE < folded_items_per_warp)) - { - T peer_addend = ThreadLoad(&temp_storage.reduce[lane_id + OFFSET]); - input = reduction_op(input, peer_addend); - } - - WARP_SYNC(member_mask); - - return ReduceStep(input, folded_items_per_warp, reduction_op, Int2Type()); - } - - - /** - * Reduction step (terminate) - */ - template < - bool ALL_LANES_VALID, ///< Whether all lanes in each warp are contributing a valid fold of items - int FOLDED_ITEMS_PER_LANE, ///< Number of items folded into each lane - typename ReductionOp> - __device__ __forceinline__ T ReduceStep( - T input, ///< [in] Calling thread's input - int /*folded_items_per_warp*/, ///< [in] Total number of valid items folded into each logical warp - ReductionOp /*reduction_op*/, ///< [in] Reduction operator - Int2Type /*step*/) - { - return input; - } - - - //--------------------------------------------------------------------- - // Segmented reduction - //--------------------------------------------------------------------- - - - /** - * Ballot-based segmented reduce - */ - template < - bool HEAD_SEGMENTED, ///< Whether flags indicate a segment-head or a segment-tail - typename FlagT, - typename ReductionOp> - __device__ __forceinline__ T SegmentedReduce( - T input, ///< [in] Calling thread's input - FlagT flag, ///< [in] Whether or not the current lane is a segment head/tail - ReductionOp reduction_op, ///< [in] Reduction operator - Int2Type /*has_ballot*/) ///< [in] Marker type for whether the target arch has ballot functionality - { - // Get the start flags for each thread in the warp. - int warp_flags = WARP_BALLOT(flag, member_mask); - - if (!HEAD_SEGMENTED) - warp_flags <<= 1; - - // Keep bits above the current thread. 
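/*
 * The flag search below relies on a reverse-and-clz bit trick: once the
 * ballot has been masked to the relevant lanes, reversing the word turns
 * "lowest set bit" into "highest set bit", so __clz yields the index of the
 * nearest flagged lane (32 when none is set). A sketch:
 *
 *   __device__ int FirstFlaggedLane(unsigned int masked_ballot)
 *   {
 *       return __clz(__brev(masked_ballot));   // bit p set  ->  returns p
 *   }
 */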
- warp_flags &= LaneMaskGt(); - - // Accommodate packing of multiple logical warps in a single physical warp - if (!IS_ARCH_WARP) - { - warp_flags >>= (LaneId() / LOGICAL_WARP_THREADS) * LOGICAL_WARP_THREADS; - } - - // Find next flag - int next_flag = __clz(__brev(warp_flags)); - - // Clip the next segment at the warp boundary if necessary - if (LOGICAL_WARP_THREADS != 32) - next_flag = CUB_MIN(next_flag, LOGICAL_WARP_THREADS); - - #pragma unroll - for (int STEP = 0; STEP < STEPS; STEP++) - { - const int OFFSET = 1 << STEP; - - // Share input into buffer - ThreadStore(&temp_storage.reduce[lane_id], input); - - WARP_SYNC(member_mask); - - // Update input if peer_addend is in range - if (OFFSET + lane_id < next_flag) - { - T peer_addend = ThreadLoad(&temp_storage.reduce[lane_id + OFFSET]); - input = reduction_op(input, peer_addend); - } - - WARP_SYNC(member_mask); - } - - return input; - } - - - /** - * Smem-based segmented reduce - */ - template < - bool HEAD_SEGMENTED, ///< Whether flags indicate a segment-head or a segment-tail - typename FlagT, - typename ReductionOp> - __device__ __forceinline__ T SegmentedReduce( - T input, ///< [in] Calling thread's input - FlagT flag, ///< [in] Whether or not the current lane is a segment head/tail - ReductionOp reduction_op, ///< [in] Reduction operator - Int2Type /*has_ballot*/) ///< [in] Marker type for whether the target arch has ballot functionality - { - enum - { - UNSET = 0x0, // Is initially unset - SET = 0x1, // Is initially set - SEEN = 0x2, // Has seen another head flag from a successor peer - }; - - // Alias flags onto shared data storage - volatile SmemFlag *flag_storage = temp_storage.flags; - - SmemFlag flag_status = (flag) ? SET : UNSET; - - for (int STEP = 0; STEP < STEPS; STEP++) - { - const int OFFSET = 1 << STEP; - - // Share input through buffer - ThreadStore(&temp_storage.reduce[lane_id], input); - - WARP_SYNC(member_mask); - - // Get peer from buffer - T peer_addend = ThreadLoad(&temp_storage.reduce[lane_id + OFFSET]); - - WARP_SYNC(member_mask); - - // Share flag through buffer - flag_storage[lane_id] = flag_status; - - // Get peer flag from buffer - SmemFlag peer_flag_status = flag_storage[lane_id + OFFSET]; - - // Update input if peer was in range - if (lane_id < LOGICAL_WARP_THREADS - OFFSET) - { - if (HEAD_SEGMENTED) - { - // Head-segmented - if ((flag_status & SEEN) == 0) - { - // Has not seen a more distant head flag - if (peer_flag_status & SET) - { - // Has now seen a head flag - flag_status |= SEEN; - } - else - { - // Peer is not a head flag: grab its count - input = reduction_op(input, peer_addend); - } - - // Update seen status to include that of peer - flag_status |= (peer_flag_status & SEEN); - } - } - else - { - // Tail-segmented. 
Simply propagate flag status - if (!flag_status) - { - input = reduction_op(input, peer_addend); - flag_status |= peer_flag_status; - } - - } - } - } - - return input; - } - - - /****************************************************************************** - * Interface - ******************************************************************************/ - - /** - * Reduction - */ - template < - bool ALL_LANES_VALID, ///< Whether all lanes in each warp are contributing a valid fold of items - int FOLDED_ITEMS_PER_LANE, ///< Number of items folded into each lane - typename ReductionOp> - __device__ __forceinline__ T Reduce( - T input, ///< [in] Calling thread's input - int folded_items_per_warp, ///< [in] Total number of valid items folded into each logical warp - ReductionOp reduction_op) ///< [in] Reduction operator - { - return ReduceStep(input, folded_items_per_warp, reduction_op, Int2Type<0>()); - } - - - /** - * Segmented reduction - */ - template < - bool HEAD_SEGMENTED, ///< Whether flags indicate a segment-head or a segment-tail - typename FlagT, - typename ReductionOp> - __device__ __forceinline__ T SegmentedReduce( - T input, ///< [in] Calling thread's input - FlagT flag, ///< [in] Whether or not the current lane is a segment head/tail - ReductionOp reduction_op) ///< [in] Reduction operator - { - return SegmentedReduce(input, flag, reduction_op, Int2Type<(PTX_ARCH >= 200)>()); - } - - -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/warp/specializations/warp_scan_shfl.cuh b/ml-xgboost/cub/cub/warp/specializations/warp_scan_shfl.cuh deleted file mode 100644 index b126c44..0000000 --- a/ml-xgboost/cub/cub/warp/specializations/warp_scan_shfl.cuh +++ /dev/null @@ -1,650 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - ******************************************************************************/ - -/** - * \file - * cub::WarpScanShfl provides SHFL-based variants of parallel prefix scan of items partitioned across a CUDA thread warp. - */ - -#pragma once - -#include "../../thread/thread_operators.cuh" -#include "../../util_type.cuh" -#include "../../util_ptx.cuh" -#include "../../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/** - * \brief WarpScanShfl provides SHFL-based variants of parallel prefix scan of items partitioned across a CUDA thread warp. - */ -template < - typename T, ///< Data type being scanned - int LOGICAL_WARP_THREADS, ///< Number of threads per logical warp - int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective -struct WarpScanShfl -{ - //--------------------------------------------------------------------- - // Constants and type definitions - //--------------------------------------------------------------------- - - enum - { - /// Whether the logical warp size and the PTX warp size coincide - IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(PTX_ARCH)), - - /// The number of warp scan steps - STEPS = Log2::VALUE, - - /// The 5-bit SHFL mask for logically splitting warps into sub-segments starts 8-bits up - SHFL_C = ((-1 << STEPS) & 31) << 8, - }; - - template - struct IntegerTraits - { - enum { - ///Whether the data type is a small (32b or less) integer for which we can use a single SFHL instruction per exchange - IS_SMALL_UNSIGNED = (Traits::CATEGORY == UNSIGNED_INTEGER) && (sizeof(S) <= sizeof(unsigned int)) - }; - }; - - /// Shared memory storage layout type - struct TempStorage {}; - - - //--------------------------------------------------------------------- - // Thread fields - //--------------------------------------------------------------------- - - unsigned int lane_id; - - unsigned int member_mask; - - //--------------------------------------------------------------------- - // Construction - //--------------------------------------------------------------------- - - /// Constructor - __device__ __forceinline__ WarpScanShfl( - TempStorage &/*temp_storage*/) - : - lane_id(IS_ARCH_WARP ? - LaneId() : - LaneId() % LOGICAL_WARP_THREADS), - member_mask(IS_ARCH_WARP ? - 0xffffffff : - (0xffffffff >> (32 - LOGICAL_WARP_THREADS)) << (LaneId() / LOGICAL_WARP_THREADS)) - {} - - - //--------------------------------------------------------------------- - // Inclusive scan steps - //--------------------------------------------------------------------- - - /// Inclusive prefix scan step (specialized for summation across int32 types) - __device__ __forceinline__ int InclusiveScanStep( - int input, ///< [in] Calling thread's input item. 
- cub::Sum /*scan_op*/, ///< [in] Binary scan operator - int first_lane, ///< [in] Index of first lane in segment - int offset) ///< [in] Up-offset to pull from - { - int output; - int shfl_c = first_lane | SHFL_C; // Shuffle control (mask and first-lane) - - // Use predicate set from SHFL to guard against invalid peers -#ifdef CUB_USE_COOPERATIVE_GROUPS - asm volatile( - "{" - " .reg .s32 r0;" - " .reg .pred p;" - " shfl.sync.up.b32 r0|p, %1, %2, %3, %5;" - " @p add.s32 r0, r0, %4;" - " mov.s32 %0, r0;" - "}" - : "=r"(output) : "r"(input), "r"(offset), "r"(shfl_c), "r"(input), "r"(member_mask)); -#else - asm volatile( - "{" - " .reg .s32 r0;" - " .reg .pred p;" - " shfl.up.b32 r0|p, %1, %2, %3;" - " @p add.s32 r0, r0, %4;" - " mov.s32 %0, r0;" - "}" - : "=r"(output) : "r"(input), "r"(offset), "r"(shfl_c), "r"(input)); -#endif - - return output; - } - - /// Inclusive prefix scan step (specialized for summation across uint32 types) - __device__ __forceinline__ unsigned int InclusiveScanStep( - unsigned int input, ///< [in] Calling thread's input item. - cub::Sum /*scan_op*/, ///< [in] Binary scan operator - int first_lane, ///< [in] Index of first lane in segment - int offset) ///< [in] Up-offset to pull from - { - unsigned int output; - int shfl_c = first_lane | SHFL_C; // Shuffle control (mask and first-lane) - - // Use predicate set from SHFL to guard against invalid peers -#ifdef CUB_USE_COOPERATIVE_GROUPS - asm volatile( - "{" - " .reg .u32 r0;" - " .reg .pred p;" - " shfl.sync.up.b32 r0|p, %1, %2, %3, %5;" - " @p add.u32 r0, r0, %4;" - " mov.u32 %0, r0;" - "}" - : "=r"(output) : "r"(input), "r"(offset), "r"(shfl_c), "r"(input), "r"(member_mask)); -#else - asm volatile( - "{" - " .reg .u32 r0;" - " .reg .pred p;" - " shfl.up.b32 r0|p, %1, %2, %3;" - " @p add.u32 r0, r0, %4;" - " mov.u32 %0, r0;" - "}" - : "=r"(output) : "r"(input), "r"(offset), "r"(shfl_c), "r"(input)); -#endif - - return output; - } - - - /// Inclusive prefix scan step (specialized for summation across fp32 types) - __device__ __forceinline__ float InclusiveScanStep( - float input, ///< [in] Calling thread's input item. - cub::Sum /*scan_op*/, ///< [in] Binary scan operator - int first_lane, ///< [in] Index of first lane in segment - int offset) ///< [in] Up-offset to pull from - { - float output; - int shfl_c = first_lane | SHFL_C; // Shuffle control (mask and first-lane) - - // Use predicate set from SHFL to guard against invalid peers -#ifdef CUB_USE_COOPERATIVE_GROUPS - asm volatile( - "{" - " .reg .f32 r0;" - " .reg .pred p;" - " shfl.sync.up.b32 r0|p, %1, %2, %3, %5;" - " @p add.f32 r0, r0, %4;" - " mov.f32 %0, r0;" - "}" - : "=f"(output) : "f"(input), "r"(offset), "r"(shfl_c), "f"(input), "r"(member_mask)); -#else - asm volatile( - "{" - " .reg .f32 r0;" - " .reg .pred p;" - " shfl.up.b32 r0|p, %1, %2, %3;" - " @p add.f32 r0, r0, %4;" - " mov.f32 %0, r0;" - "}" - : "=f"(output) : "f"(input), "r"(offset), "r"(shfl_c), "f"(input)); -#endif - - return output; - } - - - /// Inclusive prefix scan step (specialized for summation across unsigned long long types) - __device__ __forceinline__ unsigned long long InclusiveScanStep( - unsigned long long input, ///< [in] Calling thread's input item. 
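/*
 * How the shfl_c control word used by these scan steps is assembled
 * (assumption: the PTX ISA's shfl c-operand layout, clamp value in bits
 * [4:0] and segment mask in bits [12:8]). For LOGICAL_WARP_THREADS = 8,
 * STEPS = 3: ((-1 << 3) & 31) = 24, which pins each lane inside its 8-lane
 * segment while first_lane supplies the lower clamp. A sketch:
 *
 *   __device__ int MakeShflC(int first_lane, int steps)
 *   {
 *       int segment_mask = ((-1 << steps) & 31) << 8;   // SHFL_C
 *       return first_lane | segment_mask;               // shfl_c
 *   }
 */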
- cub::Sum /*scan_op*/, ///< [in] Binary scan operator - int first_lane, ///< [in] Index of first lane in segment - int offset) ///< [in] Up-offset to pull from - { - unsigned long long output; - int shfl_c = first_lane | SHFL_C; // Shuffle control (mask and first-lane) - - // Use predicate set from SHFL to guard against invalid peers -#ifdef CUB_USE_COOPERATIVE_GROUPS - asm volatile( - "{" - " .reg .u64 r0;" - " .reg .u32 lo;" - " .reg .u32 hi;" - " .reg .pred p;" - " mov.b64 {lo, hi}, %1;" - " shfl.sync.up.b32 lo|p, lo, %2, %3, %5;" - " shfl.sync.up.b32 hi|p, hi, %2, %3, %5;" - " mov.b64 r0, {lo, hi};" - " @p add.u64 r0, r0, %4;" - " mov.u64 %0, r0;" - "}" - : "=l"(output) : "l"(input), "r"(offset), "r"(shfl_c), "l"(input), "r"(member_mask)); -#else - asm volatile( - "{" - " .reg .u64 r0;" - " .reg .u32 lo;" - " .reg .u32 hi;" - " .reg .pred p;" - " mov.b64 {lo, hi}, %1;" - " shfl.up.b32 lo|p, lo, %2, %3;" - " shfl.up.b32 hi|p, hi, %2, %3;" - " mov.b64 r0, {lo, hi};" - " @p add.u64 r0, r0, %4;" - " mov.u64 %0, r0;" - "}" - : "=l"(output) : "l"(input), "r"(offset), "r"(shfl_c), "l"(input)); -#endif - - return output; - } - - - /// Inclusive prefix scan step (specialized for summation across long long types) - __device__ __forceinline__ long long InclusiveScanStep( - long long input, ///< [in] Calling thread's input item. - cub::Sum /*scan_op*/, ///< [in] Binary scan operator - int first_lane, ///< [in] Index of first lane in segment - int offset) ///< [in] Up-offset to pull from - { - long long output; - int shfl_c = first_lane | SHFL_C; // Shuffle control (mask and first-lane) - - // Use predicate set from SHFL to guard against invalid peers -#ifdef CUB_USE_COOPERATIVE_GROUPS - asm volatile( - "{" - " .reg .s64 r0;" - " .reg .u32 lo;" - " .reg .u32 hi;" - " .reg .pred p;" - " mov.b64 {lo, hi}, %1;" - " shfl.sync.up.b32 lo|p, lo, %2, %3, %5;" - " shfl.sync.up.b32 hi|p, hi, %2, %3, %5;" - " mov.b64 r0, {lo, hi};" - " @p add.s64 r0, r0, %4;" - " mov.s64 %0, r0;" - "}" - : "=l"(output) : "l"(input), "r"(offset), "r"(shfl_c), "l"(input), "r"(member_mask)); -#else - asm volatile( - "{" - " .reg .s64 r0;" - " .reg .u32 lo;" - " .reg .u32 hi;" - " .reg .pred p;" - " mov.b64 {lo, hi}, %1;" - " shfl.up.b32 lo|p, lo, %2, %3;" - " shfl.up.b32 hi|p, hi, %2, %3;" - " mov.b64 r0, {lo, hi};" - " @p add.s64 r0, r0, %4;" - " mov.s64 %0, r0;" - "}" - : "=l"(output) : "l"(input), "r"(offset), "r"(shfl_c), "l"(input)); -#endif - - return output; - } - - - /// Inclusive prefix scan step (specialized for summation across fp64 types) - __device__ __forceinline__ double InclusiveScanStep( - double input, ///< [in] Calling thread's input item. 
- cub::Sum /*scan_op*/, ///< [in] Binary scan operator - int first_lane, ///< [in] Index of first lane in segment - int offset) ///< [in] Up-offset to pull from - { - double output; - int shfl_c = first_lane | SHFL_C; // Shuffle control (mask and first-lane) - - // Use predicate set from SHFL to guard against invalid peers -#ifdef CUB_USE_COOPERATIVE_GROUPS - asm volatile( - "{" - " .reg .u32 lo;" - " .reg .u32 hi;" - " .reg .pred p;" - " .reg .f64 r0;" - " mov.b64 %0, %1;" - " mov.b64 {lo, hi}, %1;" - " shfl.sync.up.b32 lo|p, lo, %2, %3, %4;" - " shfl.sync.up.b32 hi|p, hi, %2, %3, %4;" - " mov.b64 r0, {lo, hi};" - " @p add.f64 %0, %0, r0;" - "}" - : "=d"(output) : "d"(input), "r"(offset), "r"(shfl_c), "r"(member_mask)); -#else - asm volatile( - "{" - " .reg .u32 lo;" - " .reg .u32 hi;" - " .reg .pred p;" - " .reg .f64 r0;" - " mov.b64 %0, %1;" - " mov.b64 {lo, hi}, %1;" - " shfl.up.b32 lo|p, lo, %2, %3;" - " shfl.up.b32 hi|p, hi, %2, %3;" - " mov.b64 r0, {lo, hi};" - " @p add.f64 %0, %0, r0;" - "}" - : "=d"(output) : "d"(input), "r"(offset), "r"(shfl_c)); -#endif - - return output; - } - - -/* - /// Inclusive prefix scan (specialized for ReduceBySegmentOp across KeyValuePair types) - template - __device__ __forceinline__ KeyValuePairInclusiveScanStep( - KeyValuePair input, ///< [in] Calling thread's input item. - ReduceBySegmentOp scan_op, ///< [in] Binary scan operator - int first_lane, ///< [in] Index of first lane in segment - int offset) ///< [in] Up-offset to pull from - { - KeyValuePair output; - - output.value = InclusiveScanStep(input.value, cub::Sum(), first_lane, offset, Int2Type::IS_SMALL_UNSIGNED>()); - output.key = InclusiveScanStep(input.key, cub::Sum(), first_lane, offset, Int2Type::IS_SMALL_UNSIGNED>()); - - if (input.key > 0) - output.value = input.value; - - return output; - } -*/ - - /// Inclusive prefix scan step (generic) - template - __device__ __forceinline__ _T InclusiveScanStep( - _T input, ///< [in] Calling thread's input item. - ScanOpT scan_op, ///< [in] Binary scan operator - int first_lane, ///< [in] Index of first lane in segment - int offset) ///< [in] Up-offset to pull from - { - _T temp = ShuffleUp(input, offset, first_lane, member_mask); - - // Perform scan op if from a valid peer - _T output = scan_op(temp, input); - if (static_cast(lane_id) < first_lane + offset) - output = input; - - return output; - } - - - /// Inclusive prefix scan step (specialized for small integers size 32b or less) - template - __device__ __forceinline__ _T InclusiveScanStep( - _T input, ///< [in] Calling thread's input item. - ScanOpT scan_op, ///< [in] Binary scan operator - int first_lane, ///< [in] Index of first lane in segment - int offset, ///< [in] Up-offset to pull from - Int2Type /*is_small_unsigned*/) ///< [in] Marker type indicating whether T is a small integer - { - return InclusiveScanStep(input, scan_op, first_lane, offset); - } - - - /// Inclusive prefix scan step (specialized for types other than small integers size 32b or less) - template - __device__ __forceinline__ _T InclusiveScanStep( - _T input, ///< [in] Calling thread's input item. 
- ScanOpT scan_op, ///< [in] Binary scan operator - int first_lane, ///< [in] Index of first lane in segment - int offset, ///< [in] Up-offset to pull from - Int2Type /*is_small_unsigned*/) ///< [in] Marker type indicating whether T is a small integer - { - return InclusiveScanStep(input, scan_op, first_lane, offset); - } - - //--------------------------------------------------------------------- - // Templated inclusive scan iteration - //--------------------------------------------------------------------- - - template - __device__ __forceinline__ void InclusiveScanStep( - _T& input, ///< [in] Calling thread's input item. - ScanOp scan_op, ///< [in] Binary scan operator - int first_lane, ///< [in] Index of first lane in segment - Int2Type /*step*/) ///< [in] Marker type indicating scan step - { - input = InclusiveScanStep(input, scan_op, first_lane, 1 << STEP, Int2Type::IS_SMALL_UNSIGNED>()); - - InclusiveScanStep(input, scan_op, first_lane, Int2Type()); - } - - template - __device__ __forceinline__ void InclusiveScanStep( - _T& /*input*/, ///< [in] Calling thread's input item. - ScanOp /*scan_op*/, ///< [in] Binary scan operator - int /*first_lane*/, ///< [in] Index of first lane in segment - Int2Type /*step*/) ///< [in] Marker type indicating scan step - {} - - - /****************************************************************************** - * Interface - ******************************************************************************/ - - //--------------------------------------------------------------------- - // Broadcast - //--------------------------------------------------------------------- - - /// Broadcast - __device__ __forceinline__ T Broadcast( - T input, ///< [in] The value to broadcast - int src_lane) ///< [in] Which warp lane is to do the broadcasting - { - return ShuffleIndex(input, src_lane, LOGICAL_WARP_THREADS, member_mask); - } - - - //--------------------------------------------------------------------- - // Inclusive operations - //--------------------------------------------------------------------- - - /// Inclusive scan - template - __device__ __forceinline__ void InclusiveScan( - _T input, ///< [in] Calling thread's input item. - _T &inclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. - ScanOpT scan_op) ///< [in] Binary scan operator - { - inclusive_output = input; - - // Iterate scan steps - int segment_first_lane = 0; - - // Iterate scan steps -// InclusiveScanStep(inclusive_output, scan_op, segment_first_lane, Int2Type<0>()); - - // Iterate scan steps - #pragma unroll - for (int STEP = 0; STEP < STEPS; STEP++) - { - inclusive_output = InclusiveScanStep( - inclusive_output, - scan_op, - segment_first_lane, - (1 << STEP), - Int2Type::IS_SMALL_UNSIGNED>()); - } - - } - - /// Inclusive scan, specialized for reduce-value-by-key - template - __device__ __forceinline__ void InclusiveScan( - KeyValuePair input, ///< [in] Calling thread's input item. - KeyValuePair &inclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. 
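/*
 * The reduce-by-key scan below finds each segment's first lane by comparing
 * every lane's key against its predecessor's and balloting the mismatches.
 * A sketch with CUDA 9+ intrinsics (hypothetical helper; LaneMaskLe() is
 * approximated with explicit bit arithmetic):
 *
 *   __device__ int SegmentFirstLane(int my_key, unsigned int member_mask)
 *   {
 *       int lane_id = threadIdx.x & 31;
 *       int pred_key = __shfl_up_sync(member_mask, my_key, 1);
 *       unsigned int ballot =
 *           __ballot_sync(member_mask, (lane_id > 0) && (pred_key != my_key));
 *       ballot &= (1u << lane_id) | ((1u << lane_id) - 1u);   // lanes <= me
 *       return (ballot == 0) ? 0 : 31 - __clz(ballot);
 *   }
 */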
- ReduceByKeyOp scan_op) ///< [in] Binary scan operator - { - inclusive_output = input; - - KeyT pred_key = ShuffleUp(inclusive_output.key, 1, 0, member_mask); - - unsigned int ballot = WARP_BALLOT((pred_key != inclusive_output.key), member_mask); - - // Mask away all lanes greater than ours - ballot = ballot & LaneMaskLe(); - - // Find index of first set bit - int segment_first_lane = CUB_MAX(0, 31 - __clz(ballot)); - - // Iterate scan steps -// InclusiveScanStep(inclusive_output.value, scan_op.op, segment_first_lane, Int2Type<0>()); - - // Iterate scan steps - #pragma unroll - for (int STEP = 0; STEP < STEPS; STEP++) - { - inclusive_output.value = InclusiveScanStep( - inclusive_output.value, - scan_op.op, - segment_first_lane, - (1 << STEP), - Int2Type::IS_SMALL_UNSIGNED>()); - } - } - - - /// Inclusive scan with aggregate - template - __device__ __forceinline__ void InclusiveScan( - T input, ///< [in] Calling thread's input item. - T &inclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. - ScanOpT scan_op, ///< [in] Binary scan operator - T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items. - { - InclusiveScan(input, inclusive_output, scan_op); - - // Grab aggregate from last warp lane - warp_aggregate = ShuffleIndex(inclusive_output, LOGICAL_WARP_THREADS - 1, LOGICAL_WARP_THREADS, member_mask); - } - - - //--------------------------------------------------------------------- - // Get exclusive from inclusive - //--------------------------------------------------------------------- - - /// Update inclusive and exclusive using input and inclusive - template - __device__ __forceinline__ void Update( - T /*input*/, ///< [in] - T &inclusive, ///< [in, out] - T &exclusive, ///< [out] - ScanOpT /*scan_op*/, ///< [in] - IsIntegerT /*is_integer*/) ///< [in] - { - // initial value unknown - exclusive = ShuffleUp(inclusive, 1, 0, member_mask); - } - - /// Update inclusive and exclusive using input and inclusive (specialized for summation of integer types) - __device__ __forceinline__ void Update( - T input, - T &inclusive, - T &exclusive, - cub::Sum /*scan_op*/, - Int2Type /*is_integer*/) - { - // initial value presumed 0 - exclusive = inclusive - input; - } - - /// Update inclusive and exclusive using initial value using input, inclusive, and initial value - template - __device__ __forceinline__ void Update ( - T /*input*/, - T &inclusive, - T &exclusive, - ScanOpT scan_op, - T initial_value, - IsIntegerT /*is_integer*/) - { - inclusive = scan_op(initial_value, inclusive); - exclusive = ShuffleUp(inclusive, 1, 0, member_mask); - if (lane_id == 0) - exclusive = initial_value; - } - - /// Update inclusive and exclusive using initial value using input and inclusive (specialized for summation of integer types) - __device__ __forceinline__ void Update ( - T input, - T &inclusive, - T &exclusive, - cub::Sum scan_op, - T initial_value, - Int2Type /*is_integer*/) - { - inclusive = scan_op(initial_value, inclusive); - exclusive = inclusive - input; - } - - - /// Update inclusive, exclusive, and warp aggregate using input and inclusive - template - __device__ __forceinline__ void Update ( - T input, - T &inclusive, - T &exclusive, - T &warp_aggregate, - ScanOpT scan_op, - IsIntegerT is_integer) - { - warp_aggregate = ShuffleIndex(inclusive, LOGICAL_WARP_THREADS - 1, LOGICAL_WARP_THREADS, member_mask); - Update(input, inclusive, exclusive, scan_op, is_integer); - } - - /// Update inclusive, exclusive, and warp aggregate using input, inclusive, and 
initial value - template - __device__ __forceinline__ void Update ( - T input, - T &inclusive, - T &exclusive, - T &warp_aggregate, - ScanOpT scan_op, - T initial_value, - IsIntegerT is_integer) - { - warp_aggregate = ShuffleIndex(inclusive, LOGICAL_WARP_THREADS - 1, LOGICAL_WARP_THREADS, member_mask); - Update(input, inclusive, exclusive, scan_op, initial_value, is_integer); - } - - - -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/warp/specializations/warp_scan_smem.cuh b/ml-xgboost/cub/cub/warp/specializations/warp_scan_smem.cuh deleted file mode 100644 index fc96cd0..0000000 --- a/ml-xgboost/cub/cub/warp/specializations/warp_scan_smem.cuh +++ /dev/null @@ -1,395 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * cub::WarpScanSmem provides smem-based variants of parallel prefix scan of items partitioned across a CUDA thread warp. - */ - -#pragma once - -#include "../../thread/thread_operators.cuh" -#include "../../thread/thread_load.cuh" -#include "../../thread/thread_store.cuh" -#include "../../util_type.cuh" -#include "../../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/** - * \brief WarpScanSmem provides smem-based variants of parallel prefix scan of items partitioned across a CUDA thread warp. 
- */ -template < - typename T, ///< Data type being scanned - int LOGICAL_WARP_THREADS, ///< Number of threads per logical warp - int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective -struct WarpScanSmem -{ - /****************************************************************************** - * Constants and type definitions - ******************************************************************************/ - - enum - { - /// Whether the logical warp size and the PTX warp size coincide - IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(PTX_ARCH)), - - /// Whether the logical warp size is a power-of-two - IS_POW_OF_TWO = PowerOfTwo::VALUE, - - /// The number of warp scan steps - STEPS = Log2::VALUE, - - /// The number of threads in half a warp - HALF_WARP_THREADS = 1 << (STEPS - 1), - - /// The number of shared memory elements per warp - WARP_SMEM_ELEMENTS = LOGICAL_WARP_THREADS + HALF_WARP_THREADS, - }; - - /// Storage cell type (workaround for SM1x compiler bugs with custom-ops like Max() on signed chars) - typedef typename If<((Equals::VALUE || Equals::VALUE) && (PTX_ARCH < 200)), int, T>::Type CellT; - - /// Shared memory storage layout type (1.5 warps-worth of elements for each warp) - typedef CellT _TempStorage[WARP_SMEM_ELEMENTS]; - - // Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - - /****************************************************************************** - * Thread fields - ******************************************************************************/ - - _TempStorage &temp_storage; - unsigned int lane_id; - unsigned int member_mask; - - - /****************************************************************************** - * Construction - ******************************************************************************/ - - /// Constructor - __device__ __forceinline__ WarpScanSmem( - TempStorage &temp_storage) - : - temp_storage(temp_storage.Alias()), - lane_id(IS_ARCH_WARP ? - LaneId() : - LaneId() % LOGICAL_WARP_THREADS), - member_mask(!IS_POW_OF_TWO ? 
- (0xffffffff >> (32 - LOGICAL_WARP_THREADS)) : // non-power-of-two subwarps cannot be tiled - (0xffffffff >> (32 - LOGICAL_WARP_THREADS)) << (LaneId() / LOGICAL_WARP_THREADS)) - {} - - - /****************************************************************************** - * Utility methods - ******************************************************************************/ - - /// Basic inclusive scan iteration (template unrolled, inductive-case specialization) - template < - bool HAS_IDENTITY, - int STEP, - typename ScanOp> - __device__ __forceinline__ void ScanStep( - T &partial, - ScanOp scan_op, - Int2Type /*step*/) - { - const int OFFSET = 1 << STEP; - - // Share partial into buffer - ThreadStore(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) partial); - - WARP_SYNC(member_mask); - - // Update partial if addend is in range - if (HAS_IDENTITY || (lane_id >= OFFSET)) - { - T addend = (T) ThreadLoad(&temp_storage[HALF_WARP_THREADS + lane_id - OFFSET]); - partial = scan_op(addend, partial); - } - WARP_SYNC(member_mask); - - ScanStep(partial, scan_op, Int2Type()); - } - - - /// Basic inclusive scan iteration(template unrolled, base-case specialization) - template < - bool HAS_IDENTITY, - typename ScanOp> - __device__ __forceinline__ void ScanStep( - T &/*partial*/, - ScanOp /*scan_op*/, - Int2Type /*step*/) - {} - - - /// Inclusive prefix scan (specialized for summation across primitive types) - __device__ __forceinline__ void InclusiveScan( - T input, ///< [in] Calling thread's input item. - T &output, ///< [out] Calling thread's output item. May be aliased with \p input. - Sum scan_op, ///< [in] Binary scan operator - Int2Type /*is_primitive*/) ///< [in] Marker type indicating whether T is primitive type - { - T identity = 0; - ThreadStore(&temp_storage[lane_id], (CellT) identity); - - WARP_SYNC(member_mask); - - // Iterate scan steps - output = input; - ScanStep(output, scan_op, Int2Type<0>()); - } - - - /// Inclusive prefix scan - template - __device__ __forceinline__ void InclusiveScan( - T input, ///< [in] Calling thread's input item. - T &output, ///< [out] Calling thread's output item. May be aliased with \p input. - ScanOp scan_op, ///< [in] Binary scan operator - Int2Type /*is_primitive*/) ///< [in] Marker type indicating whether T is primitive type - { - // Iterate scan steps - output = input; - ScanStep(output, scan_op, Int2Type<0>()); - } - - - /****************************************************************************** - * Interface - ******************************************************************************/ - - //--------------------------------------------------------------------- - // Broadcast - //--------------------------------------------------------------------- - - /// Broadcast - __device__ __forceinline__ T Broadcast( - T input, ///< [in] The value to broadcast - unsigned int src_lane) ///< [in] Which warp lane is to do the broadcasting - { - if (lane_id == src_lane) - { - ThreadStore(temp_storage, (CellT) input); - } - - WARP_SYNC(member_mask); - - return (T)ThreadLoad(temp_storage); - } - - - //--------------------------------------------------------------------- - // Inclusive operations - //--------------------------------------------------------------------- - - /// Inclusive scan - template - __device__ __forceinline__ void InclusiveScan( - T input, ///< [in] Calling thread's input item. - T &inclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. 
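/*
 * Shape of the shared-memory scan step above (a Hillis-Steele scan): each
 * lane publishes its partial, synchronizes, then folds in the partial
 * OFFSET slots below it. The HALF_WARP_THREADS of padding lets low lanes
 * read a stored identity instead of branching when one is available.
 * A sketch for a full 32-lane warp without an identity:
 *
 *   template <typename T, typename Op>
 *   __device__ T SmemScanSketch(T *buf, int lane_id, T partial, Op op)
 *   {
 *       const int HALF = 16;               // half-warp padding
 *       for (int offset = 1; offset < 32; offset <<= 1)
 *       {
 *           buf[HALF + lane_id] = partial;
 *           __syncwarp();
 *           if (lane_id >= offset)
 *               partial = op(buf[HALF + lane_id - offset], partial);
 *           __syncwarp();
 *       }
 *       return partial;                    // inclusive prefix-scan result
 *   }
 */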
- ScanOp scan_op) ///< [in] Binary scan operator - { - InclusiveScan(input, inclusive_output, scan_op, Int2Type::PRIMITIVE>()); - } - - - /// Inclusive scan with aggregate - template - __device__ __forceinline__ void InclusiveScan( - T input, ///< [in] Calling thread's input item. - T &inclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. - ScanOp scan_op, ///< [in] Binary scan operator - T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items. - { - InclusiveScan(input, inclusive_output, scan_op); - - // Retrieve aggregate - ThreadStore(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) inclusive_output); - - WARP_SYNC(member_mask); - - warp_aggregate = (T) ThreadLoad(&temp_storage[WARP_SMEM_ELEMENTS - 1]); - - WARP_SYNC(member_mask); - } - - - //--------------------------------------------------------------------- - // Get exclusive from inclusive - //--------------------------------------------------------------------- - - /// Update inclusive and exclusive using input and inclusive - template - __device__ __forceinline__ void Update( - T /*input*/, ///< [in] - T &inclusive, ///< [in, out] - T &exclusive, ///< [out] - ScanOpT /*scan_op*/, ///< [in] - IsIntegerT /*is_integer*/) ///< [in] - { - // initial value unknown - ThreadStore(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) inclusive); - - WARP_SYNC(member_mask); - - exclusive = (T) ThreadLoad(&temp_storage[HALF_WARP_THREADS + lane_id - 1]); - } - - /// Update inclusive and exclusive using input and inclusive (specialized for summation of integer types) - __device__ __forceinline__ void Update( - T input, - T &inclusive, - T &exclusive, - cub::Sum /*scan_op*/, - Int2Type /*is_integer*/) - { - // initial value presumed 0 - exclusive = inclusive - input; - } - - /// Update inclusive and exclusive using initial value using input, inclusive, and initial value - template - __device__ __forceinline__ void Update ( - T /*input*/, - T &inclusive, - T &exclusive, - ScanOpT scan_op, - T initial_value, - IsIntegerT /*is_integer*/) - { - inclusive = scan_op(initial_value, inclusive); - ThreadStore(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) inclusive); - - WARP_SYNC(member_mask); - - exclusive = (T) ThreadLoad(&temp_storage[HALF_WARP_THREADS + lane_id - 1]); - if (lane_id == 0) - exclusive = initial_value; - } - - /// Update inclusive and exclusive using initial value using input and inclusive (specialized for summation of integer types) - __device__ __forceinline__ void Update ( - T input, - T &inclusive, - T &exclusive, - cub::Sum scan_op, - T initial_value, - Int2Type /*is_integer*/) - { - inclusive = scan_op(initial_value, inclusive); - exclusive = inclusive - input; - } - - - /// Update inclusive, exclusive, and warp aggregate using input and inclusive - template - __device__ __forceinline__ void Update ( - T /*input*/, - T &inclusive, - T &exclusive, - T &warp_aggregate, - ScanOpT /*scan_op*/, - IsIntegerT /*is_integer*/) - { - // Initial value presumed to be unknown or identity (either way our padding is correct) - ThreadStore(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) inclusive); - - WARP_SYNC(member_mask); - - exclusive = (T) ThreadLoad(&temp_storage[HALF_WARP_THREADS + lane_id - 1]); - warp_aggregate = (T) ThreadLoad(&temp_storage[WARP_SMEM_ELEMENTS - 1]); - } - - /// Update inclusive, exclusive, and warp aggregate using input and inclusive (specialized for summation of integer types) - __device__ __forceinline__ void Update ( - T input, - T &inclusive, - T 
&exclusive, - T &warp_aggregate, - cub::Sum /*scan_o*/, - Int2Type /*is_integer*/) - { - // Initial value presumed to be unknown or identity (either way our padding is correct) - ThreadStore(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) inclusive); - - WARP_SYNC(member_mask); - - warp_aggregate = (T) ThreadLoad(&temp_storage[WARP_SMEM_ELEMENTS - 1]); - exclusive = inclusive - input; - } - - /// Update inclusive, exclusive, and warp aggregate using input, inclusive, and initial value - template - __device__ __forceinline__ void Update ( - T /*input*/, - T &inclusive, - T &exclusive, - T &warp_aggregate, - ScanOpT scan_op, - T initial_value, - IsIntegerT /*is_integer*/) - { - // Broadcast warp aggregate - ThreadStore(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) inclusive); - - WARP_SYNC(member_mask); - - warp_aggregate = (T) ThreadLoad(&temp_storage[WARP_SMEM_ELEMENTS - 1]); - - WARP_SYNC(member_mask); - - // Update inclusive with initial value - inclusive = scan_op(initial_value, inclusive); - - // Get exclusive from exclusive - ThreadStore(&temp_storage[HALF_WARP_THREADS + lane_id - 1], (CellT) inclusive); - - WARP_SYNC(member_mask); - - exclusive = (T) ThreadLoad(&temp_storage[HALF_WARP_THREADS + lane_id - 2]); - - if (lane_id == 0) - exclusive = initial_value; - } - - -}; - - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/warp/warp_reduce.cuh b/ml-xgboost/cub/cub/warp/warp_reduce.cuh deleted file mode 100644 index 1bb9afa..0000000 --- a/ml-xgboost/cub/cub/warp/warp_reduce.cuh +++ /dev/null @@ -1,612 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - ******************************************************************************/ - -/** - * \file - * The cub::WarpReduce class provides [collective](index.html#sec0) methods for computing a parallel reduction of items partitioned across a CUDA thread warp. - */ - -#pragma once - -#include "specializations/warp_reduce_shfl.cuh" -#include "specializations/warp_reduce_smem.cuh" -#include "../thread/thread_operators.cuh" -#include "../util_arch.cuh" -#include "../util_type.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - - -/** - * \addtogroup WarpModule - * @{ - */ - -/** - * \brief The WarpReduce class provides [collective](index.html#sec0) methods for computing a parallel reduction of items partitioned across a CUDA thread warp. ![](warp_reduce_logo.png) - * - * \tparam T The reduction input/output element type - * \tparam LOGICAL_WARP_THREADS [optional] The number of threads per "logical" warp (may be less than the number of hardware warp threads). Default is the warp size of the targeted CUDA compute-capability (e.g., 32 threads for SM20). - * \tparam PTX_ARCH [optional] \ptxversion - * - * \par Overview - * - A reduction (or fold) - * uses a binary combining operator to compute a single aggregate from a list of input elements. - * - Supports "logical" warps smaller than the physical warp size (e.g., logical warps of 8 threads) - * - The number of entrant threads must be a multiple of \p LOGICAL_WARP_THREADS - * - * \par Performance Considerations - * - Uses special instructions when applicable (e.g., warp \p SHFL instructions) - * - Uses synchronization-free communication between warp lanes when applicable - * - Incurs zero bank conflicts for most types - * - Computation is slightly more efficient (i.e., having lower instruction overhead) for: - * - Summation (vs. generic reduction) - * - The architecture's warp size is a whole multiple of \p LOGICAL_WARP_THREADS - * - * \par Simple Examples - * \warpcollective{WarpReduce} - * \par - * The code snippet below illustrates four concurrent warp sum reductions within a block of - * 128 threads (one per each of the 32-thread warps). - * \par - * \code - * #include <cub/cub.cuh> - * - * __global__ void ExampleKernel(...) - * { - * // Specialize WarpReduce for type int - * typedef cub::WarpReduce<int> WarpReduce; - * - * // Allocate WarpReduce shared memory for 4 warps - * __shared__ typename WarpReduce::TempStorage temp_storage[4]; - * - * // Obtain one input item per thread - * int thread_data = ... - * - * // Return the warp-wide sums to each lane0 (threads 0, 32, 64, and 96) - * int warp_id = threadIdx.x / 32; - * int aggregate = WarpReduce(temp_storage[warp_id]).Sum(thread_data); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is {0, 1, 2, 3, ..., 127}. - * The corresponding output \p aggregate in threads 0, 32, 64, and 96 will be \p 496, \p 1520, - * \p 2544, and \p 3568, respectively (and is undefined in other threads). - * - * \par - * The code snippet below illustrates a single warp sum reduction within a block of - * 128 threads. - * \par - * \code - * #include <cub/cub.cuh> - * - * __global__ void ExampleKernel(...) - * { - * // Specialize WarpReduce for type int - * typedef cub::WarpReduce<int> WarpReduce; - * - * // Allocate WarpReduce shared memory for one warp - * __shared__ typename WarpReduce::TempStorage temp_storage; - * ...
- * - * // Only the first warp performs a reduction - * if (threadIdx.x < 32) - * { - * // Obtain one input item per thread - * int thread_data = ... - * - * // Return the warp-wide sum to lane0 - * int aggregate = WarpReduce(temp_storage).Sum(thread_data); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the warp of threads is {0, 1, 2, 3, ..., 31}. - * The corresponding output \p aggregate in thread0 will be \p 496 (and is undefined in other threads). - * - */ -template < - typename T, - int LOGICAL_WARP_THREADS = CUB_PTX_WARP_THREADS, - int PTX_ARCH = CUB_PTX_ARCH> -class WarpReduce -{ -private: - - /****************************************************************************** - * Constants and type definitions - ******************************************************************************/ - - enum - { - /// Whether the logical warp size and the PTX warp size coincide - IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(PTX_ARCH)), - - /// Whether the logical warp size is a power-of-two - IS_POW_OF_TWO = PowerOfTwo::VALUE, - }; - -public: - - #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document - - /// Internal specialization. Use SHFL-based reduction if (architecture is >= SM30) and (LOGICAL_WARP_THREADS is a power-of-two) - typedef typename If<(PTX_ARCH >= 300) && (IS_POW_OF_TWO), - WarpReduceShfl, - WarpReduceSmem >::Type InternalWarpReduce; - - #endif // DOXYGEN_SHOULD_SKIP_THIS - - -private: - - /// Shared memory storage layout type for WarpReduce - typedef typename InternalWarpReduce::TempStorage _TempStorage; - - - /****************************************************************************** - * Thread fields - ******************************************************************************/ - - /// Shared storage reference - _TempStorage &temp_storage; - - - /****************************************************************************** - * Utility methods - ******************************************************************************/ - -public: - - /// \smemstorage{WarpReduce} - struct TempStorage : Uninitialized<_TempStorage> {}; - - - /******************************************************************//** - * \name Collective constructors - *********************************************************************/ - //@{ - - - /** - * \brief Collective constructor using the specified memory allocation as temporary storage. Logical warp and lane identifiers are constructed from threadIdx.x. - */ - __device__ __forceinline__ WarpReduce( - TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage - : - temp_storage(temp_storage.Alias()) - {} - - - //@} end member group - /******************************************************************//** - * \name Summation reductions - *********************************************************************/ - //@{ - - - /** - * \brief Computes a warp-wide sum in the calling warp. The output is valid in warp lane0. - * - * \smemreuse - * - * \par Snippet - * The code snippet below illustrates four concurrent warp sum reductions within a block of - * 128 threads (one per each of the 32-thread warps). - * \par - * \code - * #include - * - * __global__ void ExampleKernel(...) - * { - * // Specialize WarpReduce for type int - * typedef cub::WarpReduce WarpReduce; - * - * // Allocate WarpReduce shared memory for 4 warps - * __shared__ typename WarpReduce::TempStorage temp_storage[4]; - * - * // Obtain one input item per thread - * int thread_data = ... 
- * - * // Return the warp-wide sums to each lane0 - * int warp_id = threadIdx.x / 32; - * int aggregate = WarpReduce(temp_storage[warp_id]).Sum(thread_data); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is {0, 1, 2, 3, ..., 127}. - * The corresponding output \p aggregate in threads 0, 32, 64, and 96 will \p 496, \p 1520, - * \p 2544, and \p 3568, respectively (and is undefined in other threads). - * - */ - __device__ __forceinline__ T Sum( - T input) ///< [in] Calling thread's input - { - return InternalWarpReduce(temp_storage).Reduce(input, LOGICAL_WARP_THREADS, cub::Sum()); - } - - /** - * \brief Computes a partially-full warp-wide sum in the calling warp. The output is valid in warp lane0. - * - * All threads across the calling warp must agree on the same value for \p valid_items. Otherwise the result is undefined. - * - * \smemreuse - * - * \par Snippet - * The code snippet below illustrates a sum reduction within a single, partially-full - * block of 32 threads (one warp). - * \par - * \code - * #include - * - * __global__ void ExampleKernel(int *d_data, int valid_items) - * { - * // Specialize WarpReduce for type int - * typedef cub::WarpReduce WarpReduce; - * - * // Allocate WarpReduce shared memory for one warp - * __shared__ typename WarpReduce::TempStorage temp_storage; - * - * // Obtain one input item per thread if in range - * int thread_data; - * if (threadIdx.x < valid_items) - * thread_data = d_data[threadIdx.x]; - * - * // Return the warp-wide sums to each lane0 - * int aggregate = WarpReduce(temp_storage).Sum( - * thread_data, valid_items); - * - * \endcode - * \par - * Suppose the input \p d_data is {0, 1, 2, 3, 4, ... and \p valid_items - * is \p 4. The corresponding output \p aggregate in thread0 is \p 6 (and is - * undefined in other threads). - * - */ - __device__ __forceinline__ T Sum( - T input, ///< [in] Calling thread's input - int valid_items) ///< [in] Total number of valid items in the calling thread's logical warp (may be less than \p LOGICAL_WARP_THREADS) - { - // Determine if we don't need bounds checking - return InternalWarpReduce(temp_storage).Reduce(input, valid_items, cub::Sum()); - } - - - /** - * \brief Computes a segmented sum in the calling warp where segments are defined by head-flags. The sum of each segment is returned to the first lane in that segment (which always includes lane0). - * - * \smemreuse - * - * \par Snippet - * The code snippet below illustrates a head-segmented warp sum - * reduction within a block of 32 threads (one warp). - * \par - * \code - * #include - * - * __global__ void ExampleKernel(...) - * { - * // Specialize WarpReduce for type int - * typedef cub::WarpReduce WarpReduce; - * - * // Allocate WarpReduce shared memory for one warp - * __shared__ typename WarpReduce::TempStorage temp_storage; - * - * // Obtain one input item and flag per thread - * int thread_data = ... - * int head_flag = ... - * - * // Return the warp-wide sums to each lane0 - * int aggregate = WarpReduce(temp_storage).HeadSegmentedSum( - * thread_data, head_flag); - * - * \endcode - * \par - * Suppose the set of input \p thread_data and \p head_flag across the block of threads - * is {0, 1, 2, 3, ..., 31 and is {1, 0, 0, 0, 1, 0, 0, 0, ..., 1, 0, 0, 0, - * respectively. The corresponding output \p aggregate in threads 0, 4, 8, etc. will be - * \p 6, \p 22, \p 38, etc. (and is undefined in other threads). 
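The head-flag convention documented above is easy to misread: each segment's sum is returned only to the lane carrying that segment's head flag, and every other lane's result is undefined. A small host-side emulation in plain C++ that reproduces the documented outputs (the segment length of four matches the example; everything else is illustrative):

#include <cstdio>

int main() {
    const int kWarp = 32;
    int thread_data[kWarp], head_flag[kWarp], aggregate[kWarp];
    for (int i = 0; i < kWarp; ++i) {
        thread_data[i] = i;
        head_flag[i] = (i % 4 == 0) ? 1 : 0;   // segments of four lanes
    }
    // Each segment's sum lands in the lane carrying that segment's head
    // flag; all other lanes are left undefined (-1 here for display).
    for (int i = 0; i < kWarp; ++i) aggregate[i] = -1;
    int start = 0;
    for (int i = 1; i <= kWarp; ++i) {
        if (i == kWarp || head_flag[i]) {
            int sum = 0;
            for (int j = start; j < i; ++j) sum += thread_data[j];
            aggregate[start] = sum;
            start = i;
        }
    }
    for (int i = 0; i < kWarp; i += 4)
        printf("lane %2d: segment sum = %d\n", i, aggregate[i]);  // 6, 22, 38, ...
    return 0;
}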
- * - * \tparam ReductionOp [inferred] Binary reduction operator type having member T operator()(const T &a, const T &b) - * - */ - template < - typename FlagT> - __device__ __forceinline__ T HeadSegmentedSum( - T input, ///< [in] Calling thread's input - FlagT head_flag) ///< [in] Head flag denoting whether or not \p input is the start of a new segment - { - return HeadSegmentedReduce(input, head_flag, cub::Sum()); - } - - - /** - * \brief Computes a segmented sum in the calling warp where segments are defined by tail-flags. The sum of each segment is returned to the first lane in that segment (which always includes lane0). - * - * \smemreuse - * - * \par Snippet - * The code snippet below illustrates a tail-segmented warp sum - * reduction within a block of 32 threads (one warp). - * \par - * \code - * #include - * - * __global__ void ExampleKernel(...) - * { - * // Specialize WarpReduce for type int - * typedef cub::WarpReduce WarpReduce; - * - * // Allocate WarpReduce shared memory for one warp - * __shared__ typename WarpReduce::TempStorage temp_storage; - * - * // Obtain one input item and flag per thread - * int thread_data = ... - * int tail_flag = ... - * - * // Return the warp-wide sums to each lane0 - * int aggregate = WarpReduce(temp_storage).TailSegmentedSum( - * thread_data, tail_flag); - * - * \endcode - * \par - * Suppose the set of input \p thread_data and \p tail_flag across the block of threads - * is {0, 1, 2, 3, ..., 31 and is {0, 0, 0, 1, 0, 0, 0, 1, ..., 0, 0, 0, 1, - * respectively. The corresponding output \p aggregate in threads 0, 4, 8, etc. will be - * \p 6, \p 22, \p 38, etc. (and is undefined in other threads). - * - * \tparam ReductionOp [inferred] Binary reduction operator type having member T operator()(const T &a, const T &b) - */ - template < - typename FlagT> - __device__ __forceinline__ T TailSegmentedSum( - T input, ///< [in] Calling thread's input - FlagT tail_flag) ///< [in] Head flag denoting whether or not \p input is the start of a new segment - { - return TailSegmentedReduce(input, tail_flag, cub::Sum()); - } - - - - //@} end member group - /******************************************************************//** - * \name Generic reductions - *********************************************************************/ - //@{ - - /** - * \brief Computes a warp-wide reduction in the calling warp using the specified binary reduction functor. The output is valid in warp lane0. - * - * Supports non-commutative reduction operators - * - * \smemreuse - * - * \par Snippet - * The code snippet below illustrates four concurrent warp max reductions within a block of - * 128 threads (one per each of the 32-thread warps). - * \par - * \code - * #include - * - * __global__ void ExampleKernel(...) - * { - * // Specialize WarpReduce for type int - * typedef cub::WarpReduce WarpReduce; - * - * // Allocate WarpReduce shared memory for 4 warps - * __shared__ typename WarpReduce::TempStorage temp_storage[4]; - * - * // Obtain one input item per thread - * int thread_data = ... - * - * // Return the warp-wide reductions to each lane0 - * int warp_id = threadIdx.x / 32; - * int aggregate = WarpReduce(temp_storage[warp_id]).Reduce( - * thread_data, cub::Max()); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is {0, 1, 2, 3, ..., 127}. - * The corresponding output \p aggregate in threads 0, 32, 64, and 96 will \p 31, \p 63, - * \p 95, and \p 127, respectively (and is undefined in other threads). 
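The warp max-reduction snippet above can be assembled into a complete program. The following sketch assumes CUB is on the include path (for example, nvcc -I<cub root>); the launch shape, buffer names, and the lane-0 writeback are illustrative glue that the snippet elides.

#include <cstdio>
#include <cub/cub.cuh>

__global__ void WarpMaxKernel(const int *d_in, int *d_out)
{
    // Specialize WarpReduce for type int, one instance per 32-thread warp
    typedef cub::WarpReduce<int> WarpReduce;
    __shared__ typename WarpReduce::TempStorage temp_storage[4];

    int thread_data = d_in[threadIdx.x];
    int warp_id = threadIdx.x / 32;
    int aggregate = WarpReduce(temp_storage[warp_id]).Reduce(thread_data, cub::Max());

    // Only lane 0 of each warp holds a defined aggregate.
    if (threadIdx.x % 32 == 0)
        d_out[warp_id] = aggregate;
}

int main()
{
    int h_in[128], h_out[4];
    for (int i = 0; i < 128; ++i) h_in[i] = i;

    int *d_in, *d_out;
    cudaMalloc(&d_in, sizeof(h_in));
    cudaMalloc(&d_out, sizeof(h_out));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);

    WarpMaxKernel<<<1, 128>>>(d_in, d_out);
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);

    for (int w = 0; w < 4; ++w)
        printf("warp %d max = %d\n", w, h_out[w]);   // 31, 63, 95, 127
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}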
- * - * \tparam ReductionOp [inferred] Binary reduction operator type having member T operator()(const T &a, const T &b) - */ - template - __device__ __forceinline__ T Reduce( - T input, ///< [in] Calling thread's input - ReductionOp reduction_op) ///< [in] Binary reduction operator - { - return InternalWarpReduce(temp_storage).Reduce(input, LOGICAL_WARP_THREADS, reduction_op); - } - - /** - * \brief Computes a partially-full warp-wide reduction in the calling warp using the specified binary reduction functor. The output is valid in warp lane0. - * - * All threads across the calling warp must agree on the same value for \p valid_items. Otherwise the result is undefined. - * - * Supports non-commutative reduction operators - * - * \smemreuse - * - * \par Snippet - * The code snippet below illustrates a max reduction within a single, partially-full - * block of 32 threads (one warp). - * \par - * \code - * #include - * - * __global__ void ExampleKernel(int *d_data, int valid_items) - * { - * // Specialize WarpReduce for type int - * typedef cub::WarpReduce WarpReduce; - * - * // Allocate WarpReduce shared memory for one warp - * __shared__ typename WarpReduce::TempStorage temp_storage; - * - * // Obtain one input item per thread if in range - * int thread_data; - * if (threadIdx.x < valid_items) - * thread_data = d_data[threadIdx.x]; - * - * // Return the warp-wide reductions to each lane0 - * int aggregate = WarpReduce(temp_storage).Reduce( - * thread_data, cub::Max(), valid_items); - * - * \endcode - * \par - * Suppose the input \p d_data is {0, 1, 2, 3, 4, ... and \p valid_items - * is \p 4. The corresponding output \p aggregate in thread0 is \p 3 (and is - * undefined in other threads). - * - * \tparam ReductionOp [inferred] Binary reduction operator type having member T operator()(const T &a, const T &b) - */ - template - __device__ __forceinline__ T Reduce( - T input, ///< [in] Calling thread's input - ReductionOp reduction_op, ///< [in] Binary reduction operator - int valid_items) ///< [in] Total number of valid items in the calling thread's logical warp (may be less than \p LOGICAL_WARP_THREADS) - { - return InternalWarpReduce(temp_storage).Reduce(input, valid_items, reduction_op); - } - - - /** - * \brief Computes a segmented reduction in the calling warp where segments are defined by head-flags. The reduction of each segment is returned to the first lane in that segment (which always includes lane0). - * - * Supports non-commutative reduction operators - * - * \smemreuse - * - * \par Snippet - * The code snippet below illustrates a head-segmented warp max - * reduction within a block of 32 threads (one warp). - * \par - * \code - * #include - * - * __global__ void ExampleKernel(...) - * { - * // Specialize WarpReduce for type int - * typedef cub::WarpReduce WarpReduce; - * - * // Allocate WarpReduce shared memory for one warp - * __shared__ typename WarpReduce::TempStorage temp_storage; - * - * // Obtain one input item and flag per thread - * int thread_data = ... - * int head_flag = ... - * - * // Return the warp-wide reductions to each lane0 - * int aggregate = WarpReduce(temp_storage).HeadSegmentedReduce( - * thread_data, head_flag, cub::Max()); - * - * \endcode - * \par - * Suppose the set of input \p thread_data and \p head_flag across the block of threads - * is {0, 1, 2, 3, ..., 31 and is {1, 0, 0, 0, 1, 0, 0, 0, ..., 1, 0, 0, 0, - * respectively. The corresponding output \p aggregate in threads 0, 4, 8, etc. will be - * \p 3, \p 7, \p 11, etc. 
(and is undefined in other threads). - * - * \tparam ReductionOp [inferred] Binary reduction operator type having member T operator()(const T &a, const T &b) - */ - template < - typename ReductionOp, - typename FlagT> - __device__ __forceinline__ T HeadSegmentedReduce( - T input, ///< [in] Calling thread's input - FlagT head_flag, ///< [in] Head flag denoting whether or not \p input is the start of a new segment - ReductionOp reduction_op) ///< [in] Reduction operator - { - return InternalWarpReduce(temp_storage).template SegmentedReduce(input, head_flag, reduction_op); - } - - - /** - * \brief Computes a segmented reduction in the calling warp where segments are defined by tail-flags. The reduction of each segment is returned to the first lane in that segment (which always includes lane0). - * - * Supports non-commutative reduction operators - * - * \smemreuse - * - * \par Snippet - * The code snippet below illustrates a tail-segmented warp max - * reduction within a block of 32 threads (one warp). - * \par - * \code - * #include - * - * __global__ void ExampleKernel(...) - * { - * // Specialize WarpReduce for type int - * typedef cub::WarpReduce WarpReduce; - * - * // Allocate WarpReduce shared memory for one warp - * __shared__ typename WarpReduce::TempStorage temp_storage; - * - * // Obtain one input item and flag per thread - * int thread_data = ... - * int tail_flag = ... - * - * // Return the warp-wide reductions to each lane0 - * int aggregate = WarpReduce(temp_storage).TailSegmentedReduce( - * thread_data, tail_flag, cub::Max()); - * - * \endcode - * \par - * Suppose the set of input \p thread_data and \p tail_flag across the block of threads - * is {0, 1, 2, 3, ..., 31 and is {0, 0, 0, 1, 0, 0, 0, 1, ..., 0, 0, 0, 1, - * respectively. The corresponding output \p aggregate in threads 0, 4, 8, etc. will be - * \p 3, \p 7, \p 11, etc. (and is undefined in other threads). - * - * \tparam ReductionOp [inferred] Binary reduction operator type having member T operator()(const T &a, const T &b) - */ - template < - typename ReductionOp, - typename FlagT> - __device__ __forceinline__ T TailSegmentedReduce( - T input, ///< [in] Calling thread's input - FlagT tail_flag, ///< [in] Tail flag denoting whether or not \p input is the end of the current segment - ReductionOp reduction_op) ///< [in] Reduction operator - { - return InternalWarpReduce(temp_storage).template SegmentedReduce(input, tail_flag, reduction_op); - } - - - - //@} end member group -}; - -/** @} */ // end group WarpModule - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/cub/warp/warp_scan.cuh b/ml-xgboost/cub/cub/warp/warp_scan.cuh deleted file mode 100644 index 68196e3..0000000 --- a/ml-xgboost/cub/cub/warp/warp_scan.cuh +++ /dev/null @@ -1,936 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/** - * \file - * The cub::WarpScan class provides [collective](index.html#sec0) methods for computing a parallel prefix scan of items partitioned across a CUDA thread warp. - */ - -#pragma once - -#include "specializations/warp_scan_shfl.cuh" -#include "specializations/warp_scan_smem.cuh" -#include "../thread/thread_operators.cuh" -#include "../util_arch.cuh" -#include "../util_type.cuh" -#include "../util_namespace.cuh" - -/// Optional outer namespace(s) -CUB_NS_PREFIX - -/// CUB namespace -namespace cub { - -/** - * \addtogroup WarpModule - * @{ - */ - -/** - * \brief The WarpScan class provides [collective](index.html#sec0) methods for computing a parallel prefix scan of items partitioned across a CUDA thread warp. ![](warp_scan_logo.png) - * - * \tparam T The scan input/output element type - * \tparam LOGICAL_WARP_THREADS [optional] The number of threads per "logical" warp (may be less than the number of hardware warp threads). Default is the warp size associated with the CUDA Compute Capability targeted by the compiler (e.g., 32 threads for SM20). - * \tparam PTX_ARCH [optional] \ptxversion - * - * \par Overview - * - Given a list of input elements and a binary reduction operator, a [prefix scan](http://en.wikipedia.org/wiki/Prefix_sum) - * produces an output list where each element is computed to be the reduction - * of the elements occurring earlier in the input list. Prefix sum - * connotes a prefix scan with the addition operator. The term \em inclusive indicates - * that the ith output reduction incorporates the ith input. - * The term \em exclusive indicates the ith input is not incorporated into - * the ith output reduction. - * - Supports non-commutative scan operators - * - Supports "logical" warps smaller than the physical warp size (e.g., a logical warp of 8 threads) - * - The number of entrant threads must be an multiple of \p LOGICAL_WARP_THREADS - * - * \par Performance Considerations - * - Uses special instructions when applicable (e.g., warp \p SHFL) - * - Uses synchronization-free communication between warp lanes when applicable - * - Incurs zero bank conflicts for most types - * - Computation is slightly more efficient (i.e., having lower instruction overhead) for: - * - Summation (vs. 
generic scan) - * - The architecture's warp size is a whole multiple of \p LOGICAL_WARP_THREADS - * - * \par Simple Examples - * \warpcollective{WarpScan} - * \par - * The code snippet below illustrates four concurrent warp prefix sums within a block of - * 128 threads (one per each of the 32-thread warps). - * \par - * \code - * #include - * - * __global__ void ExampleKernel(...) - * { - * // Specialize WarpScan for type int - * typedef cub::WarpScan WarpScan; - * - * // Allocate WarpScan shared memory for 4 warps - * __shared__ typename WarpScan::TempStorage temp_storage[4]; - * - * // Obtain one input item per thread - * int thread_data = ... - * - * // Compute warp-wide prefix sums - * int warp_id = threadIdx.x / 32; - * WarpScan(temp_storage[warp_id]).ExclusiveSum(thread_data, thread_data); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is {1, 1, 1, 1, ...}. - * The corresponding output \p thread_data in each of the four warps of threads will be - * 0, 1, 2, 3, ..., 31}. - * - * \par - * The code snippet below illustrates a single warp prefix sum within a block of - * 128 threads. - * \par - * \code - * #include - * - * __global__ void ExampleKernel(...) - * { - * // Specialize WarpScan for type int - * typedef cub::WarpScan WarpScan; - * - * // Allocate WarpScan shared memory for one warp - * __shared__ typename WarpScan::TempStorage temp_storage; - * ... - * - * // Only the first warp performs a prefix sum - * if (threadIdx.x < 32) - * { - * // Obtain one input item per thread - * int thread_data = ... - * - * // Compute warp-wide prefix sums - * WarpScan(temp_storage).ExclusiveSum(thread_data, thread_data); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the warp of threads is {1, 1, 1, 1, ...}. - * The corresponding output \p thread_data will be {0, 1, 2, 3, ..., 31}. - * - */ -template < - typename T, - int LOGICAL_WARP_THREADS = CUB_PTX_WARP_THREADS, - int PTX_ARCH = CUB_PTX_ARCH> -class WarpScan -{ -private: - - /****************************************************************************** - * Constants and type definitions - ******************************************************************************/ - - enum - { - /// Whether the logical warp size and the PTX warp size coincide - IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(PTX_ARCH)), - - /// Whether the logical warp size is a power-of-two - IS_POW_OF_TWO = ((LOGICAL_WARP_THREADS & (LOGICAL_WARP_THREADS - 1)) == 0), - - /// Whether the data type is an integer (which has fully-associative addition) - IS_INTEGER = ((Traits::CATEGORY == SIGNED_INTEGER) || (Traits::CATEGORY == UNSIGNED_INTEGER)) - }; - - /// Internal specialization. 
Use SHFL-based scan if (architecture is >= SM30) and (LOGICAL_WARP_THREADS is a power-of-two) - typedef typename If<(PTX_ARCH >= 300) && (IS_POW_OF_TWO), - WarpScanShfl, - WarpScanSmem >::Type InternalWarpScan; - - /// Shared memory storage layout type for WarpScan - typedef typename InternalWarpScan::TempStorage _TempStorage; - - - /****************************************************************************** - * Thread fields - ******************************************************************************/ - - /// Shared storage reference - _TempStorage &temp_storage; - unsigned int lane_id; - - - - /****************************************************************************** - * Public types - ******************************************************************************/ - -public: - - /// \smemstorage{WarpScan} - struct TempStorage : Uninitialized<_TempStorage> {}; - - - /******************************************************************//** - * \name Collective constructors - *********************************************************************/ - //@{ - - /** - * \brief Collective constructor using the specified memory allocation as temporary storage. Logical warp and lane identifiers are constructed from threadIdx.x. - */ - __device__ __forceinline__ WarpScan( - TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage - : - temp_storage(temp_storage.Alias()), - lane_id(IS_ARCH_WARP ? - LaneId() : - LaneId() % LOGICAL_WARP_THREADS) - {} - - - //@} end member group - /******************************************************************//** - * \name Inclusive prefix sums - *********************************************************************/ - //@{ - - - /** - * \brief Computes an inclusive prefix sum across the calling warp. - * - * \par - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates four concurrent warp-wide inclusive prefix sums within a block of - * 128 threads (one per each of the 32-thread warps). - * \par - * \code - * #include - * - * __global__ void ExampleKernel(...) - * { - * // Specialize WarpScan for type int - * typedef cub::WarpScan WarpScan; - * - * // Allocate WarpScan shared memory for 4 warps - * __shared__ typename WarpScan::TempStorage temp_storage[4]; - * - * // Obtain one input item per thread - * int thread_data = ... - * - * // Compute inclusive warp-wide prefix sums - * int warp_id = threadIdx.x / 32; - * WarpScan(temp_storage[warp_id]).InclusiveSum(thread_data, thread_data); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is {1, 1, 1, 1, ...}. - * The corresponding output \p thread_data in each of the four warps of threads will be - * 1, 2, 3, ..., 32}. - */ - __device__ __forceinline__ void InclusiveSum( - T input, ///< [in] Calling thread's input item. - T &inclusive_output) ///< [out] Calling thread's output item. May be aliased with \p input. - { - InclusiveScan(input, inclusive_output, cub::Sum()); - } - - - /** - * \brief Computes an inclusive prefix sum across the calling warp. Also provides every thread with the warp-wide \p warp_aggregate of all inputs. - * - * \par - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates four concurrent warp-wide inclusive prefix sums within a block of - * 128 threads (one per each of the 32-thread warps). - * \par - * \code - * #include - * - * __global__ void ExampleKernel(...) 
- * { - * // Specialize WarpScan for type int - * typedef cub::WarpScan WarpScan; - * - * // Allocate WarpScan shared memory for 4 warps - * __shared__ typename WarpScan::TempStorage temp_storage[4]; - * - * // Obtain one input item per thread - * int thread_data = ... - * - * // Compute inclusive warp-wide prefix sums - * int warp_aggregate; - * int warp_id = threadIdx.x / 32; - * WarpScan(temp_storage[warp_id]).InclusiveSum(thread_data, thread_data, warp_aggregate); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is {1, 1, 1, 1, ...}. - * The corresponding output \p thread_data in each of the four warps of threads will be - * 1, 2, 3, ..., 32}. Furthermore, \p warp_aggregate for all threads in all warps will be \p 32. - */ - __device__ __forceinline__ void InclusiveSum( - T input, ///< [in] Calling thread's input item. - T &inclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. - T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items. - { - InclusiveScan(input, inclusive_output, cub::Sum(), warp_aggregate); - } - - - //@} end member group - /******************************************************************//** - * \name Exclusive prefix sums - *********************************************************************/ - //@{ - - - /** - * \brief Computes an exclusive prefix sum across the calling warp. The value of 0 is applied as the initial value, and is assigned to \p exclusive_output in thread0. - * - * \par - * - \identityzero - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates four concurrent warp-wide exclusive prefix sums within a block of - * 128 threads (one per each of the 32-thread warps). - * \par - * \code - * #include - * - * __global__ void ExampleKernel(...) - * { - * // Specialize WarpScan for type int - * typedef cub::WarpScan WarpScan; - * - * // Allocate WarpScan shared memory for 4 warps - * __shared__ typename WarpScan::TempStorage temp_storage[4]; - * - * // Obtain one input item per thread - * int thread_data = ... - * - * // Compute exclusive warp-wide prefix sums - * int warp_id = threadIdx.x / 32; - * WarpScan(temp_storage[warp_id]).ExclusiveSum(thread_data, thread_data); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is {1, 1, 1, 1, ...}. - * The corresponding output \p thread_data in each of the four warps of threads will be - * 0, 1, 2, ..., 31}. - * - */ - __device__ __forceinline__ void ExclusiveSum( - T input, ///< [in] Calling thread's input item. - T &exclusive_output) ///< [out] Calling thread's output item. May be aliased with \p input. - { - T initial_value = 0; - ExclusiveScan(input, exclusive_output, initial_value, cub::Sum()); - } - - - /** - * \brief Computes an exclusive prefix sum across the calling warp. The value of 0 is applied as the initial value, and is assigned to \p exclusive_output in thread0. Also provides every thread with the warp-wide \p warp_aggregate of all inputs. - * - * \par - * - \identityzero - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates four concurrent warp-wide exclusive prefix sums within a block of - * 128 threads (one per each of the 32-thread warps). - * \par - * \code - * #include - * - * __global__ void ExampleKernel(...) 
- * { - * // Specialize WarpScan for type int - * typedef cub::WarpScan WarpScan; - * - * // Allocate WarpScan shared memory for 4 warps - * __shared__ typename WarpScan::TempStorage temp_storage[4]; - * - * // Obtain one input item per thread - * int thread_data = ... - * - * // Compute exclusive warp-wide prefix sums - * int warp_aggregate; - * int warp_id = threadIdx.x / 32; - * WarpScan(temp_storage[warp_id]).ExclusiveSum(thread_data, thread_data, warp_aggregate); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is {1, 1, 1, 1, ...}. - * The corresponding output \p thread_data in each of the four warps of threads will be - * 0, 1, 2, ..., 31}. Furthermore, \p warp_aggregate for all threads in all warps will be \p 32. - */ - __device__ __forceinline__ void ExclusiveSum( - T input, ///< [in] Calling thread's input item. - T &exclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. - T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items. - { - T initial_value = 0; - ExclusiveScan(input, exclusive_output, initial_value, cub::Sum(), warp_aggregate); - } - - - //@} end member group - /******************************************************************//** - * \name Inclusive prefix scans - *********************************************************************/ - //@{ - - /** - * \brief Computes an inclusive prefix scan using the specified binary scan functor across the calling warp. - * - * \par - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates four concurrent warp-wide inclusive prefix max scans within a block of - * 128 threads (one per each of the 32-thread warps). - * \par - * \code - * #include - * - * __global__ void ExampleKernel(...) - * { - * // Specialize WarpScan for type int - * typedef cub::WarpScan WarpScan; - * - * // Allocate WarpScan shared memory for 4 warps - * __shared__ typename WarpScan::TempStorage temp_storage[4]; - * - * // Obtain one input item per thread - * int thread_data = ... - * - * // Compute inclusive warp-wide prefix max scans - * int warp_id = threadIdx.x / 32; - * WarpScan(temp_storage[warp_id]).InclusiveScan(thread_data, thread_data, cub::Max()); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is {0, -1, 2, -3, ..., 126, -127}. - * The corresponding output \p thread_data in the first warp would be - * 0, 0, 2, 2, ..., 30, 30, the output for the second warp would be 32, 32, 34, 34, ..., 62, 62, etc. - * - * \tparam ScanOp [inferred] Binary scan operator type having member T operator()(const T &a, const T &b) - */ - template - __device__ __forceinline__ void InclusiveScan( - T input, ///< [in] Calling thread's input item. - T &inclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. - ScanOp scan_op) ///< [in] Binary scan operator - { - InternalWarpScan(temp_storage).InclusiveScan(input, inclusive_output, scan_op); - } - - - /** - * \brief Computes an inclusive prefix scan using the specified binary scan functor across the calling warp. Also provides every thread with the warp-wide \p warp_aggregate of all inputs. - * - * \par - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates four concurrent warp-wide inclusive prefix max scans within a block of - * 128 threads (one per each of the 32-thread warps). - * \par - * \code - * #include - * - * __global__ void ExampleKernel(...) 
- * { - * // Specialize WarpScan for type int - * typedef cub::WarpScan WarpScan; - * - * // Allocate WarpScan shared memory for 4 warps - * __shared__ typename WarpScan::TempStorage temp_storage[4]; - * - * // Obtain one input item per thread - * int thread_data = ... - * - * // Compute inclusive warp-wide prefix max scans - * int warp_aggregate; - * int warp_id = threadIdx.x / 32; - * WarpScan(temp_storage[warp_id]).InclusiveScan( - * thread_data, thread_data, cub::Max(), warp_aggregate); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is {0, -1, 2, -3, ..., 126, -127}. - * The corresponding output \p thread_data in the first warp would be - * 0, 0, 2, 2, ..., 30, 30, the output for the second warp would be 32, 32, 34, 34, ..., 62, 62, etc. - * Furthermore, \p warp_aggregate would be assigned \p 30 for threads in the first warp, \p 62 for threads - * in the second warp, etc. - * - * \tparam ScanOp [inferred] Binary scan operator type having member T operator()(const T &a, const T &b) - */ - template - __device__ __forceinline__ void InclusiveScan( - T input, ///< [in] Calling thread's input item. - T &inclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. - ScanOp scan_op, ///< [in] Binary scan operator - T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items. - { - InternalWarpScan(temp_storage).InclusiveScan(input, inclusive_output, scan_op, warp_aggregate); - } - - - //@} end member group - /******************************************************************//** - * \name Exclusive prefix scans - *********************************************************************/ - //@{ - - /** - * \brief Computes an exclusive prefix scan using the specified binary scan functor across the calling warp. Because no initial value is supplied, the \p output computed for warp-lane0 is undefined. - * - * \par - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates four concurrent warp-wide exclusive prefix max scans within a block of - * 128 threads (one per each of the 32-thread warps). - * \par - * \code - * #include - * - * __global__ void ExampleKernel(...) - * { - * // Specialize WarpScan for type int - * typedef cub::WarpScan WarpScan; - * - * // Allocate WarpScan shared memory for 4 warps - * __shared__ typename WarpScan::TempStorage temp_storage[4]; - * - * // Obtain one input item per thread - * int thread_data = ... - * - * // Compute exclusive warp-wide prefix max scans - * int warp_id = threadIdx.x / 32; - * WarpScan(temp_storage[warp_id]).ExclusiveScan(thread_data, thread_data, cub::Max()); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is {0, -1, 2, -3, ..., 126, -127}. - * The corresponding output \p thread_data in the first warp would be - * ?, 0, 0, 2, ..., 28, 30, the output for the second warp would be ?, 32, 32, 34, ..., 60, 62, etc. - * (The output \p thread_data in warp lane0 is undefined.) - * - * \tparam ScanOp [inferred] Binary scan operator type having member T operator()(const T &a, const T &b) - */ - template - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input item. - T &exclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. 
- ScanOp scan_op) ///< [in] Binary scan operator - { - InternalWarpScan internal(temp_storage); - - T inclusive_output; - internal.InclusiveScan(input, inclusive_output, scan_op); - - internal.Update( - input, - inclusive_output, - exclusive_output, - scan_op, - Int2Type()); - } - - - /** - * \brief Computes an exclusive prefix scan using the specified binary scan functor across the calling warp. - * - * \par - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates four concurrent warp-wide exclusive prefix max scans within a block of - * 128 threads (one per each of the 32-thread warps). - * \par - * \code - * #include - * - * __global__ void ExampleKernel(...) - * { - * // Specialize WarpScan for type int - * typedef cub::WarpScan WarpScan; - * - * // Allocate WarpScan shared memory for 4 warps - * __shared__ typename WarpScan::TempStorage temp_storage[4]; - * - * // Obtain one input item per thread - * int thread_data = ... - * - * // Compute exclusive warp-wide prefix max scans - * int warp_id = threadIdx.x / 32; - * WarpScan(temp_storage[warp_id]).ExclusiveScan(thread_data, thread_data, INT_MIN, cub::Max()); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is {0, -1, 2, -3, ..., 126, -127}. - * The corresponding output \p thread_data in the first warp would be - * INT_MIN, 0, 0, 2, ..., 28, 30, the output for the second warp would be 30, 32, 32, 34, ..., 60, 62, etc. - * - * \tparam ScanOp [inferred] Binary scan operator type having member T operator()(const T &a, const T &b) - */ - template - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input item. - T &exclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. - T initial_value, ///< [in] Initial value to seed the exclusive scan - ScanOp scan_op) ///< [in] Binary scan operator - { - InternalWarpScan internal(temp_storage); - - T inclusive_output; - internal.InclusiveScan(input, inclusive_output, scan_op); - - internal.Update( - input, - inclusive_output, - exclusive_output, - scan_op, - initial_value, - Int2Type()); - } - - - /** - * \brief Computes an exclusive prefix scan using the specified binary scan functor across the calling warp. Because no initial value is supplied, the \p output computed for warp-lane0 is undefined. Also provides every thread with the warp-wide \p warp_aggregate of all inputs. - * - * \par - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates four concurrent warp-wide exclusive prefix max scans within a block of - * 128 threads (one per each of the 32-thread warps). - * \par - * \code - * #include - * - * __global__ void ExampleKernel(...) - * { - * // Specialize WarpScan for type int - * typedef cub::WarpScan WarpScan; - * - * // Allocate WarpScan shared memory for 4 warps - * __shared__ typename WarpScan::TempStorage temp_storage[4]; - * - * // Obtain one input item per thread - * int thread_data = ... - * - * // Compute exclusive warp-wide prefix max scans - * int warp_aggregate; - * int warp_id = threadIdx.x / 32; - * WarpScan(temp_storage[warp_id]).ExclusiveScan(thread_data, thread_data, cub::Max(), warp_aggregate); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is {0, -1, 2, -3, ..., 126, -127}. - * The corresponding output \p thread_data in the first warp would be - * ?, 0, 0, 2, ..., 28, 30, the output for the second warp would be ?, 32, 32, 34, ..., 60, 62, etc. 
- * (The output \p thread_data in warp lane0 is undefined.) Furthermore, \p warp_aggregate would be assigned \p 30 for threads in the first warp, \p 62 for threads - * in the second warp, etc. - * - * \tparam ScanOp [inferred] Binary scan operator type having member T operator()(const T &a, const T &b) - */ - template - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input item. - T &exclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. - ScanOp scan_op, ///< [in] Binary scan operator - T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items. - { - InternalWarpScan internal(temp_storage); - - T inclusive_output; - internal.InclusiveScan(input, inclusive_output, scan_op); - - internal.Update( - input, - inclusive_output, - exclusive_output, - warp_aggregate, - scan_op, - Int2Type()); - } - - - /** - * \brief Computes an exclusive prefix scan using the specified binary scan functor across the calling warp. Also provides every thread with the warp-wide \p warp_aggregate of all inputs. - * - * \par - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates four concurrent warp-wide exclusive prefix max scans within a block of - * 128 threads (one per each of the 32-thread warps). - * \par - * \code - * #include - * - * __global__ void ExampleKernel(...) - * { - * // Specialize WarpScan for type int - * typedef cub::WarpScan WarpScan; - * - * // Allocate WarpScan shared memory for 4 warps - * __shared__ typename WarpScan::TempStorage temp_storage[4]; - * - * // Obtain one input item per thread - * int thread_data = ... - * - * // Compute exclusive warp-wide prefix max scans - * int warp_aggregate; - * int warp_id = threadIdx.x / 32; - * WarpScan(temp_storage[warp_id]).ExclusiveScan(thread_data, thread_data, INT_MIN, cub::Max(), warp_aggregate); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is {0, -1, 2, -3, ..., 126, -127}. - * The corresponding output \p thread_data in the first warp would be - * INT_MIN, 0, 0, 2, ..., 28, 30, the output for the second warp would be 30, 32, 32, 34, ..., 60, 62, etc. - * Furthermore, \p warp_aggregate would be assigned \p 30 for threads in the first warp, \p 62 for threads - * in the second warp, etc. - * - * \tparam ScanOp [inferred] Binary scan operator type having member T operator()(const T &a, const T &b) - */ - template - __device__ __forceinline__ void ExclusiveScan( - T input, ///< [in] Calling thread's input item. - T &exclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. - T initial_value, ///< [in] Initial value to seed the exclusive scan - ScanOp scan_op, ///< [in] Binary scan operator - T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items. - { - InternalWarpScan internal(temp_storage); - - T inclusive_output; - internal.InclusiveScan(input, inclusive_output, scan_op); - - internal.Update( - input, - inclusive_output, - exclusive_output, - warp_aggregate, - scan_op, - initial_value, - Int2Type()); - } - - - //@} end member group - /******************************************************************//** - * \name Combination (inclusive & exclusive) prefix scans - *********************************************************************/ - //@{ - - - /** - * \brief Computes both inclusive and exclusive prefix scans using the specified binary scan functor across the calling warp. 
Because no initial value is supplied, the \p exclusive_output computed for warp-lane0 is undefined. - * - * \par - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates four concurrent warp-wide exclusive prefix max scans within a block of - * 128 threads (one per each of the 32-thread warps). - * \par - * \code - * #include - * - * __global__ void ExampleKernel(...) - * { - * // Specialize WarpScan for type int - * typedef cub::WarpScan WarpScan; - * - * // Allocate WarpScan shared memory for 4 warps - * __shared__ typename WarpScan::TempStorage temp_storage[4]; - * - * // Obtain one input item per thread - * int thread_data = ... - * - * // Compute exclusive warp-wide prefix max scans - * int inclusive_partial, exclusive_partial; - * WarpScan(temp_storage[warp_id]).Scan(thread_data, inclusive_partial, exclusive_partial, cub::Max()); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is {0, -1, 2, -3, ..., 126, -127}. - * The corresponding output \p inclusive_partial in the first warp would be - * 0, 0, 2, 2, ..., 30, 30, the output for the second warp would be 32, 32, 34, 34, ..., 62, 62, etc. - * The corresponding output \p exclusive_partial in the first warp would be - * ?, 0, 0, 2, ..., 28, 30, the output for the second warp would be ?, 32, 32, 34, ..., 60, 62, etc. - * (The output \p thread_data in warp lane0 is undefined.) - * - * \tparam ScanOp [inferred] Binary scan operator type having member T operator()(const T &a, const T &b) - */ - template - __device__ __forceinline__ void Scan( - T input, ///< [in] Calling thread's input item. - T &inclusive_output, ///< [out] Calling thread's inclusive-scan output item. - T &exclusive_output, ///< [out] Calling thread's exclusive-scan output item. - ScanOp scan_op) ///< [in] Binary scan operator - { - InternalWarpScan internal(temp_storage); - - internal.InclusiveScan(input, inclusive_output, scan_op); - - internal.Update( - input, - inclusive_output, - exclusive_output, - scan_op, - Int2Type()); - } - - - /** - * \brief Computes both inclusive and exclusive prefix scans using the specified binary scan functor across the calling warp. - * - * \par - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates four concurrent warp-wide prefix max scans within a block of - * 128 threads (one per each of the 32-thread warps). - * \par - * \code - * #include - * - * __global__ void ExampleKernel(...) - * { - * // Specialize WarpScan for type int - * typedef cub::WarpScan WarpScan; - * - * // Allocate WarpScan shared memory for 4 warps - * __shared__ typename WarpScan::TempStorage temp_storage[4]; - * - * // Obtain one input item per thread - * int thread_data = ... - * - * // Compute inclusive warp-wide prefix max scans - * int warp_id = threadIdx.x / 32; - * int inclusive_partial, exclusive_partial; - * WarpScan(temp_storage[warp_id]).Scan(thread_data, inclusive_partial, exclusive_partial, INT_MIN, cub::Max()); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is {0, -1, 2, -3, ..., 126, -127}. - * The corresponding output \p inclusive_partial in the first warp would be - * 0, 0, 2, 2, ..., 30, 30, the output for the second warp would be 32, 32, 34, 34, ..., 62, 62, etc. - * The corresponding output \p exclusive_partial in the first warp would be - * INT_MIN, 0, 0, 2, ..., 28, 30, the output for the second warp would be 30, 32, 32, 34, ..., 60, 62, etc. 
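The Scan overload taking an initial value, described above and defined just below, returns both partials in one call and avoids the undefined lane-0 exclusive output. A compilable sketch built around that call, assuming CUB is on the include path; the alternating-sign input follows the documented example:

#include <climits>
#include <cstdio>
#include <cub/cub.cuh>

__global__ void WarpScanBothKernel(const int *d_in, int *d_incl, int *d_excl)
{
    // Specialize WarpScan for type int (one 32-thread warp)
    typedef cub::WarpScan<int> WarpScan;
    __shared__ typename WarpScan::TempStorage temp_storage;

    int thread_data = d_in[threadIdx.x];
    int inclusive_partial, exclusive_partial;
    WarpScan(temp_storage).Scan(
        thread_data, inclusive_partial, exclusive_partial, INT_MIN, cub::Max());

    d_incl[threadIdx.x] = inclusive_partial;
    d_excl[threadIdx.x] = exclusive_partial;
}

int main()
{
    int h_in[32], h_incl[32], h_excl[32];
    for (int i = 0; i < 32; ++i)
        h_in[i] = (i % 2) ? -i : i;            // 0, -1, 2, -3, ...

    int *d_in, *d_incl, *d_excl;
    cudaMalloc(&d_in,   sizeof(h_in));
    cudaMalloc(&d_incl, sizeof(h_incl));
    cudaMalloc(&d_excl, sizeof(h_excl));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);

    WarpScanBothKernel<<<1, 32>>>(d_in, d_incl, d_excl);
    cudaMemcpy(h_incl, d_incl, sizeof(h_incl), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_excl, d_excl, sizeof(h_excl), cudaMemcpyDeviceToHost);

    // Expected: incl = 0, 0, 2, 2, ...; excl = INT_MIN, 0, 0, 2, ...
    for (int i = 0; i < 32; ++i)
        printf("lane %2d: incl=%3d excl=%3d\n", i, h_incl[i], h_excl[i]);
    cudaFree(d_in); cudaFree(d_incl); cudaFree(d_excl);
    return 0;
}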
- * - * \tparam ScanOp [inferred] Binary scan operator type having member T operator()(const T &a, const T &b) - */ - template - __device__ __forceinline__ void Scan( - T input, ///< [in] Calling thread's input item. - T &inclusive_output, ///< [out] Calling thread's inclusive-scan output item. - T &exclusive_output, ///< [out] Calling thread's exclusive-scan output item. - T initial_value, ///< [in] Initial value to seed the exclusive scan - ScanOp scan_op) ///< [in] Binary scan operator - { - InternalWarpScan internal(temp_storage); - - internal.InclusiveScan(input, inclusive_output, scan_op); - - internal.Update( - input, - inclusive_output, - exclusive_output, - scan_op, - initial_value, - Int2Type()); - } - - - - //@} end member group - /******************************************************************//** - * \name Data exchange - *********************************************************************/ - //@{ - - /** - * \brief Broadcast the value \p input from warp-lanesrc_lane to all lanes in the warp - * - * \par - * - \smemreuse - * - * \par Snippet - * The code snippet below illustrates the warp-wide broadcasts of values from - * lanes0 in each of four warps to all other threads in those warps. - * \par - * \code - * #include - * - * __global__ void ExampleKernel(...) - * { - * // Specialize WarpScan for type int - * typedef cub::WarpScan WarpScan; - * - * // Allocate WarpScan shared memory for 4 warps - * __shared__ typename WarpScan::TempStorage temp_storage[4]; - * - * // Obtain one input item per thread - * int thread_data = ... - * - * // Broadcast from lane0 in each warp to all other threads in the warp - * int warp_id = threadIdx.x / 32; - * thread_data = WarpScan(temp_storage[warp_id]).Broadcast(thread_data, 0); - * - * \endcode - * \par - * Suppose the set of input \p thread_data across the block of threads is {0, 1, 2, 3, ..., 127}. - * The corresponding output \p thread_data will be - * {0, 0, ..., 0} in warp0, - * {32, 32, ..., 32} in warp1, - * {64, 64, ..., 64} in warp2, etc. 
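For a full 32-lane warp, Broadcast amounts to a warp shuffle from src_lane. A raw-CUDA sketch of that equivalence follows (requires CUDA 9's synchronizing shuffle; note that CUB's version also supports logical warps narrower than the hardware warp, which a bare __shfl_sync does not):

#include <cstdio>

__global__ void BroadcastKernel(int *d_data)
{
    int thread_data = d_data[threadIdx.x];
    // Every lane of the (full) warp receives lane 0's value,
    // mirroring WarpScan::Broadcast(thread_data, 0).
    thread_data = __shfl_sync(0xffffffffu, thread_data, 0);
    d_data[threadIdx.x] = thread_data;
}

int main()
{
    int h_data[32];
    for (int i = 0; i < 32; ++i) h_data[i] = i;

    int *d_data;
    cudaMalloc(&d_data, sizeof(h_data));
    cudaMemcpy(d_data, h_data, sizeof(h_data), cudaMemcpyHostToDevice);
    BroadcastKernel<<<1, 32>>>(d_data);
    cudaMemcpy(h_data, d_data, sizeof(h_data), cudaMemcpyDeviceToHost);

    for (int i = 0; i < 32; ++i)
        printf("%d ", h_data[i]);              // all zeros
    printf("\n");
    cudaFree(d_data);
    return 0;
}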
- */ - __device__ __forceinline__ T Broadcast( - T input, ///< [in] The value to broadcast - unsigned int src_lane) ///< [in] Which warp lane is to do the broadcasting - { - return InternalWarpScan(temp_storage).Broadcast(input, src_lane); - } - - //@} end member group - -}; - -/** @} */ // end group WarpModule - -} // CUB namespace -CUB_NS_POSTFIX // Optional outer namespace(s) diff --git a/ml-xgboost/cub/eclipse code style profile.xml b/ml-xgboost/cub/eclipse code style profile.xml deleted file mode 100644 index 2456fa7..0000000 --- a/ml-xgboost/cub/eclipse code style profile.xml +++ /dev/null @@ -1,155 +0,0 @@ [155 deleted lines of Eclipse code-formatter XML omitted; the tag content did not survive extraction, leaving only bare "-" diff markers] diff --git a/ml-xgboost/cub/examples/block/.gitignore b/ml-xgboost/cub/examples/block/.gitignore deleted file mode 100644 index 9dad963..0000000 --- a/ml-xgboost/cub/examples/block/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -/bin -/Debug -/Release -/cuda55.sdf -/cuda55.suo -/cuda60.sdf -/cuda60.suo diff --git a/ml-xgboost/cub/examples/block/Makefile b/ml-xgboost/cub/examples/block/Makefile deleted file mode 100644 index 85f2b2a..0000000 --- a/ml-xgboost/cub/examples/block/Makefile +++ /dev/null @@ -1,128 +0,0 @@ -#/****************************************************************************** -# * Copyright (c) 2011, Duane Merrill. All rights reserved. -# * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. -# * -# * Redistribution and use in source and binary forms, with or without -# * modification, are permitted provided that the following conditions are met: -# * * Redistributions of source code must retain the above copyright -# * notice, this list of conditions and the following disclaimer. -# * * Redistributions in binary form must reproduce the above copyright -# * notice, this list of conditions and the following disclaimer in the -# * documentation and/or other materials provided with the distribution. -# * * Neither the name of the NVIDIA CORPORATION nor the -# * names of its contributors may be used to endorse or promote products -# * derived from this software without specific prior written permission. -# * -# * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY -# * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -# * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# * -#******************************************************************************/ - -#------------------------------------------------------------------------------- -# -# Makefile usage -# -# make [sm=] [cdp=<0|1>] [force32=<0|1>] [abi=<0|1>] [open64=<0|1>] [verbose=<0|1>] [keep=<0|1>] -# -#------------------------------------------------------------------------------- - -include ../../common.mk - - -#------------------------------------------------------------------------------- -# Includes -#------------------------------------------------------------------------------- - -INC += -I$(CUB_DIR) -I$(CUB_DIR)test - - - -#------------------------------------------------------------------------------- -# Dependency Lists -#------------------------------------------------------------------------------- - -rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d)) - -DEPS = $(CUB_DEPS) \ - $(CUB_DIR)test/Makefile \ - $(CUB_DIR)test/test_util.h \ - $(CUB_DIR)test/mersenne.h \ - -ALL = example_block_radix_sort \ - example_block_reduce \ - example_block_scan - - - -#------------------------------------------------------------------------------- -# make default -#------------------------------------------------------------------------------- - -default: - - -#------------------------------------------------------------------------------- -# make clean -#------------------------------------------------------------------------------- - -clean : - rm -f bin/*$(CPU_ARCH_SUFFIX)* - rm -f *.i* *.cubin *.cu.c *.cudafe* *.fatbin.c *.ptx *.hash *.cu.cpp *.o - - -#------------------------------------------------------------------------------- -# make all -#------------------------------------------------------------------------------- - -all : $(ALL) - -#------------------------------------------------------------------------------- -# make run -#------------------------------------------------------------------------------- - -run : - for i in $(ALL); do ./bin/$${i}_$(BIN_SUFFIX) --device=$(device) || exit 1; done - - - - -#------------------------------------------------------------------------------- -# make example_block_reduce -#------------------------------------------------------------------------------- - -example_block_reduce: bin/example_block_reduce_$(BIN_SUFFIX) - -bin/example_block_reduce_$(BIN_SUFFIX) : example_block_reduce.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_block_reduce_$(BIN_SUFFIX) example_block_reduce.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make example_block_scan -#------------------------------------------------------------------------------- - -example_block_scan: bin/example_block_scan_$(BIN_SUFFIX) - -bin/example_block_scan_$(BIN_SUFFIX) : example_block_scan.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_block_scan_$(BIN_SUFFIX) example_block_scan.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make example_block_radix_sort -#------------------------------------------------------------------------------- - -example_block_radix_sort: bin/example_block_radix_sort_$(BIN_SUFFIX) - -bin/example_block_radix_sort_$(BIN_SUFFIX) : example_block_radix_sort.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_block_radix_sort_$(BIN_SUFFIX) example_block_radix_sort.cu $(NVCCFLAGS) 
$(CPU_ARCH) $(INC) $(LIBS) -O3 - diff --git a/ml-xgboost/cub/examples/block/example_block_radix_sort.cu b/ml-xgboost/cub/examples/block/example_block_radix_sort.cu deleted file mode 100644 index a4fe088..0000000 --- a/ml-xgboost/cub/examples/block/example_block_radix_sort.cu +++ /dev/null @@ -1,323 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/****************************************************************************** - * Simple demonstration of cub::BlockRadixSort - * - * To compile using the command line: - * nvcc -arch=sm_XX example_block_radix_sort.cu -I../.. 
-lcudart -O3 - * - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console (define before including cub.h) -#define CUB_STDERR - -#include -#include -#include - -#include -#include -#include - -#include "../../test/test_util.h" - -using namespace cub; - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -/// Verbose output -bool g_verbose = false; - -/// Timing iterations -int g_timing_iterations = 100; - -/// Default grid size -int g_grid_size = 1; - -/// Uniform key samples -bool g_uniform_keys; - - -//--------------------------------------------------------------------- -// Kernels -//--------------------------------------------------------------------- - -/** - * Simple kernel for performing a block-wide sorting over integers - */ -template < - typename Key, - int BLOCK_THREADS, - int ITEMS_PER_THREAD> -__launch_bounds__ (BLOCK_THREADS) -__global__ void BlockSortKernel( - Key *d_in, // Tile of input - Key *d_out, // Tile of output - clock_t *d_elapsed) // Elapsed cycle count of block scan -{ - enum { TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD }; - - // Specialize BlockLoad type for our thread block (uses warp-striped loads for coalescing, then transposes in shared memory to a blocked arrangement) - typedef BlockLoad BlockLoadT; - - // Specialize BlockRadixSort type for our thread block - typedef BlockRadixSort BlockRadixSortT; - - // Shared memory - __shared__ union - { - typename BlockLoadT::TempStorage load; - typename BlockRadixSortT::TempStorage sort; - } temp_storage; - - // Per-thread tile items - Key items[ITEMS_PER_THREAD]; - - // Our current block's offset - int block_offset = blockIdx.x * TILE_SIZE; - - // Load items into a blocked arrangement - BlockLoadT(temp_storage.load).Load(d_in + block_offset, items); - - // Barrier for smem reuse - __syncthreads(); - - // Start cycle timer - clock_t start = clock(); - - // Sort keys - BlockRadixSortT(temp_storage.sort).SortBlockedToStriped(items); - - // Stop cycle timer - clock_t stop = clock(); - - // Store output in striped fashion - StoreDirectStriped(threadIdx.x, d_out + block_offset, items); - - // Store elapsed clocks - if (threadIdx.x == 0) - { - d_elapsed[blockIdx.x] = (start > stop) ? start - stop : stop - start; - } -} - - - -//--------------------------------------------------------------------- -// Host utilities -//--------------------------------------------------------------------- - - -/** - * Initialize sorting problem (and solution). 
- */ -template -void Initialize( - Key *h_in, - Key *h_reference, - int num_items, - int tile_size) -{ - for (int i = 0; i < num_items; ++i) - { - if (g_uniform_keys) - { - h_in[i] = 0; - } - else - { - RandomBits(h_in[i]); - } - h_reference[i] = h_in[i]; - } - - // Only sort the first tile - std::sort(h_reference, h_reference + tile_size); -} - - -/** - * Test BlockScan - */ -template < - typename Key, - int BLOCK_THREADS, - int ITEMS_PER_THREAD> -void Test() -{ - const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD; - - // Allocate host arrays - Key *h_in = new Key[TILE_SIZE * g_grid_size]; - Key *h_reference = new Key[TILE_SIZE * g_grid_size]; - clock_t *h_elapsed = new clock_t[g_grid_size]; - - // Initialize problem and reference output on host - Initialize(h_in, h_reference, TILE_SIZE * g_grid_size, TILE_SIZE); - - // Initialize device arrays - Key *d_in = NULL; - Key *d_out = NULL; - clock_t *d_elapsed = NULL; - CubDebugExit(cudaMalloc((void**)&d_in, sizeof(Key) * TILE_SIZE * g_grid_size)); - CubDebugExit(cudaMalloc((void**)&d_out, sizeof(Key) * TILE_SIZE * g_grid_size)); - CubDebugExit(cudaMalloc((void**)&d_elapsed, sizeof(clock_t) * g_grid_size)); - - // Display input problem data - if (g_verbose) - { - printf("Input data: "); - for (int i = 0; i < TILE_SIZE; i++) - std::cout << h_in[i] << ", "; - printf("\n\n"); - } - - // Kernel props - int max_sm_occupancy; - CubDebugExit(MaxSmOccupancy(max_sm_occupancy, BlockSortKernel, BLOCK_THREADS)); - - // Copy problem to device - CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(Key) * TILE_SIZE * g_grid_size, cudaMemcpyHostToDevice)); - - printf("BlockRadixSort %d items (%d timing iterations, %d blocks, %d threads, %d items per thread, %d SM occupancy):\n", - TILE_SIZE * g_grid_size, g_timing_iterations, g_grid_size, BLOCK_THREADS, ITEMS_PER_THREAD, max_sm_occupancy); - fflush(stdout); - - // Run kernel once to prime caches and check result - BlockSortKernel<<>>( - d_in, - d_out, - d_elapsed); - - // Check for kernel errors and STDIO from the kernel, if any - CubDebugExit(cudaPeekAtLastError()); - CubDebugExit(cudaDeviceSynchronize()); - - // Check results - printf("\tOutput items: "); - int compare = CompareDeviceResults(h_reference, d_out, TILE_SIZE, g_verbose, g_verbose); - printf("%s\n", compare ? 
"FAIL" : "PASS"); - AssertEquals(0, compare); - fflush(stdout); - - // Run this several times and average the performance results - GpuTimer timer; - float elapsed_millis = 0.0; - unsigned long long elapsed_clocks = 0; - - for (int i = 0; i < g_timing_iterations; ++i) - { - timer.Start(); - - // Run kernel - BlockSortKernel<<>>( - d_in, - d_out, - d_elapsed); - - timer.Stop(); - elapsed_millis += timer.ElapsedMillis(); - - // Copy clocks from device - CubDebugExit(cudaMemcpy(h_elapsed, d_elapsed, sizeof(clock_t) * g_grid_size, cudaMemcpyDeviceToHost)); - for (int i = 0; i < g_grid_size; i++) - elapsed_clocks += h_elapsed[i]; - } - - // Check for kernel errors and STDIO from the kernel, if any - CubDebugExit(cudaDeviceSynchronize()); - - // Display timing results - float avg_millis = elapsed_millis / g_timing_iterations; - float avg_items_per_sec = float(TILE_SIZE * g_grid_size) / avg_millis / 1000.0f; - double avg_clocks = double(elapsed_clocks) / g_timing_iterations / g_grid_size; - double avg_clocks_per_item = avg_clocks / TILE_SIZE; - - printf("\tAverage BlockRadixSort::SortBlocked clocks: %.3f\n", avg_clocks); - printf("\tAverage BlockRadixSort::SortBlocked clocks per item: %.3f\n", avg_clocks_per_item); - printf("\tAverage kernel millis: %.4f\n", avg_millis); - printf("\tAverage million items / sec: %.4f\n", avg_items_per_sec); - fflush(stdout); - - // Cleanup - if (h_in) delete[] h_in; - if (h_reference) delete[] h_reference; - if (h_elapsed) delete[] h_elapsed; - if (d_in) CubDebugExit(cudaFree(d_in)); - if (d_out) CubDebugExit(cudaFree(d_out)); - if (d_elapsed) CubDebugExit(cudaFree(d_elapsed)); -} - - -/** - * Main - */ -int main(int argc, char** argv) -{ - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - g_uniform_keys = args.CheckCmdLineFlag("uniform"); - args.GetCmdLineArgument("i", g_timing_iterations); - args.GetCmdLineArgument("grid-size", g_grid_size); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--device=] " - "[--i=]" - "[--grid-size=]" - "[--v] " - "\n", argv[0], g_timing_iterations, g_grid_size); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - fflush(stdout); - - // Run tests - printf("\nuint32:\n"); fflush(stdout); - Test(); - printf("\n"); fflush(stdout); - - printf("\nfp32:\n"); fflush(stdout); - Test(); - printf("\n"); fflush(stdout); - - printf("\nuint8:\n"); fflush(stdout); - Test(); - printf("\n"); fflush(stdout); - - return 0; -} - diff --git a/ml-xgboost/cub/examples/block/example_block_reduce.cu b/ml-xgboost/cub/examples/block/example_block_reduce.cu deleted file mode 100644 index 723f6e2..0000000 --- a/ml-xgboost/cub/examples/block/example_block_reduce.cu +++ /dev/null @@ -1,290 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/****************************************************************************** - * Simple demonstration of cub::BlockReduce - * - * To compile using the command line: - * nvcc -arch=sm_XX example_block_reduce.cu -I../.. -lcudart -O3 - * - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console (define before including cub.h) -#define CUB_STDERR - -#include -#include - -#include -#include -#include - -#include "../../test/test_util.h" - -using namespace cub; - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -/// Verbose output -bool g_verbose = false; - -/// Timing iterations -int g_timing_iterations = 100; - -/// Default grid size -int g_grid_size = 1; - - - -//--------------------------------------------------------------------- -// Kernels -//--------------------------------------------------------------------- - -/** - * Simple kernel for performing a block-wide exclusive prefix sum over integers - */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - BlockReduceAlgorithm ALGORITHM> -__global__ void BlockSumKernel( - int *d_in, // Tile of input - int *d_out, // Tile aggregate - clock_t *d_elapsed) // Elapsed cycle count of block reduction -{ - // Specialize BlockReduce type for our thread block - typedef BlockReduce BlockReduceT; - - // Shared memory - __shared__ typename BlockReduceT::TempStorage temp_storage; - - // Per-thread tile data - int data[ITEMS_PER_THREAD]; - LoadDirectStriped(threadIdx.x, d_in, data); - - // Start cycle timer - clock_t start = clock(); - - // Compute sum - int aggregate = BlockReduceT(temp_storage).Sum(data); - - // Stop cycle timer - clock_t stop = clock(); - - // Store aggregate and elapsed clocks - if (threadIdx.x == 0) - { - *d_elapsed = (start > stop) ? start - stop : stop - start; - *d_out = aggregate; - } -} - - - -//--------------------------------------------------------------------- -// Host utilities -//--------------------------------------------------------------------- - -/** - * Initialize reduction problem (and solution). 
- * Returns the aggregate - */ -int Initialize(int *h_in, int num_items) -{ - int inclusive = 0; - - for (int i = 0; i < num_items; ++i) - { - h_in[i] = i % 17; - inclusive += h_in[i]; - } - - return inclusive; -} - - -/** - * Test thread block reduction - */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - BlockReduceAlgorithm ALGORITHM> -void Test() -{ - const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD; - - // Allocate host arrays - int *h_in = new int[TILE_SIZE]; - int *h_gpu = new int[TILE_SIZE + 1]; - - // Initialize problem and reference output on host - int h_aggregate = Initialize(h_in, TILE_SIZE); - - // Initialize device arrays - int *d_in = NULL; - int *d_out = NULL; - clock_t *d_elapsed = NULL; - cudaMalloc((void**)&d_in, sizeof(int) * TILE_SIZE); - cudaMalloc((void**)&d_out, sizeof(int) * 1); - cudaMalloc((void**)&d_elapsed, sizeof(clock_t)); - - // Display input problem data - if (g_verbose) - { - printf("Input data: "); - for (int i = 0; i < TILE_SIZE; i++) - printf("%d, ", h_in[i]); - printf("\n\n"); - } - - // Kernel props - int max_sm_occupancy; - CubDebugExit(MaxSmOccupancy(max_sm_occupancy, BlockSumKernel, BLOCK_THREADS)); - - // Copy problem to device - cudaMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, cudaMemcpyHostToDevice); - - printf("BlockReduce algorithm %s on %d items (%d timing iterations, %d blocks, %d threads, %d items per thread, %d SM occupancy):\n", - (ALGORITHM == BLOCK_REDUCE_RAKING) ? "BLOCK_REDUCE_RAKING" : "BLOCK_REDUCE_WARP_REDUCTIONS", - TILE_SIZE, g_timing_iterations, g_grid_size, BLOCK_THREADS, ITEMS_PER_THREAD, max_sm_occupancy); - - // Run aggregate/prefix kernel - BlockSumKernel<<>>( - d_in, - d_out, - d_elapsed); - - // Check total aggregate - printf("\tAggregate: "); - int compare = CompareDeviceResults(&h_aggregate, d_out, 1, g_verbose, g_verbose); - printf("%s\n", compare ? 
"FAIL" : "PASS"); - AssertEquals(0, compare); - - // Run this several times and average the performance results - GpuTimer timer; - float elapsed_millis = 0.0; - clock_t elapsed_clocks = 0; - - for (int i = 0; i < g_timing_iterations; ++i) - { - // Copy problem to device - cudaMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, cudaMemcpyHostToDevice); - - timer.Start(); - - // Run aggregate/prefix kernel - BlockSumKernel<<>>( - d_in, - d_out, - d_elapsed); - - timer.Stop(); - elapsed_millis += timer.ElapsedMillis(); - - // Copy clocks from device - clock_t clocks; - CubDebugExit(cudaMemcpy(&clocks, d_elapsed, sizeof(clock_t), cudaMemcpyDeviceToHost)); - elapsed_clocks += clocks; - - } - - // Check for kernel errors and STDIO from the kernel, if any - CubDebugExit(cudaPeekAtLastError()); - CubDebugExit(cudaDeviceSynchronize()); - - // Display timing results - float avg_millis = elapsed_millis / g_timing_iterations; - float avg_items_per_sec = float(TILE_SIZE * g_grid_size) / avg_millis / 1000.0f; - float avg_clocks = float(elapsed_clocks) / g_timing_iterations; - float avg_clocks_per_item = avg_clocks / TILE_SIZE; - - printf("\tAverage BlockReduce::Sum clocks: %.3f\n", avg_clocks); - printf("\tAverage BlockReduce::Sum clocks per item: %.3f\n", avg_clocks_per_item); - printf("\tAverage kernel millis: %.4f\n", avg_millis); - printf("\tAverage million items / sec: %.4f\n", avg_items_per_sec); - - // Cleanup - if (h_in) delete[] h_in; - if (h_gpu) delete[] h_gpu; - if (d_in) cudaFree(d_in); - if (d_out) cudaFree(d_out); - if (d_elapsed) cudaFree(d_elapsed); -} - - -/** - * Main - */ -int main(int argc, char** argv) -{ - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - args.GetCmdLineArgument("i", g_timing_iterations); - args.GetCmdLineArgument("grid-size", g_grid_size); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--device=] " - "[--i=] " - "[--grid-size=] " - "[--v] " - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - - // Run tests - Test<1024, 1, BLOCK_REDUCE_RAKING>(); - Test<512, 2, BLOCK_REDUCE_RAKING>(); - Test<256, 4, BLOCK_REDUCE_RAKING>(); - Test<128, 8, BLOCK_REDUCE_RAKING>(); - Test<64, 16, BLOCK_REDUCE_RAKING>(); - Test<32, 32, BLOCK_REDUCE_RAKING>(); - Test<16, 64, BLOCK_REDUCE_RAKING>(); - - printf("-------------\n"); - - Test<1024, 1, BLOCK_REDUCE_WARP_REDUCTIONS>(); - Test<512, 2, BLOCK_REDUCE_WARP_REDUCTIONS>(); - Test<256, 4, BLOCK_REDUCE_WARP_REDUCTIONS>(); - Test<128, 8, BLOCK_REDUCE_WARP_REDUCTIONS>(); - Test<64, 16, BLOCK_REDUCE_WARP_REDUCTIONS>(); - Test<32, 32, BLOCK_REDUCE_WARP_REDUCTIONS>(); - Test<16, 64, BLOCK_REDUCE_WARP_REDUCTIONS>(); - - return 0; -} - diff --git a/ml-xgboost/cub/examples/block/example_block_scan.cu b/ml-xgboost/cub/examples/block/example_block_scan.cu deleted file mode 100644 index 104d4de..0000000 --- a/ml-xgboost/cub/examples/block/example_block_scan.cu +++ /dev/null @@ -1,334 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/****************************************************************************** - * Simple demonstration of cub::BlockScan - * - * To compile using the command line: - * nvcc -arch=sm_XX example_block_scan.cu -I../.. -lcudart -O3 - * - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console (define before including cub.h) -#define CUB_STDERR - -#include -#include - -#include -#include -#include - -#include "../../test/test_util.h" - -using namespace cub; - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -/// Verbose output -bool g_verbose = false; - -/// Timing iterations -int g_timing_iterations = 100; - -/// Default grid size -int g_grid_size = 1; - - - -//--------------------------------------------------------------------- -// Kernels -//--------------------------------------------------------------------- - -/** - * Simple kernel for performing a block-wide exclusive prefix sum over integers - */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - BlockScanAlgorithm ALGORITHM> -__global__ void BlockPrefixSumKernel( - int *d_in, // Tile of input - int *d_out, // Tile of output - clock_t *d_elapsed) // Elapsed cycle count of block scan -{ - // Specialize BlockLoad type for our thread block (uses warp-striped loads for coalescing, then transposes in shared memory to a blocked arrangement) - typedef BlockLoad BlockLoadT; - - // Specialize BlockStore type for our thread block (uses warp-striped loads for coalescing, then transposes in shared memory to a blocked arrangement) - typedef BlockStore BlockStoreT; - - // Specialize BlockScan type for our thread block - typedef BlockScan BlockScanT; - - // Shared memory - __shared__ union - { - typename BlockLoadT::TempStorage load; - typename BlockStoreT::TempStorage store; - typename BlockScanT::TempStorage scan; - } temp_storage; - - // Per-thread tile data - int data[ITEMS_PER_THREAD]; - - // Load items into a blocked arrangement - BlockLoadT(temp_storage.load).Load(d_in, data); - - // Barrier for smem reuse - __syncthreads(); - - // Start cycle timer - 
clock_t start = clock(); - - // Compute exclusive prefix sum - int aggregate; - BlockScanT(temp_storage.scan).ExclusiveSum(data, data, aggregate); - - // Stop cycle timer - clock_t stop = clock(); - - // Barrier for smem reuse - __syncthreads(); - - // Store items from a blocked arrangement - BlockStoreT(temp_storage.store).Store(d_out, data); - - // Store aggregate and elapsed clocks - if (threadIdx.x == 0) - { - *d_elapsed = (start > stop) ? start - stop : stop - start; - d_out[BLOCK_THREADS * ITEMS_PER_THREAD] = aggregate; - } -} - - - -//--------------------------------------------------------------------- -// Host utilities -//--------------------------------------------------------------------- - -/** - * Initialize exclusive prefix sum problem (and solution). - * Returns the aggregate - */ -int Initialize( - int *h_in, - int *h_reference, - int num_items) -{ - int inclusive = 0; - - for (int i = 0; i < num_items; ++i) - { - h_in[i] = i % 17; - - h_reference[i] = inclusive; - inclusive += h_in[i]; - } - - return inclusive; -} - - -/** - * Test thread block scan - */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - BlockScanAlgorithm ALGORITHM> -void Test() -{ - const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD; - - // Allocate host arrays - int *h_in = new int[TILE_SIZE]; - int *h_reference = new int[TILE_SIZE]; - int *h_gpu = new int[TILE_SIZE + 1]; - - // Initialize problem and reference output on host - int h_aggregate = Initialize(h_in, h_reference, TILE_SIZE); - - // Initialize device arrays - int *d_in = NULL; - int *d_out = NULL; - clock_t *d_elapsed = NULL; - cudaMalloc((void**)&d_in, sizeof(int) * TILE_SIZE); - cudaMalloc((void**)&d_out, sizeof(int) * (TILE_SIZE + 1)); - cudaMalloc((void**)&d_elapsed, sizeof(clock_t)); - - // Display input problem data - if (g_verbose) - { - printf("Input data: "); - for (int i = 0; i < TILE_SIZE; i++) - printf("%d, ", h_in[i]); - printf("\n\n"); - } - - // Kernel props - int max_sm_occupancy; - CubDebugExit(MaxSmOccupancy(max_sm_occupancy, BlockPrefixSumKernel, BLOCK_THREADS)); - - // Copy problem to device - cudaMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, cudaMemcpyHostToDevice); - - printf("BlockScan algorithm %s on %d items (%d timing iterations, %d blocks, %d threads, %d items per thread, %d SM occupancy):\n", - (ALGORITHM == BLOCK_SCAN_RAKING) ? "BLOCK_SCAN_RAKING" : (ALGORITHM == BLOCK_SCAN_RAKING_MEMOIZE) ? "BLOCK_SCAN_RAKING_MEMOIZE" : "BLOCK_SCAN_WARP_SCANS", - TILE_SIZE, g_timing_iterations, g_grid_size, BLOCK_THREADS, ITEMS_PER_THREAD, max_sm_occupancy); - - // Run aggregate/prefix kernel - BlockPrefixSumKernel<<>>( - d_in, - d_out, - d_elapsed); - - // Check results - printf("\tOutput items: "); - int compare = CompareDeviceResults(h_reference, d_out, TILE_SIZE, g_verbose, g_verbose); - printf("%s\n", compare ? "FAIL" : "PASS"); - AssertEquals(0, compare); - - // Check total aggregate - printf("\tAggregate: "); - compare = CompareDeviceResults(&h_aggregate, d_out + TILE_SIZE, 1, g_verbose, g_verbose); - printf("%s\n", compare ? 
"FAIL" : "PASS"); - AssertEquals(0, compare); - - // Run this several times and average the performance results - GpuTimer timer; - float elapsed_millis = 0.0; - clock_t elapsed_clocks = 0; - - for (int i = 0; i < g_timing_iterations; ++i) - { - // Copy problem to device - cudaMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, cudaMemcpyHostToDevice); - - timer.Start(); - - // Run aggregate/prefix kernel - BlockPrefixSumKernel<<>>( - d_in, - d_out, - d_elapsed); - - timer.Stop(); - elapsed_millis += timer.ElapsedMillis(); - - // Copy clocks from device - clock_t clocks; - CubDebugExit(cudaMemcpy(&clocks, d_elapsed, sizeof(clock_t), cudaMemcpyDeviceToHost)); - elapsed_clocks += clocks; - - } - - // Check for kernel errors and STDIO from the kernel, if any - CubDebugExit(cudaPeekAtLastError()); - CubDebugExit(cudaDeviceSynchronize()); - - // Display timing results - float avg_millis = elapsed_millis / g_timing_iterations; - float avg_items_per_sec = float(TILE_SIZE * g_grid_size) / avg_millis / 1000.0f; - float avg_clocks = float(elapsed_clocks) / g_timing_iterations; - float avg_clocks_per_item = avg_clocks / TILE_SIZE; - - printf("\tAverage BlockScan::Sum clocks: %.3f\n", avg_clocks); - printf("\tAverage BlockScan::Sum clocks per item: %.3f\n", avg_clocks_per_item); - printf("\tAverage kernel millis: %.4f\n", avg_millis); - printf("\tAverage million items / sec: %.4f\n", avg_items_per_sec); - - // Cleanup - if (h_in) delete[] h_in; - if (h_reference) delete[] h_reference; - if (h_gpu) delete[] h_gpu; - if (d_in) cudaFree(d_in); - if (d_out) cudaFree(d_out); - if (d_elapsed) cudaFree(d_elapsed); -} - - -/** - * Main - */ -int main(int argc, char** argv) -{ - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - args.GetCmdLineArgument("i", g_timing_iterations); - args.GetCmdLineArgument("grid-size", g_grid_size); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--device=] " - "[--i=]" - "[--grid-size=]" - "[--v] " - "\n", argv[0], g_timing_iterations, g_grid_size); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - - // Run tests - Test<1024, 1, BLOCK_SCAN_RAKING>(); - Test<512, 2, BLOCK_SCAN_RAKING>(); - Test<256, 4, BLOCK_SCAN_RAKING>(); - Test<128, 8, BLOCK_SCAN_RAKING>(); - Test<64, 16, BLOCK_SCAN_RAKING>(); - Test<32, 32, BLOCK_SCAN_RAKING>(); - - printf("-------------\n"); - - Test<1024, 1, BLOCK_SCAN_RAKING_MEMOIZE>(); - Test<512, 2, BLOCK_SCAN_RAKING_MEMOIZE>(); - Test<256, 4, BLOCK_SCAN_RAKING_MEMOIZE>(); - Test<128, 8, BLOCK_SCAN_RAKING_MEMOIZE>(); - Test<64, 16, BLOCK_SCAN_RAKING_MEMOIZE>(); - Test<32, 32, BLOCK_SCAN_RAKING_MEMOIZE>(); - - printf("-------------\n"); - - Test<1024, 1, BLOCK_SCAN_WARP_SCANS>(); - Test<512, 2, BLOCK_SCAN_WARP_SCANS>(); - Test<256, 4, BLOCK_SCAN_WARP_SCANS>(); - Test<128, 8, BLOCK_SCAN_WARP_SCANS>(); - Test<64, 16, BLOCK_SCAN_WARP_SCANS>(); - Test<32, 32, BLOCK_SCAN_WARP_SCANS>(); - - - return 0; -} - diff --git a/ml-xgboost/cub/examples/block/reduce_by_key.cu b/ml-xgboost/cub/examples/block/reduce_by_key.cu deleted file mode 100644 index 60351d5..0000000 --- a/ml-xgboost/cub/examples/block/reduce_by_key.cu +++ /dev/null @@ -1,57 +0,0 @@ - - -#include - - -template < - int BLOCK_THREADS, ///< Number of CTA threads - typename KeyT, ///< Key type - typename ValueT> ///< Value type -__global__ void Kernel() -{ - // Tuple type for scanning (pairs accumulated segment-value with segment-index) - typedef cub::KeyValuePair OffsetValuePairT; - - // 
Reduce-value-by-segment scan operator - typedef cub::ReduceBySegmentOp ReduceBySegmentOpT; - - // Parameterized BlockDiscontinuity type for setting head flags - typedef cub::BlockDiscontinuity< - KeyT, - BLOCK_THREADS> - BlockDiscontinuityKeysT; - - // Parameterized BlockScan type - typedef cub::BlockScan< - OffsetValuePairT, - BLOCK_THREADS, - cub::BLOCK_SCAN_WARP_SCANS> - BlockScanT; - - // Shared memory - __shared__ union - { - typename BlockScanT::TempStorage scan; // Scan storage - typename BlockDiscontinuityKeysT::TempStorage discontinuity; // Discontinuity storage - } temp_storage; - - - // Read data (each thread gets 3 items each, every 9 items is a segment) - KeyT my_keys[3] = {threadIdx.x / 3, threadIdx.x / 3, threadIdx.x / 3}; - ValueT my_values[3] = {1, 1, 1}; - - // Set head segment head flags - int my_flags[3]; - BlockDiscontinuityKeysT(temp_storage.discontinuity).FlagHeads( - my_flags, - my_keys, - cub::Inequality()); - - __syncthreads(); - - - - - - -} diff --git a/ml-xgboost/cub/examples/device/.gitignore b/ml-xgboost/cub/examples/device/.gitignore deleted file mode 100644 index 7032b5a..0000000 --- a/ml-xgboost/cub/examples/device/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -/bin -/Debug -/ipch -/Release -/cuda55.sdf -/cuda55.suo -/cuda60.sdf -/cuda60.suo diff --git a/ml-xgboost/cub/examples/device/Makefile b/ml-xgboost/cub/examples/device/Makefile deleted file mode 100644 index 049430d..0000000 --- a/ml-xgboost/cub/examples/device/Makefile +++ /dev/null @@ -1,197 +0,0 @@ -#/****************************************************************************** -# * Copyright (c) 2011, Duane Merrill. All rights reserved. -# * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. -# * -# * Redistribution and use in source and binary forms, with or without -# * modification, are permitted provided that the following conditions are met: -# * * Redistributions of source code must retain the above copyright -# * notice, this list of conditions and the following disclaimer. -# * * Redistributions in binary form must reproduce the above copyright -# * notice, this list of conditions and the following disclaimer in the -# * documentation and/or other materials provided with the distribution. -# * * Neither the name of the NVIDIA CORPORATION nor the -# * names of its contributors may be used to endorse or promote products -# * derived from this software without specific prior written permission. -# * -# * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY -# * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -# * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
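example_block_scan.cu (deleted above) follows the same load/scan/store structure, and its typedefs were likewise stripped in this rendering. A minimal sketch of the exclusive-prefix-sum step, assuming CUB's documented BlockScan collective and direct blocked I/O (the original instead uses BlockLoad/BlockStore with warp-transposition for coalescing; the kernel name is illustrative):

#include <cub/block/block_load.cuh>
#include <cub/block/block_store.cuh>
#include <cub/block/block_scan.cuh>

// d_out must hold BLOCK_THREADS * ITEMS_PER_THREAD + 1 integers:
// the scanned tile plus the appended block aggregate.
template <int BLOCK_THREADS, int ITEMS_PER_THREAD>
__global__ void PrefixSumExample(int *d_in, int *d_out)
{
    typedef cub::BlockScan<int, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS> BlockScanT;
    __shared__ typename BlockScanT::TempStorage temp_storage;

    // Blocked arrangement: thread t owns ITEMS_PER_THREAD consecutive items
    int data[ITEMS_PER_THREAD];
    cub::LoadDirectBlocked(threadIdx.x, d_in, data);

    int aggregate;
    BlockScanT(temp_storage).ExclusiveSum(data, data, aggregate);

    cub::StoreDirectBlocked(threadIdx.x, d_out, data);
    if (threadIdx.x == 0)
        d_out[BLOCK_THREADS * ITEMS_PER_THREAD] = aggregate;  // tile total
}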
-# * -#******************************************************************************/ - -#------------------------------------------------------------------------------- -# -# Makefile usage -# -# make [sm=] [cdp=<0|1>] [force32=<0|1>] [abi=<0|1>] [open64=<0|1>] [verbose=<0|1>] [keep=<0|1>] -# -#------------------------------------------------------------------------------- - -include ../../common.mk - - -#------------------------------------------------------------------------------- -# Includes -#------------------------------------------------------------------------------- - -INC += -I$(CUB_DIR) -I$(CUB_DIR)test - - - -#------------------------------------------------------------------------------- -# Dependency Lists -#------------------------------------------------------------------------------- - -rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d)) - -DEPS = $(CUB_DEPS) \ - $(CUB_DIR)test/Makefile \ - $(CUB_DIR)test/test_util.h \ - $(CUB_DIR)test/mersenne.h \ - -ALL = example_device_partition_flagged \ - example_device_partition_if \ - example_device_radix_sort \ - example_device_reduce \ - example_device_scan \ - example_device_select_unique \ - example_device_select_flagged \ - example_device_select_if \ - example_device_sort_find_non_trivial_runs - - - -#------------------------------------------------------------------------------- -# make default -#------------------------------------------------------------------------------- - -default: - - -#------------------------------------------------------------------------------- -# make clean -#------------------------------------------------------------------------------- - -clean : - rm -f bin/*$(CPU_ARCH_SUFFIX)* - rm -f *.i* *.cubin *.cu.c *.cudafe* *.fatbin.c *.ptx *.hash *.cu.cpp *.o - - -#------------------------------------------------------------------------------- -# make all -#------------------------------------------------------------------------------- - -all : $(ALL) - -#------------------------------------------------------------------------------- -# make run -#------------------------------------------------------------------------------- - -run : - for i in $(ALL); do ./bin/$${i}_$(BIN_SUFFIX) --device=$(device) || exit 1; done - - -#------------------------------------------------------------------------------- -# make example_device_reduce -#------------------------------------------------------------------------------- - -example_device_reduce: bin/example_device_reduce_$(BIN_SUFFIX) - -bin/example_device_reduce_$(BIN_SUFFIX) : example_device_reduce.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_device_reduce_$(BIN_SUFFIX) example_device_reduce.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make example_device_partition_flagged -#------------------------------------------------------------------------------- - -example_device_partition_flagged: bin/example_device_partition_flagged_$(BIN_SUFFIX) - -bin/example_device_partition_flagged_$(BIN_SUFFIX) : example_device_partition_flagged.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_device_partition_flagged_$(BIN_SUFFIX) example_device_partition_flagged.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - -#------------------------------------------------------------------------------- -# make example_device_partition_if 
-#------------------------------------------------------------------------------- - -example_device_partition_if: bin/example_device_partition_if_$(BIN_SUFFIX) - -bin/example_device_partition_if_$(BIN_SUFFIX) : example_device_partition_if.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_device_partition_if_$(BIN_SUFFIX) example_device_partition_if.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - -#------------------------------------------------------------------------------- -# make example_device_scan -#------------------------------------------------------------------------------- - -example_device_scan: bin/example_device_scan_$(BIN_SUFFIX) - -bin/example_device_scan_$(BIN_SUFFIX) : example_device_scan.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_device_scan_$(BIN_SUFFIX) example_device_scan.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make example_device_radix_sort -#------------------------------------------------------------------------------- - -example_device_radix_sort: bin/example_device_radix_sort_$(BIN_SUFFIX) - -bin/example_device_radix_sort_$(BIN_SUFFIX) : example_device_radix_sort.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_device_radix_sort_$(BIN_SUFFIX) example_device_radix_sort.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make example_device_select_unique -#------------------------------------------------------------------------------- - -example_device_select_unique: bin/example_device_select_unique_$(BIN_SUFFIX) - -bin/example_device_select_unique_$(BIN_SUFFIX) : example_device_select_unique.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_device_select_unique_$(BIN_SUFFIX) example_device_select_unique.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make example_device_select_flagged -#------------------------------------------------------------------------------- - -example_device_select_flagged: bin/example_device_select_flagged_$(BIN_SUFFIX) - -bin/example_device_select_flagged_$(BIN_SUFFIX) : example_device_select_flagged.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_device_select_flagged_$(BIN_SUFFIX) example_device_select_flagged.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - -#------------------------------------------------------------------------------- -# make example_device_select_if -#------------------------------------------------------------------------------- - -example_device_select_if: bin/example_device_select_if_$(BIN_SUFFIX) - -bin/example_device_select_if_$(BIN_SUFFIX) : example_device_select_if.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_device_select_if_$(BIN_SUFFIX) example_device_select_if.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make example_device_sort_find_non_trivial_runs -#------------------------------------------------------------------------------- - -example_device_sort_find_non_trivial_runs: bin/example_device_sort_find_non_trivial_runs_$(BIN_SUFFIX) - -bin/example_device_sort_find_non_trivial_runs_$(BIN_SUFFIX) : example_device_sort_find_non_trivial_runs.cu $(DEPS) - mkdir -p bin - $(NVCC) 
$(DEFINES) $(SM_TARGETS) -o bin/example_device_sort_find_non_trivial_runs_$(BIN_SUFFIX) example_device_sort_find_non_trivial_runs.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - - diff --git a/ml-xgboost/cub/examples/device/example_device_partition_flagged.cu b/ml-xgboost/cub/examples/device/example_device_partition_flagged.cu deleted file mode 100644 index 72737b7..0000000 --- a/ml-xgboost/cub/examples/device/example_device_partition_flagged.cu +++ /dev/null @@ -1,233 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/****************************************************************************** - * Simple example of DevicePartition::Flagged(). - * - * Partition flagged items from a sequence of int keys using a - * corresponding sequence of unsigned char flags. - * - * To compile using the command line: - * nvcc -arch=sm_XX example_device_partition_flagged.cu -I../..
-lcudart -O3 - * - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include - -#include -#include - -#include "../../test/test_util.h" - -using namespace cub; - - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -bool g_verbose = false; // Whether to display input/output to console -CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory - - -//--------------------------------------------------------------------- -// Test generation -//--------------------------------------------------------------------- - - -/** - * Initialize problem, setting flags at distances of random length - * chosen from [1..max_segment] - */ -void Initialize( - int *h_in, - unsigned char *h_flags, - int num_items, - int max_segment) -{ - unsigned short max_short = (unsigned short) -1; - - int key = 0; - int i = 0; - while (i < num_items) - { - // Select number of repeating occurrences - unsigned short repeat; - RandomBits(repeat); - repeat = (unsigned short) ((float(repeat) * (float(max_segment) / float(max_short)))); - repeat = CUB_MAX(1, repeat); - - int j = i; - while (j < CUB_MIN(i + repeat, num_items)) - { - h_flags[j] = 0; - h_in[j] = key; - j++; - } - - h_flags[i] = 1; - i = j; - key++; - } - - if (g_verbose) - { - printf("Input:\n"); - DisplayResults(h_in, num_items); - printf("Flags:\n"); - DisplayResults(h_flags, num_items); - printf("\n\n"); - } -} - - -/** - * Solve unique problem - */ -int Solve( - int *h_in, - unsigned char *h_flags, - int *h_reference, - int num_items) -{ - int num_selected = 0; - for (int i = 0; i < num_items; ++i) - { - if (h_flags[i]) - { - h_reference[num_selected] = h_in[i]; - num_selected++; - } - else - { - h_reference[num_items - (i - num_selected) - 1] = h_in[i]; - } - } - - return num_selected; -} - - -//--------------------------------------------------------------------- -// Main -//--------------------------------------------------------------------- - -/** - * Main - */ -int main(int argc, char** argv) -{ - int num_items = 150; - int max_segment = 40; // Maximum segment length - - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - args.GetCmdLineArgument("n", num_items); - args.GetCmdLineArgument("maxseg", max_segment); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--n= " - "[--device=] " - "[--maxseg=] " - "[--v] " - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - - // Allocate host arrays - int *h_in = new int[num_items]; - int *h_reference = new int[num_items]; - unsigned char *h_flags = new unsigned char[num_items]; - - // Initialize problem and solution - Initialize(h_in, h_flags, num_items, max_segment); - int num_selected = Solve(h_in, h_flags, h_reference, num_items); - - printf("cub::DevicePartition::Flagged %d items, %d selected (avg distance %d), %d-byte elements\n", - num_items, num_selected, (num_selected > 0) ? 
num_items / num_selected : 0, (int) sizeof(int)); - fflush(stdout); - - // Allocate problem device arrays - int *d_in = NULL; - unsigned char *d_flags = NULL; - - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(int) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_flags, sizeof(unsigned char) * num_items)); - - // Initialize device input - CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(int) * num_items, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemcpy(d_flags, h_flags, sizeof(unsigned char) * num_items, cudaMemcpyHostToDevice)); - - // Allocate device output array and num selected - int *d_out = NULL; - int *d_num_selected_out = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(int) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_selected_out, sizeof(int))); - - // Allocate temporary storage - void *d_temp_storage = NULL; - size_t temp_storage_bytes = 0; - CubDebugExit(DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items)); - CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); - - // Run - CubDebugExit(DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items)); - - // Check for correctness (and display results, if specified) - int compare = CompareDeviceResults(h_reference, d_out, num_items, true, g_verbose); - printf("\t Data %s ", compare ? "FAIL" : "PASS"); - compare |= CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose); - printf("\t Count %s ", compare ? "FAIL" : "PASS"); - AssertEquals(0, compare); - - // Cleanup - if (h_in) delete[] h_in; - if (h_reference) delete[] h_reference; - if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); - if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out)); - if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); - if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); - if (d_flags) CubDebugExit(g_allocator.DeviceFree(d_flags)); - - printf("\n\n"); - - return 0; -} - - - diff --git a/ml-xgboost/cub/examples/device/example_device_partition_if.cu b/ml-xgboost/cub/examples/device/example_device_partition_if.cu deleted file mode 100644 index 4dfa351..0000000 --- a/ml-xgboost/cub/examples/device/example_device_partition_if.cu +++ /dev/null @@ -1,244 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. 
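Worth spelling out, since every device-wide example deleted in this patch repeats it: CUB's double-call idiom. The first call, with d_temp_storage == NULL, only computes the required scratch size; the second call does the work. A condensed sketch for DevicePartition::Flagged (PartitionFlagged is an illustrative wrapper, not from the original; the device buffers are assumed caller-allocated):

#include <cuda_runtime.h>
#include <cub/device/device_partition.cuh>

// d_in, d_flags, d_out, d_num_selected_out are caller-owned device buffers.
void PartitionFlagged(int *d_in, unsigned char *d_flags, int *d_out,
                      int *d_num_selected_out, int num_items)
{
    void   *d_temp_storage = NULL;
    size_t  temp_storage_bytes = 0;

    // Pass 1: d_temp_storage is NULL, so only temp_storage_bytes is written
    cub::DevicePartition::Flagged(d_temp_storage, temp_storage_bytes,
        d_in, d_flags, d_out, d_num_selected_out, num_items);

    cudaMalloc(&d_temp_storage, temp_storage_bytes);

    // Pass 2: performs the partition; flagged items are compacted to the
    // front of d_out, rejected items are written in reverse order at the back
    cub::DevicePartition::Flagged(d_temp_storage, temp_storage_bytes,
        d_in, d_flags, d_out, d_num_selected_out, num_items);

    cudaFree(d_temp_storage);
}

That front/reversed-back output layout is what the host-side Solve() reference above reproduces.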
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/****************************************************************************** - * Simple example of DevicePartition::If(). - * - * Partitions items from a sequence of int keys using a - * selection functor (greater-than) - * - * To compile using the command line: - * nvcc -arch=sm_XX example_device_partition_if.cu -I../.. -lcudart -O3 - * - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include <stdio.h> - -#include <cub/util_allocator.cuh> -#include <cub/device/device_partition.cuh> - -#include "../../test/test_util.h" - -using namespace cub; - - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -bool g_verbose = false; // Whether to display input/output to console -CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory - - -/// Selection functor type -struct GreaterThan -{ - int compare; - - __host__ __device__ __forceinline__ - GreaterThan(int compare) : compare(compare) {} - - __host__ __device__ __forceinline__ - bool operator()(const int &a) const { - return (a > compare); - } -}; - - -//--------------------------------------------------------------------- -// Test generation -//--------------------------------------------------------------------- - -/** - * Initialize problem, setting runs of random length chosen from [1..max_segment] - */ -void Initialize( - int *h_in, - int num_items, - int max_segment) -{ - int key = 0; - int i = 0; - while (i < num_items) - { - // Randomly select number of repeating occurrences uniformly from [1..max_segment] - unsigned short max_short = (unsigned short) -1; - unsigned short repeat; - RandomBits(repeat); - repeat = (unsigned short) ((float(repeat) * (float(max_segment) / float(max_short)))); - repeat = CUB_MAX(1, repeat); - - int j = i; - while (j < CUB_MIN(i + repeat, num_items)) - { - h_in[j] = key; - j++; - } - - i = j; - key++; - } - - if (g_verbose) - { - printf("Input:\n"); - DisplayResults(h_in, num_items); - printf("\n\n"); - } -} - - -/** - * Solve partition problem - */ -template <typename SelectOp> -int Solve( - int *h_in, - SelectOp select_op, - int *h_reference, - int num_items) -{ - int num_selected = 0; - for (int i = 0; i < num_items; ++i) - { - if (select_op(h_in[i])) - { - h_reference[num_selected] = h_in[i]; - num_selected++; - } - else - { - h_reference[num_items - (i - num_selected) - 1] = h_in[i]; - } - } - - return num_selected; -} - - -//--------------------------------------------------------------------- -// Main -//--------------------------------------------------------------------- -
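DevicePartition::If() differs from Flagged() only in taking a selection functor such as the GreaterThan struct above instead of a flags array. A condensed sketch under the same assumptions (illustrative wrapper name, caller-owned device buffers):

#include <cuda_runtime.h>
#include <cub/device/device_partition.cuh>

// Selection functor mirroring the GreaterThan struct above
struct GreaterThanPivot
{
    int compare;
    __host__ __device__ __forceinline__ GreaterThanPivot(int compare) : compare(compare) {}
    __host__ __device__ __forceinline__ bool operator()(const int &a) const
    {
        return a > compare;
    }
};

void PartitionIf(int *d_in, int *d_out, int *d_num_selected_out,
                 int num_items, int pivot)
{
    GreaterThanPivot select_op(pivot);
    void   *d_temp_storage = NULL;
    size_t  temp_storage_bytes = 0;

    // Size query, then the actual partition (selected items to the front,
    // rejected items in reverse order at the back)
    cub::DevicePartition::If(d_temp_storage, temp_storage_bytes,
        d_in, d_out, d_num_selected_out, num_items, select_op);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    cub::DevicePartition::If(d_temp_storage, temp_storage_bytes,
        d_in, d_out, d_num_selected_out, num_items, select_op);
    cudaFree(d_temp_storage);
}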
-/** - * Main - */ -int main(int argc, char** argv) -{ - int num_items = 150; - int max_segment = 40; // Maximum segment length - - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - args.GetCmdLineArgument("n", num_items); - args.GetCmdLineArgument("maxseg", max_segment); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--n= " - "[--device=] " - "[--maxseg=]" - "[--v] " - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - - // Allocate host arrays - int *h_in = new int[num_items]; - int *h_reference = new int[num_items]; - - // DevicePartition a pivot index - unsigned int pivot_index; - unsigned int max_int = (unsigned int) -1; - RandomBits(pivot_index); - pivot_index = (unsigned int) ((float(pivot_index) * (float(num_items - 1) / float(max_int)))); - printf("Pivot idx: %d\n", pivot_index); fflush(stdout); - - // Initialize problem and solution - Initialize(h_in, num_items, max_segment); - GreaterThan select_op(h_in[pivot_index]); - - int num_selected = Solve(h_in, select_op, h_reference, num_items); - - printf("cub::DevicePartition::If %d items, %d selected (avg run length %d), %d-byte elements\n", - num_items, num_selected, (num_selected > 0) ? num_items / num_selected : 0, (int) sizeof(int)); - fflush(stdout); - - // Allocate problem device arrays - int *d_in = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(int) * num_items)); - - // Initialize device input - CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(int) * num_items, cudaMemcpyHostToDevice)); - - // Allocate device output array and num selected - int *d_out = NULL; - int *d_num_selected_out = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(int) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_selected_out, sizeof(int))); - - // Allocate temporary storage - void *d_temp_storage = NULL; - size_t temp_storage_bytes = 0; - CubDebugExit(DevicePartition::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op)); - CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); - - // Run - CubDebugExit(DevicePartition::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op)); - - // Check for correctness (and display results, if specified) - int compare = CompareDeviceResults(h_reference, d_out, num_items, true, g_verbose); - printf("\t Data %s ", compare ? "FAIL" : "PASS"); - compare = compare | CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose); - printf("\t Count %s ", compare ? "FAIL" : "PASS"); - AssertEquals(0, compare); - - // Cleanup - if (h_in) delete[] h_in; - if (h_reference) delete[] h_reference; - if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); - if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); - if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out)); - if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); - - printf("\n\n"); - - return 0; -} - - - diff --git a/ml-xgboost/cub/examples/device/example_device_radix_sort.cu b/ml-xgboost/cub/examples/device/example_device_radix_sort.cu deleted file mode 100644 index 7f1c158..0000000 --- a/ml-xgboost/cub/examples/device/example_device_radix_sort.cu +++ /dev/null @@ -1,226 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. 
- * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/****************************************************************************** - * Simple example of DeviceRadixSort::SortPairs(). - * - * Sorts an array of float keys paired with a corresponding array of int values. - * - * To compile using the command line: - * nvcc -arch=sm_XX example_device_radix_sort.cu -I../.. -lcudart -O3 - * - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include -#include - -#include -#include - -#include "../../test/test_util.h" - -using namespace cub; - - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -bool g_verbose = false; // Whether to display input/output to console -CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory - - -//--------------------------------------------------------------------- -// Test generation -//--------------------------------------------------------------------- - -/** - * Simple key-value pairing for floating point types. Distinguishes - * between positive and negative zero. - */ -struct Pair -{ - float key; - int value; - - bool operator<(const Pair &b) const - { - if (key < b.key) - return true; - - if (key > b.key) - return false; - - // Return true if key is negative zero and b.key is positive zero - unsigned int key_bits = *reinterpret_cast(const_cast(&key)); - unsigned int b_key_bits = *reinterpret_cast(const_cast(&b.key)); - unsigned int HIGH_BIT = 1u << 31; - - return ((key_bits & HIGH_BIT) != 0) && ((b_key_bits & HIGH_BIT) == 0); - } -}; - - -/** - * Initialize key-value sorting problem. 
- */ -void Initialize( - float *h_keys, - int *h_values, - float *h_reference_keys, - int *h_reference_values, - int num_items) -{ - Pair *h_pairs = new Pair[num_items]; - - for (int i = 0; i < num_items; ++i) - { - RandomBits(h_keys[i]); - RandomBits(h_values[i]); - h_pairs[i].key = h_keys[i]; - h_pairs[i].value = h_values[i]; - } - - if (g_verbose) - { - printf("Input keys:\n"); - DisplayResults(h_keys, num_items); - printf("\n\n"); - - printf("Input values:\n"); - DisplayResults(h_values, num_items); - printf("\n\n"); - } - - std::stable_sort(h_pairs, h_pairs + num_items); - - for (int i = 0; i < num_items; ++i) - { - h_reference_keys[i] = h_pairs[i].key; - h_reference_values[i] = h_pairs[i].value; - } - - delete[] h_pairs; -} - - -//--------------------------------------------------------------------- -// Main -//--------------------------------------------------------------------- - -/** - * Main - */ -int main(int argc, char** argv) -{ - int num_items = 150; - - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - args.GetCmdLineArgument("n", num_items); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--n= " - "[--device=] " - "[--v] " - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - - printf("cub::DeviceRadixSort::SortPairs() %d items (%d-byte keys %d-byte values)\n", - num_items, int(sizeof(float)), int(sizeof(int))); - fflush(stdout); - - // Allocate host arrays - float *h_keys = new float[num_items]; - float *h_reference_keys = new float[num_items]; - int *h_values = new int[num_items]; - int *h_reference_values = new int[num_items]; - - // Initialize problem and solution on host - Initialize(h_keys, h_values, h_reference_keys, h_reference_values, num_items); - - // Allocate device arrays - DoubleBuffer d_keys; - DoubleBuffer d_values; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_keys.d_buffers[0], sizeof(float) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_keys.d_buffers[1], sizeof(float) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_values.d_buffers[0], sizeof(int) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_values.d_buffers[1], sizeof(int) * num_items)); - - // Allocate temporary storage - size_t temp_storage_bytes = 0; - void *d_temp_storage = NULL; - - CubDebugExit(DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items)); - CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); - - // Initialize device arrays - CubDebugExit(cudaMemcpy(d_keys.d_buffers[d_keys.selector], h_keys, sizeof(float) * num_items, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemcpy(d_values.d_buffers[d_values.selector], h_values, sizeof(int) * num_items, cudaMemcpyHostToDevice)); - - // Run - CubDebugExit(DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items)); - - // Check for correctness (and display results, if specified) - int compare = CompareDeviceResults(h_reference_keys, d_keys.Current(), num_items, true, g_verbose); - printf("\t Compare keys (selector %d): %s\n", d_keys.selector, compare ? "FAIL" : "PASS"); - AssertEquals(0, compare); - compare = CompareDeviceResults(h_reference_values, d_values.Current(), num_items, true, g_verbose); - printf("\t Compare values (selector %d): %s\n", d_values.selector, compare ? 
"FAIL" : "PASS"); - AssertEquals(0, compare); - - // Cleanup - if (h_keys) delete[] h_keys; - if (h_reference_keys) delete[] h_reference_keys; - if (h_values) delete[] h_values; - if (h_reference_values) delete[] h_reference_values; - - if (d_keys.d_buffers[0]) CubDebugExit(g_allocator.DeviceFree(d_keys.d_buffers[0])); - if (d_keys.d_buffers[1]) CubDebugExit(g_allocator.DeviceFree(d_keys.d_buffers[1])); - if (d_values.d_buffers[0]) CubDebugExit(g_allocator.DeviceFree(d_values.d_buffers[0])); - if (d_values.d_buffers[1]) CubDebugExit(g_allocator.DeviceFree(d_values.d_buffers[1])); - if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); - - printf("\n\n"); - - return 0; -} - - - diff --git a/ml-xgboost/cub/examples/device/example_device_reduce.cu b/ml-xgboost/cub/examples/device/example_device_reduce.cu deleted file mode 100644 index 44dc946..0000000 --- a/ml-xgboost/cub/examples/device/example_device_reduce.cu +++ /dev/null @@ -1,180 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/****************************************************************************** - * Simple example of DeviceReduce::Sum(). - * - * Sums an array of int keys. - * - * To compile using the command line: - * nvcc -arch=sm_XX example_device_reduce.cu -I../.. 
-lcudart -O3
- *
- ******************************************************************************/
-
-// Ensure printing of CUDA runtime errors to console
-#define CUB_STDERR
-
-#include <stdio.h>
-
-#include <cub/util_allocator.cuh>
-#include <cub/device/device_reduce.cuh>
-
-#include "../../test/test_util.h"
-
-using namespace cub;
-
-
-//---------------------------------------------------------------------
-// Globals, constants and typedefs
-//---------------------------------------------------------------------
-
-bool                    g_verbose = false;  // Whether to display input/output to console
-CachingDeviceAllocator  g_allocator(true);  // Caching allocator for device memory
-
-
-//---------------------------------------------------------------------
-// Test generation
-//---------------------------------------------------------------------
-
-/**
- * Initialize problem
- */
-void Initialize(
-    int   *h_in,
-    int   num_items)
-{
-    for (int i = 0; i < num_items; ++i)
-        h_in[i] = i;
-
-    if (g_verbose)
-    {
-        printf("Input:\n");
-        DisplayResults(h_in, num_items);
-        printf("\n\n");
-    }
-}
-
-
-/**
- * Compute solution
- */
-void Solve(
-    int   *h_in,
-    int   &h_reference,
-    int   num_items)
-{
-    for (int i = 0; i < num_items; ++i)
-    {
-        if (i == 0)
-            h_reference = h_in[0];
-        else
-            h_reference += h_in[i];
-    }
-}
-
-
-//---------------------------------------------------------------------
-// Main
-//---------------------------------------------------------------------
-
-/**
- * Main
- */
-int main(int argc, char** argv)
-{
-    int num_items = 150;
-
-    // Initialize command line
-    CommandLineArgs args(argc, argv);
-    g_verbose = args.CheckCmdLineFlag("v");
-    args.GetCmdLineArgument("n", num_items);
-
-    // Print usage
-    if (args.CheckCmdLineFlag("help"))
-    {
-        printf("%s "
-            "[--n=<input items>] "
-            "[--device=<device-id>] "
-            "[--v] "
-            "\n", argv[0]);
-        exit(0);
-    }
-
-    // Initialize device
-    CubDebugExit(args.DeviceInit());
-
-    printf("cub::DeviceReduce::Sum() %d items (%d-byte elements)\n",
-        num_items, (int) sizeof(int));
-    fflush(stdout);
-
-    // Allocate host arrays
-    int* h_in = new int[num_items];
-    int  h_reference;
-
-    // Initialize problem and solution
-    Initialize(h_in, num_items);
-    Solve(h_in, h_reference, num_items);
-
-    // Allocate problem device arrays
-    int *d_in = NULL;
-    CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(int) * num_items));
-
-    // Initialize device input
-    CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(int) * num_items, cudaMemcpyHostToDevice));
-
-    // Allocate device output array
-    int *d_out = NULL;
-    CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(int) * 1));
-
-    // Request and allocate temporary storage
-    void    *d_temp_storage = NULL;
-    size_t  temp_storage_bytes = 0;
-    CubDebugExit(DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items));
-    CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
-
-    // Run
-    CubDebugExit(DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items));
-
-    // Check for correctness (and display results, if specified)
-    int compare = CompareDeviceResults(&h_reference, d_out, 1, g_verbose, g_verbose);
-    printf("\t%s", compare ?
"FAIL" : "PASS"); - AssertEquals(0, compare); - - // Cleanup - if (h_in) delete[] h_in; - if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); - if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); - if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); - - printf("\n\n"); - - return 0; -} - - - diff --git a/ml-xgboost/cub/examples/device/example_device_scan.cu b/ml-xgboost/cub/examples/device/example_device_scan.cu deleted file mode 100644 index 7a9b476..0000000 --- a/ml-xgboost/cub/examples/device/example_device_scan.cu +++ /dev/null @@ -1,186 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/****************************************************************************** - * Simple example of DeviceScan::ExclusiveSum(). - * - * Computes an exclusive sum of int keys. - * - * To compile using the command line: - * nvcc -arch=sm_XX example_device_scan.cu -I../.. 
-lcudart -O3
- *
- ******************************************************************************/
-
-// Ensure printing of CUDA runtime errors to console
-#define CUB_STDERR
-
-#include <stdio.h>
-
-#include <cub/util_allocator.cuh>
-#include <cub/device/device_scan.cuh>
-
-#include "../../test/test_util.h"
-
-using namespace cub;
-
-
-//---------------------------------------------------------------------
-// Globals, constants and typedefs
-//---------------------------------------------------------------------
-
-bool                    g_verbose = false;  // Whether to display input/output to console
-CachingDeviceAllocator  g_allocator(true);  // Caching allocator for device memory
-
-
-//---------------------------------------------------------------------
-// Test generation
-//---------------------------------------------------------------------
-
-
-/**
- * Initialize problem
- */
-void Initialize(
-    int   *h_in,
-    int   num_items)
-{
-    for (int i = 0; i < num_items; ++i)
-        h_in[i] = i;
-
-    if (g_verbose)
-    {
-        printf("Input:\n");
-        DisplayResults(h_in, num_items);
-        printf("\n\n");
-    }
-}
-
-/**
- * Solve exclusive-scan problem
- */
-int Solve(
-    int   *h_in,
-    int   *h_reference,
-    int   num_items)
-{
-    int inclusive = 0;
-    int aggregate = 0;
-
-    for (int i = 0; i < num_items; ++i)
-    {
-        h_reference[i] = inclusive;
-        inclusive += h_in[i];
-        aggregate += h_in[i];
-    }
-
-    return aggregate;
-}
-
-
-
-//---------------------------------------------------------------------
-// Main
-//---------------------------------------------------------------------
-
-/**
- * Main
- */
-int main(int argc, char** argv)
-{
-    int num_items = 150;
-
-    // Initialize command line
-    CommandLineArgs args(argc, argv);
-    g_verbose = args.CheckCmdLineFlag("v");
-    args.GetCmdLineArgument("n", num_items);
-
-    // Print usage
-    if (args.CheckCmdLineFlag("help"))
-    {
-        printf("%s "
-            "[--n=<input items>] "
-            "[--device=<device-id>] "
-            "[--v] "
-            "\n", argv[0]);
-        exit(0);
-    }
-
-    // Initialize device
-    CubDebugExit(args.DeviceInit());
-
-    printf("cub::DeviceScan::ExclusiveSum %d items (%d-byte elements)\n",
-        num_items, (int) sizeof(int));
-    fflush(stdout);
-
-    // Allocate host arrays
-    int* h_in        = new int[num_items];
-    int* h_reference = new int[num_items];
-
-    // Initialize problem and solution
-    Initialize(h_in, num_items);
-    Solve(h_in, h_reference, num_items);
-
-    // Allocate problem device arrays
-    int *d_in = NULL;
-    CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(int) * num_items));
-
-    // Initialize device input
-    CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(int) * num_items, cudaMemcpyHostToDevice));
-
-    // Allocate device output array
-    int *d_out = NULL;
-    CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(int) * num_items));
-
-    // Allocate temporary storage
-    void    *d_temp_storage = NULL;
-    size_t  temp_storage_bytes = 0;
-    CubDebugExit(DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items));
-    CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
-
-    // Run
-    CubDebugExit(DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items));
-
-    // Check for correctness (and display results, if specified)
-    int compare = CompareDeviceResults(h_reference, d_out, num_items, true, g_verbose);
-    printf("\t%s", compare ?
"FAIL" : "PASS"); - AssertEquals(0, compare); - - // Cleanup - if (h_in) delete[] h_in; - if (h_reference) delete[] h_reference; - if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); - if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); - if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); - - printf("\n\n"); - - return 0; -} - - - diff --git a/ml-xgboost/cub/examples/device/example_device_select_flagged.cu b/ml-xgboost/cub/examples/device/example_device_select_flagged.cu deleted file mode 100644 index 76012c4..0000000 --- a/ml-xgboost/cub/examples/device/example_device_select_flagged.cu +++ /dev/null @@ -1,233 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/****************************************************************************** - * Simple example of DeviceSelect::Flagged(). - * - * Selects flagged items from from a sequence of int keys using a - * corresponding sequence of unsigned char flags. - * - * To compile using the command line: - * nvcc -arch=sm_XX example_device_select_flagged.cu -I../.. 
-lcudart -O3 - * - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include - -#include -#include - -#include "../../test/test_util.h" - -using namespace cub; - - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -bool g_verbose = false; // Whether to display input/output to console -CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory - - -//--------------------------------------------------------------------- -// Test generation -//--------------------------------------------------------------------- - - -/** - * Initialize problem, setting flags at distances of random length - * chosen from [1..max_segment] - */ -void Initialize( - int *h_in, - unsigned char *h_flags, - int num_items, - int max_segment) -{ - unsigned short max_short = (unsigned short) -1; - - int key = 0; - int i = 0; - while (i < num_items) - { - // Select number of repeating occurrences - unsigned short repeat; - RandomBits(repeat); - repeat = (unsigned short) ((float(repeat) * (float(max_segment) / float(max_short)))); - repeat = CUB_MAX(1, repeat); - - int j = i; - while (j < CUB_MIN(i + repeat, num_items)) - { - h_flags[j] = 0; - h_in[j] = key; - j++; - } - - h_flags[i] = 1; - i = j; - key++; - } - - if (g_verbose) - { - printf("Input:\n"); - DisplayResults(h_in, num_items); - printf("Flags:\n"); - DisplayResults(h_flags, num_items); - printf("\n\n"); - } -} - - -/** - * Solve unique problem - */ -int Solve( - int *h_in, - unsigned char *h_flags, - int *h_reference, - int num_items) -{ - int num_selected = 0; - for (int i = 0; i < num_items; ++i) - { - if (h_flags[i]) - { - h_reference[num_selected] = h_in[i]; - num_selected++; - } - else - { - h_reference[num_items - (i - num_selected) - 1] = h_in[i]; - } - } - - return num_selected; -} - - -//--------------------------------------------------------------------- -// Main -//--------------------------------------------------------------------- - -/** - * Main - */ -int main(int argc, char** argv) -{ - int num_items = 150; - int max_segment = 40; // Maximum segment length - - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - args.GetCmdLineArgument("n", num_items); - args.GetCmdLineArgument("maxseg", max_segment); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--n= " - "[--device=] " - "[--maxseg=] " - "[--v] " - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - - // Allocate host arrays - int *h_in = new int[num_items]; - int *h_reference = new int[num_items]; - unsigned char *h_flags = new unsigned char[num_items]; - - // Initialize problem and solution - Initialize(h_in, h_flags, num_items, max_segment); - int num_selected = Solve(h_in, h_flags, h_reference, num_items); - - printf("cub::DeviceSelect::Flagged %d items, %d selected (avg distance %d), %d-byte elements\n", - num_items, num_selected, (num_selected > 0) ? 
num_items / num_selected : 0, (int) sizeof(int)); - fflush(stdout); - - // Allocate problem device arrays - int *d_in = NULL; - unsigned char *d_flags = NULL; - - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(int) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_flags, sizeof(unsigned char) * num_items)); - - // Initialize device input - CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(int) * num_items, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemcpy(d_flags, h_flags, sizeof(unsigned char) * num_items, cudaMemcpyHostToDevice)); - - // Allocate device output array and num selected - int *d_out = NULL; - int *d_num_selected_out = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(int) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_selected_out, sizeof(int))); - - // Allocate temporary storage - void *d_temp_storage = NULL; - size_t temp_storage_bytes = 0; - CubDebugExit(DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items)); - CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); - - // Run - CubDebugExit(DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items)); - - // Check for correctness (and display results, if specified) - int compare = CompareDeviceResults(h_reference, d_out, num_selected, true, g_verbose); - printf("\t Data %s ", compare ? "FAIL" : "PASS"); - compare |= CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose); - printf("\t Count %s ", compare ? "FAIL" : "PASS"); - AssertEquals(0, compare); - - // Cleanup - if (h_in) delete[] h_in; - if (h_reference) delete[] h_reference; - if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); - if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out)); - if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); - if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); - if (d_flags) CubDebugExit(g_allocator.DeviceFree(d_flags)); - - printf("\n\n"); - - return 0; -} - - - diff --git a/ml-xgboost/cub/examples/device/example_device_select_if.cu b/ml-xgboost/cub/examples/device/example_device_select_if.cu deleted file mode 100644 index a21988e..0000000 --- a/ml-xgboost/cub/examples/device/example_device_select_if.cu +++ /dev/null @@ -1,242 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/****************************************************************************** - * Simple example of DeviceSelect::If(). - * - * Selects items from from a sequence of int keys using a - * section functor (greater-than) - * - * To compile using the command line: - * nvcc -arch=sm_XX example_device_select_if.cu -I../.. -lcudart -O3 - * - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include - -#include -#include - -#include "../../test/test_util.h" - -using namespace cub; - - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -bool g_verbose = false; // Whether to display input/output to console -CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory - - -/// Selection functor type -struct GreaterThan -{ - int compare; - - __host__ __device__ __forceinline__ - GreaterThan(int compare) : compare(compare) {} - - __host__ __device__ __forceinline__ - bool operator()(const int &a) const { - return (a > compare); - } -}; - - -//--------------------------------------------------------------------- -// Test generation -//--------------------------------------------------------------------- - -/** - * Initialize problem, setting runs of random length chosen from [1..max_segment] - */ -void Initialize( - int *h_in, - int num_items, - int max_segment) -{ - int key = 0; - int i = 0; - while (i < num_items) - { - // Randomly select number of repeating occurrences uniformly from [1..max_segment] - unsigned short max_short = (unsigned short) -1; - unsigned short repeat; - RandomBits(repeat); - repeat = (unsigned short) ((float(repeat) * (float(max_segment) / float(max_short)))); - repeat = CUB_MAX(1, repeat); - - int j = i; - while (j < CUB_MIN(i + repeat, num_items)) - { - h_in[j] = key; - j++; - } - - i = j; - key++; - } - - if (g_verbose) - { - printf("Input:\n"); - DisplayResults(h_in, num_items); - printf("\n\n"); - } -} - - -/** - * Solve unique problem - */ -template -int Solve( - int *h_in, - SelectOp select_op, - int *h_reference, - int num_items) -{ - int num_selected = 0; - for (int i = 0; i < num_items; ++i) - { - if (select_op(h_in[i])) - { - h_reference[num_selected] = h_in[i]; - num_selected++; - } - else - { - h_reference[num_items - (i - num_selected) - 1] = h_in[i]; - } - } - - return num_selected; -} - - -//--------------------------------------------------------------------- -// Main -//--------------------------------------------------------------------- - -/** 
- * Main - */ -int main(int argc, char** argv) -{ - int num_items = 150; - int max_segment = 40; // Maximum segment length - - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - args.GetCmdLineArgument("n", num_items); - args.GetCmdLineArgument("maxseg", max_segment); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--n= " - "[--device=] " - "[--maxseg=]" - "[--v] " - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - - // Allocate host arrays - int *h_in = new int[num_items]; - int *h_reference = new int[num_items]; - - // Select a pivot index - unsigned int pivot_index; - unsigned int max_int = (unsigned int) -1; - RandomBits(pivot_index); - pivot_index = (unsigned int) ((float(pivot_index) * (float(num_items - 1) / float(max_int)))); - printf("Pivot idx: %d\n", pivot_index); fflush(stdout); - - // Initialize problem and solution - Initialize(h_in, num_items, max_segment); - GreaterThan select_op(h_in[pivot_index]); - - int num_selected = Solve(h_in, select_op, h_reference, num_items); - - printf("cub::DeviceSelect::If %d items, %d selected (avg run length %d), %d-byte elements\n", - num_items, num_selected, (num_selected > 0) ? num_items / num_selected : 0, (int) sizeof(int)); - fflush(stdout); - - // Allocate problem device arrays - int *d_in = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(int) * num_items)); - - // Initialize device input - CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(int) * num_items, cudaMemcpyHostToDevice)); - - // Allocate device output array and num selected - int *d_out = NULL; - int *d_num_selected_out = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(int) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_selected_out, sizeof(int))); - - // Allocate temporary storage - void *d_temp_storage = NULL; - size_t temp_storage_bytes = 0; - CubDebugExit(DeviceSelect::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op)); - CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); - - // Run - CubDebugExit(DeviceSelect::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op)); - - // Check for correctness (and display results, if specified) - int compare = CompareDeviceResults(h_reference, d_out, num_selected, true, g_verbose); - printf("\t Data %s ", compare ? "FAIL" : "PASS"); - compare = compare | CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose); - printf("\t Count %s ", compare ? "FAIL" : "PASS"); - AssertEquals(0, compare); - - // Cleanup - if (h_in) delete[] h_in; - if (h_reference) delete[] h_reference; - if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); - if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); - if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out)); - if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); - - printf("\n\n"); - - return 0; -} - diff --git a/ml-xgboost/cub/examples/device/example_device_select_unique.cu b/ml-xgboost/cub/examples/device/example_device_select_unique.cu deleted file mode 100644 index ae5ea83..0000000 --- a/ml-xgboost/cub/examples/device/example_device_select_unique.cu +++ /dev/null @@ -1,221 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. 
- * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/****************************************************************************** - * Simple example of DeviceSelect::Unique(). - * - * Selects the first element from each run of identical values from a sequence - * of int keys. - * - * To compile using the command line: - * nvcc -arch=sm_XX example_device_select_unique.cu -I../.. 
-lcudart -O3
- *
- ******************************************************************************/
-
-// Ensure printing of CUDA runtime errors to console
-#define CUB_STDERR
-
-#include <stdio.h>
-
-#include <cub/util_allocator.cuh>
-#include <cub/device/device_select.cuh>
-
-#include "../../test/test_util.h"
-
-using namespace cub;
-
-
-//---------------------------------------------------------------------
-// Globals, constants and typedefs
-//---------------------------------------------------------------------
-
-bool                    g_verbose = false;  // Whether to display input/output to console
-CachingDeviceAllocator  g_allocator(true);  // Caching allocator for device memory
-
-
-//---------------------------------------------------------------------
-// Test generation
-//---------------------------------------------------------------------
-
-
-/**
- * Initialize problem, setting runs of random length chosen from [1..max_segment]
- */
-void Initialize(
-    int     *h_in,
-    int     num_items,
-    int     max_segment)
-{
-    int key = 0;
-    int i = 0;
-    while (i < num_items)
-    {
-        // Randomly select number of repeating occurrences uniformly from [1..max_segment]
-        unsigned short max_short = (unsigned short) -1;
-        unsigned short repeat;
-        RandomBits(repeat);
-        repeat = (unsigned short) ((float(repeat) * (float(max_segment) / float(max_short))));
-        repeat = CUB_MAX(1, repeat);
-
-        int j = i;
-        while (j < CUB_MIN(i + repeat, num_items))
-        {
-            h_in[j] = key;
-            j++;
-        }
-
-        i = j;
-        key++;
-    }
-
-    if (g_verbose)
-    {
-        printf("Input:\n");
-        DisplayResults(h_in, num_items);
-        printf("\n\n");
-    }
-}
-
-
-/**
- * Solve unique problem
- */
-int Solve(
-    int     *h_in,
-    int     *h_reference,
-    int     num_items)
-{
-    int num_selected = 0;
-    if (num_items > 0)
-    {
-        h_reference[num_selected] = h_in[0];
-        num_selected++;
-    }
-
-    for (int i = 1; i < num_items; ++i)
-    {
-        if (h_in[i] != h_in[i - 1])
-        {
-            h_reference[num_selected] = h_in[i];
-            num_selected++;
-        }
-    }
-
-    return num_selected;
-}
-
-
-//---------------------------------------------------------------------
-// Main
-//---------------------------------------------------------------------
-
-/**
- * Main
- */
-int main(int argc, char** argv)
-{
-    int num_items   = 150;
-    int max_segment = 40;       // Maximum segment length
-
-    // Initialize command line
-    CommandLineArgs args(argc, argv);
-    g_verbose = args.CheckCmdLineFlag("v");
-    args.GetCmdLineArgument("n", num_items);
-    args.GetCmdLineArgument("maxseg", max_segment);
-
-    // Print usage
-    if (args.CheckCmdLineFlag("help"))
-    {
-        printf("%s "
-            "[--n=<input items>] "
-            "[--device=<device-id>] "
-            "[--maxseg=<max segment length>] "
-            "[--v] "
-            "\n", argv[0]);
-        exit(0);
-    }
-
-    // Initialize device
-    CubDebugExit(args.DeviceInit());
-
-    // Allocate host arrays
-    int* h_in        = new int[num_items];
-    int* h_reference = new int[num_items];
-
-    // Initialize problem and solution
-    Initialize(h_in, num_items, max_segment);
-    int num_selected = Solve(h_in, h_reference, num_items);
-
-    printf("cub::DeviceSelect::Unique %d items (%d-byte elements), %d selected (avg run length %d)\n",
-        num_items, (int) sizeof(int), num_selected, num_items / num_selected);
-    fflush(stdout);
-
-    // Allocate problem device arrays
-    int *d_in = NULL;
-    CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(int) * num_items));
-
-    // Initialize device input
-    CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(int) * num_items, cudaMemcpyHostToDevice));
-
-    // Allocate device output array and num selected
-    int *d_out              = NULL;
-    int *d_num_selected_out = NULL;
-    CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(int) * num_items));
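-
-    // Aside (editorial sketch, not from the deleted file): the two calls to
-    // DeviceSelect::Unique below follow CUB's standard two-phase idiom. The
-    // first call, made with d_temp_storage == NULL, only writes the required
-    // scratch size into temp_storage_bytes; the second call, with a real
-    // allocation, does the work. The same idiom with plain cudaMalloc,
-    // assuming d_in, d_out and d_num_selected_out are already allocated:
-    //
-    //     void   *d_temp_storage     = NULL;
-    //     size_t  temp_storage_bytes = 0;
-    //     DeviceSelect::Unique(d_temp_storage, temp_storage_bytes,
-    //                          d_in, d_out, d_num_selected_out, num_items);  // size query only
-    //     cudaMalloc(&d_temp_storage, temp_storage_bytes);                   // allocate once
-    //     DeviceSelect::Unique(d_temp_storage, temp_storage_bytes,
-    //                          d_in, d_out, d_num_selected_out, num_items);  // actual run
-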
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_selected_out, sizeof(int))); - - // Allocate temporary storage - void *d_temp_storage = NULL; - size_t temp_storage_bytes = 0; - CubDebugExit(DeviceSelect::Unique(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items)); - CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); - - // Run - CubDebugExit(DeviceSelect::Unique(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items)); - - // Check for correctness (and display results, if specified) - int compare = CompareDeviceResults(h_reference, d_out, num_selected, true, g_verbose); - printf("\t Data %s ", compare ? "FAIL" : "PASS"); - compare = compare | CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose); - printf("\t Count %s ", compare ? "FAIL" : "PASS"); - AssertEquals(0, compare); - - // Cleanup - if (h_in) delete[] h_in; - if (h_reference) delete[] h_reference; - if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); - if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); - if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out)); - if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); - - printf("\n\n"); - - return 0; -} - - - diff --git a/ml-xgboost/cub/examples/device/example_device_sort_find_non_trivial_runs.cu b/ml-xgboost/cub/examples/device/example_device_sort_find_non_trivial_runs.cu deleted file mode 100644 index 3fbe0eb..0000000 --- a/ml-xgboost/cub/examples/device/example_device_sort_find_non_trivial_runs.cu +++ /dev/null @@ -1,384 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - ******************************************************************************/ - -/****************************************************************************** - * Simple example of sorting a sequence of keys and values (each pair is a - * randomly-selected int32 paired with its original offset in the unsorted sequence), and then - * isolating all maximal, non-trivial (having length > 1) "runs" of duplicates. - * - * To compile using the command line: - * nvcc -arch=sm_XX example_device_sort_find_non_trivial_runs.cu -I../.. -lcudart -O3 - * - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include -#include - -#include -#include -#include - -#include "../../test/test_util.h" - -using namespace cub; - - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -bool g_verbose = false; // Whether to display input/output to console -CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory - - -//--------------------------------------------------------------------- -// Test generation -//--------------------------------------------------------------------- - -/** - * Simple key-value pairing for using std::sort on key-value pairs. - */ -template -struct Pair -{ - Key key; - Value value; - - bool operator<(const Pair &b) const - { - return (key < b.key); - } -}; - - -/** - * Pair ostream operator - */ -template -std::ostream& operator<<(std::ostream& os, const Pair& val) -{ - os << '<' << val.key << ',' << val.value << '>'; - return os; -} - - -/** - * Initialize problem - */ -template -void Initialize( - Key *h_keys, - Value *h_values, - int num_items, - int max_key) -{ - float scale = float(max_key) / float(UINT_MAX); - for (int i = 0; i < num_items; ++i) - { - Key sample; - RandomBits(sample); - h_keys[i] = (max_key == -1) ? i : (Key) (scale * sample); - h_values[i] = i; - } - - if (g_verbose) - { - printf("Keys:\n"); - DisplayResults(h_keys, num_items); - printf("\n\n"); - - printf("Values:\n"); - DisplayResults(h_values, num_items); - printf("\n\n"); - } -} - - -/** - * Solve sorted non-trivial subrange problem. Returns the number - * of non-trivial runs found. 
- */ -template -int Solve( - Key *h_keys, - Value *h_values, - int num_items, - int *h_offsets_reference, - int *h_lengths_reference) -{ - // Sort - - Pair *h_pairs = new Pair[num_items]; - for (int i = 0; i < num_items; ++i) - { - h_pairs[i].key = h_keys[i]; - h_pairs[i].value = h_values[i]; - } - - std::stable_sort(h_pairs, h_pairs + num_items); - - if (g_verbose) - { - printf("Sorted pairs:\n"); - DisplayResults(h_pairs, num_items); - printf("\n\n"); - } - - // Find non-trivial runs - - Key previous = h_pairs[0].key; - int length = 1; - int num_runs = 0; - int run_begin = 0; - - for (int i = 1; i < num_items; ++i) - { - if (previous != h_pairs[i].key) - { - if (length > 1) - { - h_offsets_reference[num_runs] = run_begin; - h_lengths_reference[num_runs] = length; - num_runs++; - } - length = 1; - run_begin = i; - } - else - { - length++; - } - previous = h_pairs[i].key; - } - - if (length > 1) - { - h_offsets_reference[num_runs] = run_begin; - h_lengths_reference[num_runs] = length; - num_runs++; - } - - delete[] h_pairs; - - return num_runs; -} - - -//--------------------------------------------------------------------- -// Main -//--------------------------------------------------------------------- - -/** - * Main - */ -int main(int argc, char** argv) -{ - typedef unsigned int Key; - typedef int Value; - - int timing_iterations = 0; - int num_items = 40; - Key max_key = 20; // Max item - - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - args.GetCmdLineArgument("n", num_items); - args.GetCmdLineArgument("maxkey", max_key); - args.GetCmdLineArgument("i", timing_iterations); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--device=] " - "[--i= " - "[--n= " - "[--maxkey=]" - "[--v] " - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - - // Allocate host arrays (problem and reference solution) - - Key *h_keys = new Key[num_items]; - Value *h_values = new Value[num_items]; - int *h_offsets_reference = new int[num_items]; - int *h_lengths_reference = new int[num_items]; - - // Initialize key-value pairs and compute reference solution (sort them, and identify non-trivial runs) - printf("Computing reference solution on CPU for %d items (max key %d)\n", num_items, max_key); - fflush(stdout); - - Initialize(h_keys, h_values, num_items, max_key); - int num_runs = Solve(h_keys, h_values, num_items, h_offsets_reference, h_lengths_reference); - - printf("%d non-trivial runs\n", num_runs); - fflush(stdout); - - // Repeat for performance timing - GpuTimer gpu_timer; - GpuTimer gpu_rle_timer; - float elapsed_millis = 0.0; - float elapsed_rle_millis = 0.0; - for (int i = 0; i <= timing_iterations; ++i) - { - - // Allocate and initialize device arrays for sorting - DoubleBuffer d_keys; - DoubleBuffer d_values; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_keys.d_buffers[0], sizeof(Key) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_keys.d_buffers[1], sizeof(Key) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_values.d_buffers[0], sizeof(Value) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_values.d_buffers[1], sizeof(Value) * num_items)); - - CubDebugExit(cudaMemcpy(d_keys.d_buffers[d_keys.selector], h_keys, sizeof(float) * num_items, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemcpy(d_values.d_buffers[d_values.selector], h_values, sizeof(int) * num_items, cudaMemcpyHostToDevice)); - - // Start timer - 
gpu_timer.Start(); - - // Allocate temporary storage for sorting - size_t temp_storage_bytes = 0; - void *d_temp_storage = NULL; - CubDebugExit(DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items)); - CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); - - // Do the sort - CubDebugExit(DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items)); - - // Free unused buffers and sorting temporary storage - if (d_keys.d_buffers[d_keys.selector ^ 1]) CubDebugExit(g_allocator.DeviceFree(d_keys.d_buffers[d_keys.selector ^ 1])); - if (d_values.d_buffers[d_values.selector ^ 1]) CubDebugExit(g_allocator.DeviceFree(d_values.d_buffers[d_values.selector ^ 1])); - if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); - - // Start timer - gpu_rle_timer.Start(); - - // Allocate device arrays for enumerating non-trivial runs - int *d_offests_out = NULL; - int *d_lengths_out = NULL; - int *d_num_runs = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_offests_out, sizeof(int) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_lengths_out, sizeof(int) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_runs, sizeof(int) * 1)); - - // Allocate temporary storage for isolating non-trivial runs - d_temp_storage = NULL; - CubDebugExit(DeviceRunLengthEncode::NonTrivialRuns( - d_temp_storage, - temp_storage_bytes, - d_keys.d_buffers[d_keys.selector], - d_offests_out, - d_lengths_out, - d_num_runs, - num_items)); - CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); - - // Do the isolation - CubDebugExit(DeviceRunLengthEncode::NonTrivialRuns( - d_temp_storage, - temp_storage_bytes, - d_keys.d_buffers[d_keys.selector], - d_offests_out, - d_lengths_out, - d_num_runs, - num_items)); - - // Free keys buffer - if (d_keys.d_buffers[d_keys.selector]) CubDebugExit(g_allocator.DeviceFree(d_keys.d_buffers[d_keys.selector])); - - // - // Hypothetically do stuff with the original key-indices corresponding to non-trivial runs of identical keys - // - - // Stop sort timer - gpu_timer.Stop(); - gpu_rle_timer.Stop(); - - if (i == 0) - { - // First iteration is a warmup: // Check for correctness (and display results, if specified) - - printf("\nRUN OFFSETS: \n"); - int compare = CompareDeviceResults(h_offsets_reference, d_offests_out, num_runs, true, g_verbose); - printf("\t\t %s ", compare ? "FAIL" : "PASS"); - - printf("\nRUN LENGTHS: \n"); - compare |= CompareDeviceResults(h_lengths_reference, d_lengths_out, num_runs, true, g_verbose); - printf("\t\t %s ", compare ? "FAIL" : "PASS"); - - printf("\nNUM RUNS: \n"); - compare |= CompareDeviceResults(&num_runs, d_num_runs, 1, true, g_verbose); - printf("\t\t %s ", compare ? 
"FAIL" : "PASS"); - - AssertEquals(0, compare); - } - else - { - elapsed_millis += gpu_timer.ElapsedMillis(); - elapsed_rle_millis += gpu_rle_timer.ElapsedMillis(); - } - - // GPU cleanup - - if (d_values.d_buffers[d_values.selector]) CubDebugExit(g_allocator.DeviceFree(d_values.d_buffers[d_values.selector])); - if (d_offests_out) CubDebugExit(g_allocator.DeviceFree(d_offests_out)); - if (d_lengths_out) CubDebugExit(g_allocator.DeviceFree(d_lengths_out)); - if (d_num_runs) CubDebugExit(g_allocator.DeviceFree(d_num_runs)); - if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); - } - - // Host cleanup - if (h_keys) delete[] h_keys; - if (h_values) delete[] h_values; - if (h_offsets_reference) delete[] h_offsets_reference; - if (h_lengths_reference) delete[] h_lengths_reference; - - printf("\n\n"); - - if (timing_iterations > 0) - { - printf("%d timing iterations, average time to sort and isolate non-trivial duplicates: %.3f ms (%.3f ms spent in RLE isolation)\n", - timing_iterations, - elapsed_millis / timing_iterations, - elapsed_rle_millis / timing_iterations); - } - - return 0; -} - - - diff --git a/ml-xgboost/cub/experimental/.gitignore b/ml-xgboost/cub/experimental/.gitignore deleted file mode 100644 index 5e56e04..0000000 --- a/ml-xgboost/cub/experimental/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/bin diff --git a/ml-xgboost/cub/experimental/Makefile b/ml-xgboost/cub/experimental/Makefile deleted file mode 100644 index 91075ac..0000000 --- a/ml-xgboost/cub/experimental/Makefile +++ /dev/null @@ -1,125 +0,0 @@ -#/****************************************************************************** -# * Copyright (c) 2011, Duane Merrill. All rights reserved. -# * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. -# * -# * Redistribution and use in source and binary forms, with or without -# * modification, are permitted provided that the following conditions are met: -# * * Redistributions of source code must retain the above copyright -# * notice, this list of conditions and the following disclaimer. -# * * Redistributions in binary form must reproduce the above copyright -# * notice, this list of conditions and the following disclaimer in the -# * documentation and/or other materials provided with the distribution. -# * * Neither the name of the NVIDIA CORPORATION nor the -# * names of its contributors may be used to endorse or promote products -# * derived from this software without specific prior written permission. -# * -# * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY -# * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -# * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-# * -#******************************************************************************/ - -#------------------------------------------------------------------------------- -# -# Makefile usage -# -# make [sm=] [cdp=<0|1>] [force32=<0|1>] [abi=<0|1>] [open64=<0|1>] [verbose=<0|1>] [keep=<0|1>] [quicktest=<0|1>] -# -#------------------------------------------------------------------------------- - -include ../common.mk - -#------------------------------------------------------------------------------- -# Commandline Options -#------------------------------------------------------------------------------- - -# [mkl=<0|1>] compile against Intel MKL -ifeq ($(mkl), 1) - DEFINES += -DCUB_MKL - -ifeq (WIN_NT, $(findstring WIN_NT, $(OSUPPER))) - LIBS += mkl_intel_lp64.lib mkl_intel_thread.lib mkl_core.lib libiomp5md.lib - NVCCFLAGS += -Xcompiler /openmp -else - LIBS += -lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5 -lpthread -lm - NVCCFLAGS += -Xcompiler -fopenmp - -endif - -endif - - -#------------------------------------------------------------------------------- -# Compiler and compilation platform -#------------------------------------------------------------------------------- - -# Includes -INC += -I$(CUB_DIR) -I$(CUB_DIR)test - -# detect OS -OSUPPER = $(shell uname -s 2>/dev/null | tr [:lower:] [:upper:]) - -#------------------------------------------------------------------------------- -# Dependency Lists -#------------------------------------------------------------------------------- - -exp_rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d)) - -EXP_DEPS = $(call rwildcard, ./,*.cuh) \ - $(call rwildcard, ./,*.h) - -DEPS = $(CUB_DEPS) \ - $(EXP_DEPS) \ - $(CUB_DIR)test/Makefile \ - $(CUB_DIR)test/test_util.h \ - $(CUB_DIR)test/mersenne.h \ - - - -#------------------------------------------------------------------------------- -# make default -#------------------------------------------------------------------------------- - -default: - - -#------------------------------------------------------------------------------- -# make clean -#------------------------------------------------------------------------------- - -clean : - rm -f bin/*$(CPU_ARCH_SUFFIX)* - rm -f *.i* *.cubin *.cu.c *.cudafe* *.fatbin.c *.ptx *.hash *.cu.cpp *.o - - - -#------------------------------------------------------------------------------- -# make histogram_compare -#------------------------------------------------------------------------------- - -histogram_compare: bin/histogram_compare_$(BIN_SUFFIX) - -bin/histogram_compare_$(BIN_SUFFIX) : histogram_compare.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/histogram_compare_$(BIN_SUFFIX) histogram_compare.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - - -#------------------------------------------------------------------------------- -# make spmv_compare -#------------------------------------------------------------------------------- - -spmv_compare: bin/spmv_compare_$(BIN_SUFFIX) - -bin/spmv_compare_$(BIN_SUFFIX) : spmv_compare.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/spmv_compare_$(BIN_SUFFIX) spmv_compare.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -lcusparse $(MKL_LIBS) -O3 - - diff --git a/ml-xgboost/cub/experimental/defunct/example_coo_spmv.cu b/ml-xgboost/cub/experimental/defunct/example_coo_spmv.cu deleted file mode 100644 index 09af0da..0000000 --- a/ml-xgboost/cub/experimental/defunct/example_coo_spmv.cu +++ /dev/null @@ -1,1070 +0,0 @@ 
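The deleted example_coo_spmv.cu below implements COO SpMV with a reduce-value-by-row strategy: (row, column, value) triples are processed in row-sorted order, partial dot products are accumulated, and the running total is flushed each time the row id changes. As a reading aid, here is a minimal host-side sketch of that strategy (an assumed simplification, not code from the file; rows/columns/values hold the row-sorted COO triples, x is the multiplicand vector, y the zero-initialized result):

    // Reduce-value-by-row on the host: accumulate a partial dot product and
    // emit it whenever the next edge belongs to a different row.
    void SpmvCooHost(const int *rows, const int *columns, const double *values,
                     const double *x, double *y, int num_edges)
    {
        double partial = 0.0;
        for (int e = 0; e < num_edges; ++e)
        {
            partial += values[e] * x[columns[e]];
            if ((e + 1 == num_edges) || (rows[e + 1] != rows[e]))
            {
                y[rows[e]] = partial;   // this row's dot product is complete
                partial = 0.0;
            }
        }
    }

The deleted GPU code expresses the same dependence pattern in parallel, carrying (row, partial) pairs through a block-wide prefix scan with its ReduceByKeyOp operator.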
-/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/****************************************************************************** - * An implementation of COO SpMV using prefix scan to implement a - * reduce-value-by-row strategy - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include -#include -#include -#include -#include - -#include - -#include "coo_graph.cuh" -#include "../test/test_util.h" - -using namespace cub; -using namespace std; - - -/****************************************************************************** - * Globals, constants, and typedefs - ******************************************************************************/ - -typedef int VertexId; // uint32s as vertex ids -typedef double Value; // double-precision floating point values - -bool g_verbose = false; -int g_timing_iterations = 1; -CachingDeviceAllocator g_allocator; - - -/****************************************************************************** - * Texture referencing - ******************************************************************************/ - -/** - * Templated texture reference type for multiplicand vector - */ -template -struct TexVector -{ - // Texture type to actually use (e.g., because CUDA doesn't load doubles as texture items) - typedef typename If<(Equals::VALUE), uint2, Value>::Type CastType; - - // Texture reference type - typedef texture TexRef; - - static TexRef ref; - - /** - * Bind textures - */ - static void BindTexture(void *d_in, int elements) - { - cudaChannelFormatDesc tex_desc = cudaCreateChannelDesc(); - if (d_in) - { - size_t offset; - size_t bytes = sizeof(CastType) * elements; - CubDebugExit(cudaBindTexture(&offset, ref, d_in, tex_desc, bytes)); - } - } 
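-
-    /**
-     * Aside (editorial note, not from the original file): texture fetches
-     * cannot return 64-bit doubles directly, which is why CastType above
-     * falls back to uint2 when Value is double. Reassembling the double from
-     * the fetched bits would look like the following, assuming raw.x / raw.y
-     * carry the low / high 32-bit words:
-     *
-     *     uint2  raw = tex1Dfetch(TexVector<double>::ref, offset);
-     *     double val = __hiloint2double(raw.y, raw.x);
-     */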
- - /** - * Unbind textures - */ - static void UnbindTexture() - { - CubDebugExit(cudaUnbindTexture(ref)); - } - - /** - * Load - */ - static __device__ __forceinline__ Value Load(int offset) - { - Value output; - reinterpret_cast::CastType &>(output) = tex1Dfetch(TexVector::ref, offset); - return output; - } -}; - -// Texture reference definitions -template -typename TexVector::TexRef TexVector::ref = 0; - - -/****************************************************************************** - * Utility types - ******************************************************************************/ - - -/** - * A partial dot-product sum paired with a corresponding row-id - */ -template -struct PartialProduct -{ - VertexId row; /// Row-id - Value partial; /// PartialProduct sum -}; - - -/** - * A partial dot-product sum paired with a corresponding row-id (specialized for double-int pairings) - */ -template <> -struct PartialProduct -{ - long long row; /// Row-id - double partial; /// PartialProduct sum -}; - - -/** - * Reduce-value-by-row scan operator - */ -struct ReduceByKeyOp -{ - template - __device__ __forceinline__ PartialProduct operator()( - const PartialProduct &first, - const PartialProduct &second) - { - PartialProduct retval; - - retval.partial = (second.row != first.row) ? - second.partial : - first.partial + second.partial; - - retval.row = second.row; - return retval; - } -}; - - -/** - * Stateful block-wide prefix operator for BlockScan - */ -template -struct BlockPrefixCallbackOp -{ - // Running block-wide prefix - PartialProduct running_prefix; - - /** - * Returns the block-wide running_prefix in thread-0 - */ - __device__ __forceinline__ PartialProduct operator()( - const PartialProduct &block_aggregate) ///< The aggregate sum of the BlockScan inputs - { - ReduceByKeyOp scan_op; - - PartialProduct retval = running_prefix; - running_prefix = scan_op(running_prefix, block_aggregate); - return retval; - } -}; - - -/** - * Operator for detecting discontinuities in a list of row identifiers. - */ -struct NewRowOp -{ - /// Returns true if row_b is the start of a new row - template - __device__ __forceinline__ bool operator()( - const VertexId& row_a, - const VertexId& row_b) - { - return (row_a != row_b); - } -}; - - - -/****************************************************************************** - * Persistent thread block types - ******************************************************************************/ - -/** - * SpMV threadblock abstraction for processing a contiguous segment of - * sparse COO tiles. 
- */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - typename VertexId, - typename Value> -struct PersistentBlockSpmv -{ - //--------------------------------------------------------------------- - // Types and constants - //--------------------------------------------------------------------- - - // Constants - enum - { - TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, - }; - - // Head flag type - typedef int HeadFlag; - - // Partial dot product type - typedef PartialProduct PartialProduct; - - // Parameterized BlockScan type for reduce-value-by-row scan - typedef BlockScan BlockScan; - - // Parameterized BlockExchange type for exchanging rows between warp-striped -> blocked arrangements - typedef BlockExchange BlockExchangeRows; - - // Parameterized BlockExchange type for exchanging values between warp-striped -> blocked arrangements - typedef BlockExchange BlockExchangeValues; - - // Parameterized BlockDiscontinuity type for setting head-flags for each new row segment - typedef BlockDiscontinuity BlockDiscontinuity; - - // Shared memory type for this threadblock - struct TempStorage - { - union - { - typename BlockExchangeRows::TempStorage exchange_rows; // Smem needed for BlockExchangeRows - typename BlockExchangeValues::TempStorage exchange_values; // Smem needed for BlockExchangeValues - struct - { - typename BlockScan::TempStorage scan; // Smem needed for BlockScan - typename BlockDiscontinuity::TempStorage discontinuity; // Smem needed for BlockDiscontinuity - }; - }; - - VertexId first_block_row; ///< The first row-ID seen by this thread block - VertexId last_block_row; ///< The last row-ID seen by this thread block - Value first_product; ///< The first dot-product written by this thread block - }; - - //--------------------------------------------------------------------- - // Thread fields - //--------------------------------------------------------------------- - - TempStorage &temp_storage; - BlockPrefixCallbackOp prefix_op; - VertexId *d_rows; - VertexId *d_columns; - Value *d_values; - Value *d_vector; - Value *d_result; - PartialProduct *d_block_partials; - int block_offset; - int block_end; - - - //--------------------------------------------------------------------- - // Operations - //--------------------------------------------------------------------- - - /** - * Constructor - */ - __device__ __forceinline__ - PersistentBlockSpmv( - TempStorage &temp_storage, - VertexId *d_rows, - VertexId *d_columns, - Value *d_values, - Value *d_vector, - Value *d_result, - PartialProduct *d_block_partials, - int block_offset, - int block_end) - : - temp_storage(temp_storage), - d_rows(d_rows), - d_columns(d_columns), - d_values(d_values), - d_vector(d_vector), - d_result(d_result), - d_block_partials(d_block_partials), - block_offset(block_offset), - block_end(block_end) - { - // Initialize scalar shared memory values - if (threadIdx.x == 0) - { - VertexId first_block_row = d_rows[block_offset]; - VertexId last_block_row = d_rows[block_end - 1]; - - temp_storage.first_block_row = first_block_row; - temp_storage.last_block_row = last_block_row; - temp_storage.first_product = Value(0); - - // Initialize prefix_op to identity - prefix_op.running_prefix.row = first_block_row; - prefix_op.running_prefix.partial = Value(0); - } - - __syncthreads(); - } - - - /** - * Processes a COO input tile of edges, outputting dot products for each row - */ - template - __device__ __forceinline__ void ProcessTile( - int block_offset, - int guarded_items = 0) - { - VertexId 
columns[ITEMS_PER_THREAD]; - VertexId rows[ITEMS_PER_THREAD]; - Value values[ITEMS_PER_THREAD]; - PartialProduct partial_sums[ITEMS_PER_THREAD]; - HeadFlag head_flags[ITEMS_PER_THREAD]; - - // Load a threadblock-striped tile of A (sparse row-ids, column-ids, and values) - if (FULL_TILE) - { - // Unguarded loads - LoadDirectWarpStriped(threadIdx.x, d_columns + block_offset, columns); - LoadDirectWarpStriped(threadIdx.x, d_values + block_offset, values); - LoadDirectWarpStriped(threadIdx.x, d_rows + block_offset, rows); - } - else - { - // This is a partial-tile (e.g., the last tile of input). Extend the coordinates of the last - // vertex for out-of-bound items, but zero-valued - LoadDirectWarpStriped(threadIdx.x, d_columns + block_offset, columns, guarded_items, VertexId(0)); - LoadDirectWarpStriped(threadIdx.x, d_values + block_offset, values, guarded_items, Value(0)); - LoadDirectWarpStriped(threadIdx.x, d_rows + block_offset, rows, guarded_items, temp_storage.last_block_row); - } - - // Load the referenced values from x and compute the dot product partials sums - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { -#if CUB_PTX_ARCH >= 350 - values[ITEM] *= ThreadLoad(d_vector + columns[ITEM]); -#else - values[ITEM] *= TexVector::Load(columns[ITEM]); -#endif - } - - // Transpose from warp-striped to blocked arrangement - BlockExchangeValues(temp_storage.exchange_values).WarpStripedToBlocked(values); - - __syncthreads(); - - // Transpose from warp-striped to blocked arrangement - BlockExchangeRows(temp_storage.exchange_rows).WarpStripedToBlocked(rows); - - // Barrier for smem reuse and coherence - __syncthreads(); - - // FlagT row heads by looking for discontinuities - BlockDiscontinuity(temp_storage.discontinuity).FlagHeads( - head_flags, // (Out) Head flags - rows, // Original row ids - NewRowOp(), // Functor for detecting start of new rows - prefix_op.running_prefix.row); // Last row ID from previous tile to compare with first row ID in this tile - - // Assemble partial product structures - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - partial_sums[ITEM].partial = values[ITEM]; - partial_sums[ITEM].row = rows[ITEM]; - } - - // Reduce reduce-value-by-row across partial_sums using exclusive prefix scan - PartialProduct block_aggregate; - BlockScan(temp_storage.scan).ExclusiveScan( - partial_sums, // Scan input - partial_sums, // Scan output - ReduceByKeyOp(), // Scan operator - block_aggregate, // Block-wide total (unused) - prefix_op); // Prefix operator for seeding the block-wide scan with the running total - - // Barrier for smem reuse and coherence - __syncthreads(); - - // Scatter an accumulated dot product if it is the head of a valid row - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - if (head_flags[ITEM]) - { - d_result[partial_sums[ITEM].row] = partial_sums[ITEM].partial; - - // Save off the first partial product that this thread block will scatter - if (partial_sums[ITEM].row == temp_storage.first_block_row) - { - temp_storage.first_product = partial_sums[ITEM].partial; - } - } - } - } - - - /** - * Iterate over input tiles belonging to this thread block - */ - __device__ __forceinline__ - void ProcessTiles() - { - // Process full tiles - while (block_offset <= block_end - TILE_ITEMS) - { - ProcessTile(block_offset); - block_offset += TILE_ITEMS; - } - - // Process the last, partially-full tile (if present) - int guarded_items = block_end - block_offset; - if (guarded_items) - { - 
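The exclusive scan above is what turns per-item partial products into per-row dot products: the ReduceByKeyOp defined earlier sums forward only while the row id is unchanged and restarts at each new row, so the value sitting at each row boundary (where a head flag fires) is that row's completed total. A small host walk-through of the same combine rule (Partial and Combine are hypothetical names for this sketch):

```cuda
#include <cstdio>

struct Partial { int row; double partial; };

// Same combine rule as the deleted ReduceByKeyOp: sums flow forward only
// while the row id stays the same; a new row id restarts the sum.
Partial Combine(const Partial& first, const Partial& second)
{
    Partial r;
    r.partial = (second.row != first.row) ? second.partial
                                          : first.partial + second.partial;
    r.row = second.row;
    return r;
}

int main()
{
    // COO entries already multiplied by x[col]: (row, value) pairs
    Partial in[] = {{0, 1.0}, {0, 2.0}, {1, 5.0}, {1, 0.5}, {2, 4.0}};
    Partial running = in[0];
    for (int i = 1; i < 5; ++i)
    {
        running = Combine(running, in[i]);
        printf("after item %d: row=%d partial=%.1f\n",
               i, running.row, running.partial);
    }
    // When the row id changes, the sum restarts; the running value just
    // before the change is that row's completed dot product.
    return 0;
}
```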
ProcessTile(block_offset, guarded_items); - } - - if (threadIdx.x == 0) - { - if (gridDim.x == 1) - { - // Scatter the final aggregate (this kernel contains only 1 threadblock) - d_result[prefix_op.running_prefix.row] = prefix_op.running_prefix.partial; - } - else - { - // Write the first and last partial products from this thread block so - // that they can be subsequently "fixed up" in the next kernel. - - PartialProduct first_product; - first_product.row = temp_storage.first_block_row; - first_product.partial = temp_storage.first_product; - - d_block_partials[blockIdx.x * 2] = first_product; - d_block_partials[(blockIdx.x * 2) + 1] = prefix_op.running_prefix; - } - } - } -}; - - -/** - * Threadblock abstraction for "fixing up" an array of interblock SpMV partial products. - */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - typename VertexId, - typename Value> -struct FinalizeSpmvBlock -{ - //--------------------------------------------------------------------- - // Types and constants - //--------------------------------------------------------------------- - - // Constants - enum - { - TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, - }; - - // Head flag type - typedef int HeadFlag; - - // Partial dot product type - typedef PartialProduct PartialProduct; - - // Parameterized BlockScan type for reduce-value-by-row scan - typedef BlockScan BlockScan; - - // Parameterized BlockDiscontinuity type for setting head-flags for each new row segment - typedef BlockDiscontinuity BlockDiscontinuity; - - // Shared memory type for this threadblock - struct TempStorage - { - typename BlockScan::TempStorage scan; // Smem needed for reduce-value-by-row scan - typename BlockDiscontinuity::TempStorage discontinuity; // Smem needed for head-flagging - - VertexId last_block_row; - }; - - - //--------------------------------------------------------------------- - // Thread fields - //--------------------------------------------------------------------- - - TempStorage &temp_storage; - BlockPrefixCallbackOp prefix_op; - Value *d_result; - PartialProduct *d_block_partials; - int num_partials; - - - //--------------------------------------------------------------------- - // Operations - //--------------------------------------------------------------------- - - /** - * Constructor - */ - __device__ __forceinline__ - FinalizeSpmvBlock( - TempStorage &temp_storage, - Value *d_result, - PartialProduct *d_block_partials, - int num_partials) - : - temp_storage(temp_storage), - d_result(d_result), - d_block_partials(d_block_partials), - num_partials(num_partials) - { - // Initialize scalar shared memory values - if (threadIdx.x == 0) - { - VertexId first_block_row = d_block_partials[0].row; - VertexId last_block_row = d_block_partials[num_partials - 1].row; - temp_storage.last_block_row = last_block_row; - - // Initialize prefix_op to identity - prefix_op.running_prefix.row = first_block_row; - prefix_op.running_prefix.partial = Value(0); - } - - __syncthreads(); - } - - - /** - * Processes a COO input tile of edges, outputting dot products for each row - */ - template - __device__ __forceinline__ - void ProcessTile( - int block_offset, - int guarded_items = 0) - { - VertexId rows[ITEMS_PER_THREAD]; - PartialProduct partial_sums[ITEMS_PER_THREAD]; - HeadFlag head_flags[ITEMS_PER_THREAD]; - - // Load a tile of block partials from previous kernel - if (FULL_TILE) - { - // Full tile -#if CUB_PTX_ARCH >= 350 - LoadDirectBlocked(threadIdx.x, d_block_partials + block_offset, partial_sums); -#else - 
LoadDirectBlocked(threadIdx.x, d_block_partials + block_offset, partial_sums); -#endif - } - else - { - // Partial tile (extend zero-valued coordinates of the last partial-product for out-of-bounds items) - PartialProduct default_sum; - default_sum.row = temp_storage.last_block_row; - default_sum.partial = Value(0); - -#if CUB_PTX_ARCH >= 350 - LoadDirectBlocked(threadIdx.x, d_block_partials + block_offset, partial_sums, guarded_items, default_sum); -#else - LoadDirectBlocked(threadIdx.x, d_block_partials + block_offset, partial_sums, guarded_items, default_sum); -#endif - } - - // Copy out row IDs for row-head flagging - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - rows[ITEM] = partial_sums[ITEM].row; - } - - // FlagT row heads by looking for discontinuities - BlockDiscontinuity(temp_storage.discontinuity).FlagHeads( - rows, // Original row ids - head_flags, // (Out) Head flags - NewRowOp(), // Functor for detecting start of new rows - prefix_op.running_prefix.row); // Last row ID from previous tile to compare with first row ID in this tile - - // Reduce reduce-value-by-row across partial_sums using exclusive prefix scan - PartialProduct block_aggregate; - BlockScan(temp_storage.scan).ExclusiveScan( - partial_sums, // Scan input - partial_sums, // Scan output - ReduceByKeyOp(), // Scan operator - block_aggregate, // Block-wide total (unused) - prefix_op); // Prefix operator for seeding the block-wide scan with the running total - - // Scatter an accumulated dot product if it is the head of a valid row - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - if (head_flags[ITEM]) - { - d_result[partial_sums[ITEM].row] = partial_sums[ITEM].partial; - } - } - } - - - /** - * Iterate over input tiles belonging to this thread block - */ - __device__ __forceinline__ - void ProcessTiles() - { - // Process full tiles - int block_offset = 0; - while (block_offset <= num_partials - TILE_ITEMS) - { - ProcessTile(block_offset); - block_offset += TILE_ITEMS; - } - - // Process final partial tile (if present) - int guarded_items = num_partials - block_offset; - if (guarded_items) - { - ProcessTile(block_offset, guarded_items); - } - - // Scatter the final aggregate (this kernel contains only 1 threadblock) - if (threadIdx.x == 0) - { - d_result[prefix_op.running_prefix.row] = prefix_op.running_prefix.partial; - } - } -}; - - -/****************************************************************************** - * Kernel entrypoints - ******************************************************************************/ - - - -/** - * SpMV kernel whose thread blocks each process a contiguous segment of sparse COO tiles. 
- */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - typename VertexId, - typename Value> -__launch_bounds__ (BLOCK_THREADS) -__global__ void CooKernel( - GridEvenShare even_share, - PartialProduct *d_block_partials, - VertexId *d_rows, - VertexId *d_columns, - Value *d_values, - Value *d_vector, - Value *d_result) -{ - // Specialize SpMV threadblock abstraction type - typedef PersistentBlockSpmv PersistentBlockSpmv; - - // Shared memory allocation - __shared__ typename PersistentBlockSpmv::TempStorage temp_storage; - - // Initialize threadblock even-share to tell us where to start and stop our tile-processing - even_share.BlockInit(); - - // Construct persistent thread block - PersistentBlockSpmv persistent_block( - temp_storage, - d_rows, - d_columns, - d_values, - d_vector, - d_result, - d_block_partials, - even_share.block_offset, - even_share.block_end); - - // Process input tiles - persistent_block.ProcessTiles(); -} - - -/** - * Kernel for "fixing up" an array of interblock SpMV partial products. - */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - typename VertexId, - typename Value> -__launch_bounds__ (BLOCK_THREADS, 1) -__global__ void CooFinalizeKernel( - PartialProduct *d_block_partials, - int num_partials, - Value *d_result) -{ - // Specialize "fix-up" threadblock abstraction type - typedef FinalizeSpmvBlock FinalizeSpmvBlock; - - // Shared memory allocation - __shared__ typename FinalizeSpmvBlock::TempStorage temp_storage; - - // Construct persistent thread block - FinalizeSpmvBlock persistent_block(temp_storage, d_result, d_block_partials, num_partials); - - // Process input tiles - persistent_block.ProcessTiles(); -} - - - -//--------------------------------------------------------------------- -// Host subroutines -//--------------------------------------------------------------------- - - -/** - * Simple test of device - */ -template < - int COO_BLOCK_THREADS, - int COO_ITEMS_PER_THREAD, - int COO_SUBSCRIPTION_FACTOR, - int FINALIZE_BLOCK_THREADS, - int FINALIZE_ITEMS_PER_THREAD, - typename VertexId, - typename Value> -void TestDevice( - CooGraph& coo_graph, - Value* h_vector, - Value* h_reference) -{ - typedef PartialProduct PartialProduct; - - const int COO_TILE_SIZE = COO_BLOCK_THREADS * COO_ITEMS_PER_THREAD; - - // SOA device storage - VertexId *d_rows; // SOA graph row coordinates - VertexId *d_columns; // SOA graph col coordinates - Value *d_values; // SOA graph values - Value *d_vector; // Vector multiplicand - Value *d_result; // Output row - PartialProduct *d_block_partials; // Temporary storage for communicating dot product partials between threadblocks - - // Create SOA version of coo_graph on host - int num_edges = coo_graph.coo_tuples.size(); - VertexId *h_rows = new VertexId[num_edges]; - VertexId *h_columns = new VertexId[num_edges]; - Value *h_values = new Value[num_edges]; - for (int i = 0; i < num_edges; i++) - { - h_rows[i] = coo_graph.coo_tuples[i].row; - h_columns[i] = coo_graph.coo_tuples[i].col; - h_values[i] = coo_graph.coo_tuples[i].val; - } - - // Get CUDA properties - Device device_props; - CubDebugExit(device_props.Init()); - - // Determine launch configuration from kernel properties - int coo_sm_occupancy; - CubDebugExit(device_props.MaxSmOccupancy( - coo_sm_occupancy, - CooKernel, - COO_BLOCK_THREADS)); - int max_coo_grid_size = device_props.sm_count * coo_sm_occupancy * COO_SUBSCRIPTION_FACTOR; - - // Construct an even-share work distribution - GridEvenShare even_share(num_edges, max_coo_grid_size, COO_TILE_SIZE); - int 
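The launch configuration above sizes the grid from device occupancy times an oversubscription factor, so that blocks finishing their tile ranges early leave no SMs idle. A hedged host-side sketch of the same computation using the CUDA runtime occupancy query (MaxGridSize and DummyKernel are hypothetical names, not the deleted code's API):

```cuda
#include <cstdio>
#include <cuda_runtime.h>

__global__ void DummyKernel() {}

// Sketch of the launch-configuration step: the deleted MaxSmOccupancy call
// corresponds to this runtime occupancy query. Oversubscribing the SMs keeps
// them busy when even-share tiles complete unevenly.
int MaxGridSize(int block_threads, int subscription_factor)
{
    int device, sm_count, occupancy;
    cudaGetDevice(&device);
    cudaDeviceGetAttribute(&sm_count, cudaDevAttrMultiProcessorCount, device);
    cudaOccupancyMaxActiveBlocksPerMultiprocessor(
        &occupancy, DummyKernel, block_threads, 0 /* dynamic smem */);
    return sm_count * occupancy * subscription_factor;
}

int main()
{
    printf("max grid size: %d\n", MaxGridSize(64, 4));
    return 0;
}
```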
coo_grid_size = even_share.grid_size; - int num_partials = coo_grid_size * 2; - - // Allocate COO device arrays - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_rows, sizeof(VertexId) * num_edges)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_columns, sizeof(VertexId) * num_edges)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_values, sizeof(Value) * num_edges)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_vector, sizeof(Value) * coo_graph.col_dim)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_result, sizeof(Value) * coo_graph.row_dim)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_block_partials, sizeof(PartialProduct) * num_partials)); - - // Copy host arrays to device - CubDebugExit(cudaMemcpy(d_rows, h_rows, sizeof(VertexId) * num_edges, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemcpy(d_columns, h_columns, sizeof(VertexId) * num_edges, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemcpy(d_values, h_values, sizeof(Value) * num_edges, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemcpy(d_vector, h_vector, sizeof(Value) * coo_graph.col_dim, cudaMemcpyHostToDevice)); - - // Bind textures - TexVector::BindTexture(d_vector, coo_graph.col_dim); - - // Print debug info - printf("CooKernel<%d, %d><<<%d, %d>>>(...), Max SM occupancy: %d\n", - COO_BLOCK_THREADS, COO_ITEMS_PER_THREAD, coo_grid_size, COO_BLOCK_THREADS, coo_sm_occupancy); - if (coo_grid_size > 1) - { - printf("CooFinalizeKernel<<<1, %d>>>(...)\n", FINALIZE_BLOCK_THREADS); - } - fflush(stdout); - - CubDebugExit(cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte)); - - // Run kernel (always run one iteration without timing) - GpuTimer gpu_timer; - float elapsed_millis = 0.0; - for (int i = 0; i <= g_timing_iterations; i++) - { - gpu_timer.Start(); - - // Initialize output - CubDebugExit(cudaMemset(d_result, 0, coo_graph.row_dim * sizeof(Value))); - - // Run the COO kernel - CooKernel<<>>( - even_share, - d_block_partials, - d_rows, - d_columns, - d_values, - d_vector, - d_result); - - if (coo_grid_size > 1) - { - // Run the COO finalize kernel - CooFinalizeKernel<<<1, FINALIZE_BLOCK_THREADS>>>( - d_block_partials, - num_partials, - d_result); - } - - gpu_timer.Stop(); - - if (i > 0) - elapsed_millis += gpu_timer.ElapsedMillis(); - } - - // Force any kernel stdio to screen - CubDebugExit(cudaThreadSynchronize()); - fflush(stdout); - - // Display timing - if (g_timing_iterations > 0) - { - float avg_elapsed = elapsed_millis / g_timing_iterations; - int total_bytes = ((sizeof(VertexId) + sizeof(VertexId)) * 2 * num_edges) + (sizeof(Value) * coo_graph.row_dim); - printf("%d iterations, average elapsed (%.3f ms), utilized bandwidth (%.3f GB/s), GFLOPS(%.3f)\n", - g_timing_iterations, - avg_elapsed, - total_bytes / avg_elapsed / 1000.0 / 1000.0, - num_edges * 2 / avg_elapsed / 1000.0 / 1000.0); - } - - // Check results - int compare = CompareDeviceResults(h_reference, d_result, coo_graph.row_dim, true, g_verbose); - printf("%s\n", compare ? 
"FAIL" : "PASS"); - AssertEquals(0, compare); - - // Cleanup - TexVector::UnbindTexture(); - CubDebugExit(g_allocator.DeviceFree(d_block_partials)); - CubDebugExit(g_allocator.DeviceFree(d_rows)); - CubDebugExit(g_allocator.DeviceFree(d_columns)); - CubDebugExit(g_allocator.DeviceFree(d_values)); - CubDebugExit(g_allocator.DeviceFree(d_vector)); - CubDebugExit(g_allocator.DeviceFree(d_result)); - delete[] h_rows; - delete[] h_columns; - delete[] h_values; -} - - -/** - * Compute reference answer on CPU - */ -template -void ComputeReference( - CooGraph& coo_graph, - Value* h_vector, - Value* h_reference) -{ - for (VertexId i = 0; i < coo_graph.row_dim; i++) - { - h_reference[i] = 0.0; - } - - for (VertexId i = 0; i < coo_graph.coo_tuples.size(); i++) - { - h_reference[coo_graph.coo_tuples[i].row] += - coo_graph.coo_tuples[i].val * - h_vector[coo_graph.coo_tuples[i].col]; - } -} - - -/** - * Assign arbitrary values to vector items - */ -template -void AssignVectorValues(Value *vector, int col_dim) -{ - for (int i = 0; i < col_dim; i++) - { - vector[i] = 1.0; - } -} - - -/** - * Main - */ -int main(int argc, char** argv) -{ - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - args.GetCmdLineArgument("i", g_timing_iterations); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s\n [--device=] [--v] [--iterations=] [--grid-size=]\n" - "\t--type=wheel --spokes=\n" - "\t--type=grid2d --width= [--no-self-loops]\n" - "\t--type=grid3d --width= [--no-self-loops]\n" - "\t--type=market --file=\n" - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - - // Get graph type - string type; - args.GetCmdLineArgument("type", type); - - // Generate graph structure - - CpuTimer timer; - timer.Start(); - CooGraph coo_graph; - if (type == string("grid2d")) - { - VertexId width; - args.GetCmdLineArgument("width", width); - bool self_loops = !args.CheckCmdLineFlag("no-self-loops"); - printf("Generating %s grid2d width(%d)... ", (self_loops) ? "5-pt" : "4-pt", width); fflush(stdout); - if (coo_graph.InitGrid2d(width, self_loops)) exit(1); - } else if (type == string("grid3d")) - { - VertexId width; - args.GetCmdLineArgument("width", width); - bool self_loops = !args.CheckCmdLineFlag("no-self-loops"); - printf("Generating %s grid3d width(%d)... ", (self_loops) ? "7-pt" : "6-pt", width); fflush(stdout); - if (coo_graph.InitGrid3d(width, self_loops)) exit(1); - } - else if (type == string("wheel")) - { - VertexId spokes; - args.GetCmdLineArgument("spokes", spokes); - printf("Generating wheel spokes(%d)... ", spokes); fflush(stdout); - if (coo_graph.InitWheel(spokes)) exit(1); - } - else if (type == string("market")) - { - string filename; - args.GetCmdLineArgument("file", filename); - printf("Generating MARKET for %s... ", filename.c_str()); fflush(stdout); - if (coo_graph.InitMarket(filename)) exit(1); - } - else - { - printf("Unsupported graph type\n"); - exit(1); - } - timer.Stop(); - printf("Done (%.3fs). 
%d non-zeros, %d rows, %d columns\n", - timer.ElapsedMillis() / 1000.0, - coo_graph.coo_tuples.size(), - coo_graph.row_dim, - coo_graph.col_dim); - fflush(stdout); - - if (g_verbose) - { - cout << coo_graph << "\n"; - } - - // Create vector - Value *h_vector = new Value[coo_graph.col_dim]; - AssignVectorValues(h_vector, coo_graph.col_dim); - if (g_verbose) - { - printf("Vector[%d]: ", coo_graph.col_dim); - DisplayResults(h_vector, coo_graph.col_dim); - printf("\n\n"); - } - - // Compute reference answer - Value *h_reference = new Value[coo_graph.row_dim]; - ComputeReference(coo_graph, h_vector, h_reference); - if (g_verbose) - { - printf("Results[%d]: ", coo_graph.row_dim); - DisplayResults(h_reference, coo_graph.row_dim); - printf("\n\n"); - } - - // Parameterization for SM35 - enum - { - COO_BLOCK_THREADS = 64, - COO_ITEMS_PER_THREAD = 10, - COO_SUBSCRIPTION_FACTOR = 4, - FINALIZE_BLOCK_THREADS = 256, - FINALIZE_ITEMS_PER_THREAD = 4, - }; - - // Run GPU version - TestDevice< - COO_BLOCK_THREADS, - COO_ITEMS_PER_THREAD, - COO_SUBSCRIPTION_FACTOR, - FINALIZE_BLOCK_THREADS, - FINALIZE_ITEMS_PER_THREAD>(coo_graph, h_vector, h_reference); - - // Cleanup - delete[] h_vector; - delete[] h_reference; - - return 0; -} - - - diff --git a/ml-xgboost/cub/experimental/defunct/test_device_seg_reduce.cu b/ml-xgboost/cub/experimental/defunct/test_device_seg_reduce.cu deleted file mode 100644 index 9a21f95..0000000 --- a/ml-xgboost/cub/experimental/defunct/test_device_seg_reduce.cu +++ /dev/null @@ -1,2142 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/****************************************************************************** - * An implementation of segmented reduction using a load-balanced parallelization - * strategy based on the MergePath decision path. 
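Before the merge-path machinery below, it helps to fix the sequential contract this deleted test implements: segment i reduces values[end_offsets[i-1] .. end_offsets[i]), with an implicit 0 before the first segment, and zero-length segments receive the identity. A minimal host reference (hypothetical names, written only for this note):

```cuda
#include <cstdio>

// Sequential segmented reduction: segment s covers
// values[end_offsets[s-1] .. end_offsets[s]), with end_offsets[-1] taken
// as 0. Zero-length segments receive the identity.
void SegReduceReference(const double* values, const int* end_offsets,
                        int num_segments, double identity, double* out)
{
    int begin = 0;
    for (int s = 0; s < num_segments; ++s)
    {
        double total = identity;
        for (int i = begin; i < end_offsets[s]; ++i)
            total += values[i];
        out[s] = total;
        begin = end_offsets[s];
    }
}

int main()
{
    double values[] = {1, 2, 3, 4, 5};
    int    ends[]   = {2, 2, 5};      // segment 1 is empty
    double out[3];
    SegReduceReference(values, ends, 3, 0.0, out);
    for (int s = 0; s < 3; ++s) printf("segment %d: %.1f\n", s, out[s]);
    return 0;
}
```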
- ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include -#include -#include -#include -#include - -#include - -#include "test_util.h" - -using namespace cub; -using namespace std; - - -/****************************************************************************** - * Globals, constants, and typedefs - ******************************************************************************/ - -bool g_verbose = false; -int g_timing_iterations = 1; -CachingDeviceAllocator g_allocator(true); - - -/****************************************************************************** - * Utility routines - ******************************************************************************/ - - -/** - * An pair of index offsets - */ -template -struct IndexPair -{ - OffsetT a_idx; - OffsetT b_idx; -}; - - -/** - * Computes the begin offsets into A and B for the specified - * location (diagonal) along the merge decision path - */ -template < - int BLOCK_THREADS, - typename IteratorA, - typename IteratorB, - typename OffsetT> -__device__ __forceinline__ void ParallelMergePathSearch( - OffsetT diagonal, - IteratorA a, - IteratorB b, - IndexPair begin, // Begin offsets into a and b - IndexPair end, // End offsets into a and b - IndexPair &intersection) // [out] Intersection offsets into a and b -{ - OffsetT a_split_min = CUB_MAX(diagonal - end.b_idx, begin.a_idx); - OffsetT a_split_max = CUB_MIN(diagonal, end.a_idx); - - while (a_split_min < a_split_max) - { - OffsetT a_distance = a_split_max - a_split_min; - OffsetT a_slice = (a_distance + BLOCK_THREADS - 1) >> Log2::VALUE; - OffsetT a_split_pivot = CUB_MIN(a_split_min + (threadIdx.x * a_slice), end.a_idx - 1); - - int move_up = (a[a_split_pivot] <= b[diagonal - a_split_pivot - 1]); - int num_up = __syncthreads_count(move_up); -/* - _CubLog("a_split_min(%d), a_split_max(%d) a_distance(%d), a_slice(%d), a_split_pivot(%d), move_up(%d), num_up(%d), a_begin(%d), a_end(%d)\n", - a_split_min, a_split_max, a_distance, a_slice, a_split_pivot, move_up, num_up, a_begin, a_end); -*/ - a_split_max = CUB_MIN(num_up * a_slice, end.a_idx); - a_split_min = CUB_MAX(a_split_max - a_slice, begin.a_idx) + 1; - } - - intersection.a_idx = CUB_MIN(a_split_min, end.a_idx); - intersection.b_idx = CUB_MIN(diagonal - a_split_min, end.b_idx); -} - -/** - * Computes the begin offsets into A and B for the specified - * location (diagonal) along the merge decision path - */ -template < - typename IteratorA, - typename IteratorB, - typename OffsetT> -__device__ __forceinline__ void MergePathSearch( - OffsetT diagonal, - IteratorA a, - IteratorB b, - IndexPair begin, // Begin offsets into a and b - IndexPair end, // End offsets into a and b - IndexPair &intersection) // [out] Intersection offsets into a and b -{ - OffsetT split_min = CUB_MAX(diagonal - end.b_idx, begin.a_idx); - OffsetT split_max = CUB_MIN(diagonal, end.a_idx); - - while (split_min < split_max) - { - OffsetT split_pivot = (split_min + split_max) >> 1; - if (a[split_pivot] <= b[diagonal - split_pivot - 1]) - { - // Move candidate split range up A, down B - split_min = split_pivot + 1; - } - else - { - // Move candidate split range up B, down A - split_max = split_pivot; - } - } - - intersection.a_idx = CUB_MIN(split_min, end.a_idx); - intersection.b_idx = CUB_MIN(diagonal - split_min, end.b_idx); -} - - -/****************************************************************************** - * Tuning policy types - 
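MergePathSearch above bisects one diagonal of the implicit merge grid: it returns the unique split (a_idx, b_idx) with a_idx + b_idx = diagonal such that a[0..a_idx) and b[0..b_idx) are exactly the first `diagonal` items of the merge of A and B. A host transcription of the same bisection, using segment end-offsets for A and value offsets for B (function and variable names are hypothetical):

```cuda
#include <cstdio>
#include <algorithm>

// Host transcription of the deleted MergePathSearch bisection.
void MergePathSearchHost(int diagonal, const int* a, int a_len,
                         const int* b, int b_len, int& a_idx, int& b_idx)
{
    int split_min = std::max(diagonal - b_len, 0);
    int split_max = std::min(diagonal, a_len);

    while (split_min < split_max)
    {
        int pivot = (split_min + split_max) >> 1;
        if (a[pivot] <= b[diagonal - pivot - 1])
            split_min = pivot + 1;   // move candidate split up A, down B
        else
            split_max = pivot;       // move candidate split up B, down A
    }
    a_idx = std::min(split_min, a_len);
    b_idx = diagonal - split_min;
}

int main()
{
    int a[] = {2, 4, 9};                     // e.g., segment end-offsets
    int b[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};   // e.g., value offsets
    for (int d = 0; d <= 12; d += 4)
    {
        int ai, bi;
        MergePathSearchHost(d, a, 3, b, 9, ai, bi);
        printf("diagonal %2d -> a_idx %d, b_idx %d\n", d, ai, bi);
    }
    return 0;
}
```

Because every diagonal cuts the decision path exactly once, equally spaced diagonals yield equally sized work ranges regardless of how segments and values are distributed, which is the load-balancing property this file is testing.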
 ******************************************************************************/
-
-/**
- * Parameterizable tuning policy type for BlockSegReduceRegion
- */
-template <
-    int                     _BLOCK_THREADS,             ///< Threads per thread block
-    int                     _ITEMS_PER_THREAD,          ///< Items per thread (per tile of input)
-    bool                    _USE_SMEM_SEGMENT_CACHE,    ///< Whether or not to cache incoming segment offsets in shared memory before reducing each tile
-    bool                    _USE_SMEM_VALUE_CACHE,      ///< Whether or not to cache incoming values in shared memory before reducing each tile
-    CacheLoadModifier       _LOAD_MODIFIER_SEGMENTS,    ///< Cache load modifier for reading segment offsets
-    CacheLoadModifier       _LOAD_MODIFIER_VALUES,      ///< Cache load modifier for reading values
-    BlockReduceAlgorithm    _REDUCE_ALGORITHM,          ///< The BlockReduce algorithm to use
-    BlockScanAlgorithm      _SCAN_ALGORITHM>            ///< The BlockScan algorithm to use
-struct BlockSegReduceRegionPolicy
-{
-    enum
-    {
-        BLOCK_THREADS           = _BLOCK_THREADS,           ///< Threads per thread block
-        ITEMS_PER_THREAD        = _ITEMS_PER_THREAD,        ///< Items per thread (per tile of input)
-        USE_SMEM_SEGMENT_CACHE  = _USE_SMEM_SEGMENT_CACHE,  ///< Whether or not to cache incoming segment offsets in shared memory before reducing each tile
-        USE_SMEM_VALUE_CACHE    = _USE_SMEM_VALUE_CACHE,    ///< Whether or not to cache incoming values in shared memory before reducing each tile
-    };
-
-    static const CacheLoadModifier      LOAD_MODIFIER_SEGMENTS  = _LOAD_MODIFIER_SEGMENTS;  ///< Cache load modifier for reading segment offsets
-    static const CacheLoadModifier      LOAD_MODIFIER_VALUES    = _LOAD_MODIFIER_VALUES;    ///< Cache load modifier for reading values
-    static const BlockReduceAlgorithm   REDUCE_ALGORITHM        = _REDUCE_ALGORITHM;        ///< The BlockReduce algorithm to use
-    static const BlockScanAlgorithm     SCAN_ALGORITHM          = _SCAN_ALGORITHM;          ///< The BlockScan algorithm to use
-};
-
-
-/******************************************************************************
- * Persistent thread block types
- ******************************************************************************/
-
-/**
- * \brief BlockSegReduceRegion implements a stateful abstraction of CUDA thread blocks for participating in device-wide segmented reduction.
- */ -template < - typename BlockSegReduceRegionPolicy, ///< Parameterized BlockSegReduceRegionPolicy tuning policy - typename SegmentOffsetIterator, ///< Random-access input iterator type for reading segment end-offsets - typename ValueIterator, ///< Random-access input iterator type for reading values - typename OutputIteratorT, ///< Random-access output iterator type for writing segment reductions - typename ReductionOp, ///< Binary reduction operator type having member T operator()(const T &a, const T &b) - typename OffsetT> ///< Signed integer type for global offsets -struct BlockSegReduceRegion -{ - //--------------------------------------------------------------------- - // Types and constants - //--------------------------------------------------------------------- - - // Constants - enum - { - BLOCK_THREADS = BlockSegReduceRegionPolicy::BLOCK_THREADS, - ITEMS_PER_THREAD = BlockSegReduceRegionPolicy::ITEMS_PER_THREAD, - TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, /// Number of work items to be processed per tile - - USE_SMEM_SEGMENT_CACHE = BlockSegReduceRegionPolicy::USE_SMEM_SEGMENT_CACHE, ///< Whether or not to cache incoming segment offsets in shared memory before reducing each tile - USE_SMEM_VALUE_CACHE = BlockSegReduceRegionPolicy::USE_SMEM_VALUE_CACHE, ///< Whether or not to cache incoming upcoming values in shared memory before reducing each tile - - SMEM_SEGMENT_CACHE_ITEMS = USE_SMEM_SEGMENT_CACHE ? TILE_ITEMS : 1, - SMEM_VALUE_CACHE_ITEMS = USE_SMEM_VALUE_CACHE ? TILE_ITEMS : 1, - }; - - // Segment offset type - typedef typename std::iterator_traits::value_type SegmentOffset; - - // Value type - typedef typename std::iterator_traits::value_type Value; - - // Counting iterator type - typedef CountingInputIterator CountingIterator; - - // Segment offsets iterator wrapper type - typedef typename If<(IsPointer::VALUE), - CacheModifiedInputIterator, // Wrap the native input pointer with CacheModifiedInputIterator - SegmentOffsetIterator>::Type // Directly use the supplied input iterator type - WrappedSegmentOffsetIterator; - - // Values iterator wrapper type - typedef typename If<(IsPointer::VALUE), - CacheModifiedInputIterator, // Wrap the native input pointer with CacheModifiedInputIterator - ValueIterator>::Type // Directly use the supplied input iterator type - WrappedValueIterator; - - // Tail flag type for marking segment discontinuities - typedef int TailFlag; - - // Reduce-by-key data type tuple (segment-ID, value) - typedef KeyValuePair KeyValuePair; - - // Index pair data type - typedef IndexPair IndexPair; - - // BlockScan scan operator for reduction-by-segment - typedef ReduceByKeyOp ReduceByKeyOp; - - // Stateful BlockScan prefix callback type for managing a running total while scanning consecutive tiles - typedef RunningBlockPrefixCallbackOp< - KeyValuePair, - ReduceByKeyOp> - RunningPrefixCallbackOp; - - // Parameterized BlockShift type for exchanging index pairs - typedef BlockShift< - IndexPair, - BLOCK_THREADS> - BlockShift; - - // Parameterized BlockReduce type for block-wide reduction - typedef BlockReduce< - Value, - BLOCK_THREADS, - BlockSegReduceRegionPolicy::REDUCE_ALGORITHM> - BlockReduce; - - // Parameterized BlockScan type for block-wide reduce-value-by-key - typedef BlockScan< - KeyValuePair, - BLOCK_THREADS, - BlockSegReduceRegionPolicy::SCAN_ALGORITHM> - BlockScan; - - // Shared memory type for this threadblock - struct _TempStorage - { - union - { - // Smem needed for BlockScan - typename BlockScan::TempStorage scan; - - // Smem needed for 
BlockReduce - typename BlockReduce::TempStorage reduce; - - struct - { - // Smem needed for communicating start/end indices between threads for a given work tile - typename BlockShift::TempStorage shift; - - // Smem needed for caching segment end-offsets - SegmentOffset cached_segment_end_offsets[SMEM_SEGMENT_CACHE_ITEMS + 1]; - }; - - // Smem needed for caching values - Value cached_values[SMEM_VALUE_CACHE_ITEMS]; - }; - - IndexPair block_region_idx[2]; // The starting [0] and ending [1] pairs of segment and value indices for the threadblock's region - - // The first partial reduction tuple scattered by this thread block - KeyValuePair first_tuple; - }; - - - // Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - - //--------------------------------------------------------------------- - // Thread fields - //--------------------------------------------------------------------- - - _TempStorage &temp_storage; ///< Reference to shared storage - WrappedSegmentOffsetIterator d_segment_end_offsets; ///< A sequence of \p num_segments segment end-offsets - WrappedValueIterator d_values; ///< A sequence of \p num_values data to reduce - OutputIteratorT d_output; ///< A sequence of \p num_segments segment totals - CountingIterator d_value_offsets; ///< A sequence of \p num_values value-offsets - IndexPair *d_block_idx; - OffsetT num_values; ///< Total number of values to reduce - OffsetT num_segments; ///< Number of segments being reduced - Value identity; ///< Identity value (for zero-length segments) - ReductionOp reduction_op; ///< Reduction operator - ReduceByKeyOp scan_op; ///< Reduce-by-key scan operator - RunningPrefixCallbackOp prefix_op; ///< Stateful running total for block-wide prefix scan of partial reduction tuples - - - //--------------------------------------------------------------------- - // Operations - //--------------------------------------------------------------------- - - /** - * Constructor - */ - __device__ __forceinline__ - BlockSegReduceRegion( - TempStorage &temp_storage, ///< Reference to shared storage - SegmentOffsetIterator d_segment_end_offsets, ///< A sequence of \p num_segments segment end-offsets - ValueIterator d_values, ///< A sequence of \p num_values values - OutputIteratorT d_output, ///< A sequence of \p num_segments segment totals - IndexPair *d_block_idx, - OffsetT num_values, ///< Number of values to reduce - OffsetT num_segments, ///< Number of segments being reduced - Value identity, ///< Identity value (for zero-length segments) - ReductionOp reduction_op) ///< Reduction operator - : - temp_storage(temp_storage.Alias()), - d_segment_end_offsets(d_segment_end_offsets), - d_values(d_values), - d_value_offsets(0), - d_output(d_output), - d_block_idx(d_block_idx), - num_values(num_values), - num_segments(num_segments), - identity(identity), - reduction_op(reduction_op), - scan_op(reduction_op), - prefix_op(scan_op) - {} - - - /** - * Fast-path single-segment tile reduction. Perform a - * simple block-wide reduction and accumulate the result into - * the running total. 
- */ - __device__ __forceinline__ void SingleSegmentTile( - IndexPair next_tile_idx, - IndexPair block_idx) - { - OffsetT tile_values = next_tile_idx.b_idx - block_idx.b_idx; - - // Load a tile's worth of values (using identity for out-of-bounds items) - Value values[ITEMS_PER_THREAD]; - LoadDirectStriped(threadIdx.x, d_values + block_idx.b_idx, values, tile_values, identity); - - // Barrier for smem reuse - __syncthreads(); - - // Reduce the tile of values and update the running total in thread-0 - KeyValuePair tile_aggregate; - tile_aggregate.key = block_idx.a_idx; - tile_aggregate.value = BlockReduce(temp_storage.reduce).Reduce(values, reduction_op); - - if (threadIdx.x == 0) - { - prefix_op.running_total = scan_op(prefix_op.running_total, tile_aggregate); - } - } - - /** - * Fast-path empty-segment tile reduction. Write out a tile of identity - * values to output. - */ - __device__ __forceinline__ void EmptySegmentsTile( - IndexPair next_tile_idx, - IndexPair block_idx) - { - Value segment_reductions[ITEMS_PER_THREAD]; - - if (threadIdx.x == 0) - { - // The first segment gets the running segment total - segment_reductions[0] = prefix_op.running_total.value; - - // Update the running prefix - prefix_op.running_total.value = identity; - prefix_op.running_total.key = next_tile_idx.a_idx; - } - else - { - // Remainder of segments in this tile get identity - segment_reductions[0] = identity; - } - - // Remainder of segments in this tile get identity - #pragma unroll - for (int ITEM = 1; ITEM < ITEMS_PER_THREAD; ++ITEM) - segment_reductions[ITEM] = identity; - - // Store reductions - OffsetT tile_segments = next_tile_idx.a_idx - block_idx.a_idx; - StoreDirectStriped(threadIdx.x, d_output + block_idx.a_idx, segment_reductions, tile_segments); - } - - - /** - * Multi-segment tile reduction. - */ - template - __device__ __forceinline__ void MultiSegmentTile( - IndexPair block_idx, - IndexPair thread_idx, - IndexPair next_thread_idx, - IndexPair next_tile_idx) - { - IndexPair local_thread_idx; - local_thread_idx.a_idx = thread_idx.a_idx - block_idx.a_idx; - local_thread_idx.b_idx = thread_idx.b_idx - block_idx.b_idx; - - // Check if first segment end-offset is in range - bool valid_segment = FULL_TILE || (thread_idx.a_idx < next_thread_idx.a_idx); - - // Check if first value offset is in range - bool valid_value = FULL_TILE || (thread_idx.b_idx < next_thread_idx.b_idx); - - // Load first segment end-offset - OffsetT segment_end_offset = (valid_segment) ? - (USE_SMEM_SEGMENT_CACHE)? 
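SingleSegmentTile above is the easy case: every value in the tile belongs to one segment, so the tile collapses to an ordinary block-wide sum that thread 0 folds into the running prefix. A standalone sketch of that fast path built on cub::BlockReduce (assuming CUB is on the include path, as it is in this vendored tree; the kernel and its names are hypothetical):

```cuda
#include <cstdio>
#include <cub/cub.cuh>

// Minimal single-segment tile reduction: every value in the tile belongs to
// one segment, so a plain block-wide sum suffices (the deleted fast path).
template <int BLOCK_THREADS, int ITEMS_PER_THREAD>
__global__ void SingleSegmentTileKernel(const double* d_values,
                                        double*       d_total,
                                        int           tile_values)
{
    typedef cub::BlockReduce<double, BLOCK_THREADS> BlockReduce;
    __shared__ typename BlockReduce::TempStorage temp_storage;

    // Striped per-thread gather, treating out-of-bounds items as identity
    double thread_sum = 0.0;
    for (int i = 0; i < ITEMS_PER_THREAD; ++i)
    {
        int offset = i * BLOCK_THREADS + threadIdx.x;
        if (offset < tile_values)
            thread_sum += d_values[offset];
    }

    double tile_total = BlockReduce(temp_storage).Sum(thread_sum);
    if (threadIdx.x == 0)
        *d_total = tile_total;
}

int main()
{
    const int n = 1000;
    double h[n];
    for (int i = 0; i < n; ++i) h[i] = 1.0;
    double *d_values, *d_total;
    cudaMalloc(&d_values, n * sizeof(double));
    cudaMalloc(&d_total, sizeof(double));
    cudaMemcpy(d_values, h, n * sizeof(double), cudaMemcpyHostToDevice);
    SingleSegmentTileKernel<128, 8><<<1, 128>>>(d_values, d_total, n);
    double total;
    cudaMemcpy(&total, d_total, sizeof(double), cudaMemcpyDeviceToHost);
    printf("tile total: %.1f\n", total);   // expect 1000.0
    cudaFree(d_values); cudaFree(d_total);
    return 0;
}
```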
- temp_storage.cached_segment_end_offsets[local_thread_idx.a_idx] : - d_segment_end_offsets[thread_idx.a_idx] : - -1; - - OffsetT segment_ids[ITEMS_PER_THREAD]; - OffsetT value_offsets[ITEMS_PER_THREAD]; - - KeyValuePair first_partial; - first_partial.key = thread_idx.a_idx; - first_partial.value = identity; - - // Get segment IDs and gather-offsets for values - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - segment_ids[ITEM] = -1; - value_offsets[ITEM] = -1; - - // Whether or not we slide (a) right along the segment path or (b) down the value path - if (valid_segment && (!valid_value || (segment_end_offset <= thread_idx.b_idx))) - { - // Consume this segment index - segment_ids[ITEM] = thread_idx.a_idx; - thread_idx.a_idx++; - local_thread_idx.a_idx++; - - valid_segment = FULL_TILE || (thread_idx.a_idx < next_thread_idx.a_idx); - - // Read next segment end-offset (if valid) - if (valid_segment) - { - if (USE_SMEM_SEGMENT_CACHE) - segment_end_offset = temp_storage.cached_segment_end_offsets[local_thread_idx.a_idx]; - else - segment_end_offset = d_segment_end_offsets[thread_idx.a_idx]; - } - } - else if (valid_value) - { - // Consume this value index - value_offsets[ITEM] = thread_idx.b_idx; - thread_idx.b_idx++; - local_thread_idx.b_idx++; - - valid_value = FULL_TILE || (thread_idx.b_idx < next_thread_idx.b_idx); - } - } - - // Load values - Value values[ITEMS_PER_THREAD]; - - if (USE_SMEM_VALUE_CACHE) - { - // Barrier for smem reuse - __syncthreads(); - - OffsetT tile_values = next_tile_idx.b_idx - block_idx.b_idx; - - // Load a tile's worth of values (using identity for out-of-bounds items) - LoadDirectStriped(threadIdx.x, d_values + block_idx.b_idx, values, tile_values, identity); - - // Store to shared - StoreDirectStriped(threadIdx.x, temp_storage.cached_values, values, tile_values); - - // Barrier for smem reuse - __syncthreads(); - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - values[ITEM] = (value_offsets[ITEM] == -1) ? - identity : - temp_storage.cached_values[value_offsets[ITEM] - block_idx.b_idx]; - } - } - else - { - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - values[ITEM] = (value_offsets[ITEM] == -1) ? - identity : - d_values[value_offsets[ITEM]]; - } - } - - // Reduce within thread segments - KeyValuePair running_total = first_partial; - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - if (segment_ids[ITEM] != -1) - { - // Consume this segment index - d_output[segment_ids[ITEM]] = running_total.value; - -// _CubLog("Updating segment %d with value %lld\n", segment_ids[ITEM], running_total.value) - - if (first_partial.key == segment_ids[ITEM]) - first_partial.value = running_total.value; - - running_total.key = segment_ids[ITEM]; - running_total.value = identity; - } - - running_total.value = reduction_op(running_total.value, values[ITEM]); - } -/* - - // Barrier for smem reuse - __syncthreads(); - - // Use prefix scan to reduce values by segment-id. The segment-reductions end up in items flagged as segment-tails. 
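The per-item loop above is a two-pointer merge walk: at each step a thread either consumes the next segment end-offset (closing a segment) or the next value (accumulating it), depending on which comes first along the decision path. The same rule on the host, using the segment layout from the earlier reference example (names hypothetical):

```cuda
#include <cstdio>

// Host simulation of the deleted tile merge: consume the next segment
// end-offset if it is <= the next value offset, otherwise consume the next
// value. Consuming a segment closes it; consuming a value accumulates.
int main()
{
    int    end_offsets[] = {2, 2, 5};          // segment ends (segment 1 empty)
    double values[]      = {1, 2, 3, 4, 5};
    const int num_segments = 3, num_values = 5;

    int a = 0, b = 0;          // indices into end_offsets / values
    double running = 0.0;
    while (a < num_segments || b < num_values)
    {
        bool take_segment = (a < num_segments) &&
                            (b >= num_values || end_offsets[a] <= b);
        if (take_segment)
        {
            printf("segment %d total: %.1f\n", a, running);
            running = 0.0;
            ++a;
        }
        else
        {
            running += values[b];
            ++b;
        }
    }
    return 0;
}
```

Segment 1 is empty, so it closes immediately with an identity total, which is exactly the situation the EmptySegmentsTile fast path handles in bulk.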
- KeyValuePair block_aggregate; - BlockScan(temp_storage.scan).InclusiveScan( - pairs, // Scan input - pairs, // Scan output - scan_op, // Scan operator - block_aggregate, // Block-wide total (unused) - prefix_op); // Prefix operator for seeding the block-wide scan with the running total -*/ - -/* - // Check if first segment end-offset is in range - bool valid_segment = (thread_idx.a_idx < next_thread_idx.a_idx); - - // Check if first value offset is in range - bool valid_value = (thread_idx.b_idx < next_thread_idx.b_idx); - - // Load first segment end-offset - OffsetT segment_end_offset = (valid_segment) ? - d_segment_end_offsets[thread_idx.a_idx] : - num_values; // Out of range (the last segment end-offset is one-past the last value offset) - - // Load first value offset - OffsetT value_offset = (valid_value) ? - d_value_offsets[thread_idx.b_idx] : - num_values; // Out of range (one-past the last value offset) - - // Assemble segment-demarcating tail flags and partial reduction tuples - TailFlag tail_flags[ITEMS_PER_THREAD]; - KeyValuePair partial_reductions[ITEMS_PER_THREAD]; - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - // Default tuple and flag values - partial_reductions[ITEM].key = thread_idx.a_idx; - partial_reductions[ITEM].value = identity; - tail_flags[ITEM] = 0; - - // Whether or not we slide (a) right along the segment path or (b) down the value path - if (valid_segment && (!valid_value || (segment_end_offset <= value_offset))) - { - // Consume this segment index - - // Set tail flag noting the end of the segment - tail_flags[ITEM] = 1; - - // Increment segment index - thread_idx.a_idx++; - - // Read next segment end-offset (if valid) - if ((valid_segment = (thread_idx.a_idx < next_thread_idx.a_idx))) - segment_end_offset = d_segment_end_offsets[thread_idx.a_idx]; - } - else if (valid_value) - { - // Consume this value index - - // Update the tuple's value with the value at this index. - partial_reductions[ITEM].value = d_values[value_offset]; - - // Increment value index - thread_idx.b_idx++; - - // Read next value offset (if valid) - if ((valid_value = (thread_idx.b_idx < next_thread_idx.b_idx))) - value_offset = d_value_offsets[thread_idx.b_idx]; - } - } - - // Use prefix scan to reduce values by segment-id. The segment-reductions end up in items flagged as segment-tails. - KeyValuePair block_aggregate; - BlockScan(temp_storage.scan).InclusiveScan( - partial_reductions, // Scan input - partial_reductions, // Scan output - scan_op, // Scan operator - block_aggregate, // Block-wide total (unused) - prefix_op); // Prefix operator for seeding the block-wide scan with the running total - - // The first segment index for this region (hoist?) 
- OffsetT first_segment_idx = temp_storage.block_idx.a_idx[0]; - - // Scatter an accumulated reduction if it is the head of a valid segment - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - if (tail_flags[ITEM]) - { - OffsetT segment_idx = partial_reductions[ITEM].key; - Value value = partial_reductions[ITEM].value; - - // Write value reduction to corresponding segment id - d_output[segment_idx] = value; - - // Save off the first value product that this thread block will scatter - if (segment_idx == first_segment_idx) - { - temp_storage.first_tuple.value = value; - } - } - } -*/ - } - - - - /** - * Have the thread block process the specified region of the MergePath decision path - */ - __device__ __forceinline__ void ProcessRegion( - OffsetT block_diagonal, - OffsetT next_block_diagonal, - KeyValuePair &first_tuple, // [Out] Valid in thread-0 - KeyValuePair &last_tuple) // [Out] Valid in thread-0 - { - // Thread block initialization - if (threadIdx.x < 2) - { - // Retrieve block starting and ending indices - IndexPair block_idx = {0, 0}; - if (gridDim.x > 1) - { - block_idx = d_block_idx[blockIdx.x + threadIdx.x]; - } - else if (threadIdx.x > 0) - { - block_idx.a_idx = num_segments; - block_idx.b_idx = num_values; - } - - // Share block starting and ending indices - temp_storage.block_region_idx[threadIdx.x] = block_idx; - - // Initialize the block's running prefix - if (threadIdx.x == 0) - { - prefix_op.running_total.key = block_idx.a_idx; - prefix_op.running_total.value = identity; - - // Initialize the "first scattered partial reduction tuple" to the prefix tuple (in case we don't actually scatter one) - temp_storage.first_tuple = prefix_op.running_total; - } - } - - // Ensure coherence of region indices - __syncthreads(); - - // Read block's starting indices - IndexPair block_idx = temp_storage.block_region_idx[0]; - - // Have the thread block iterate over the region - #pragma unroll 1 - while (block_diagonal < next_block_diagonal) - { - // Read block's ending indices (hoist?) 
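ProcessRegion reads its start and end indices from d_block_idx, which an earlier partitioning step (not shown in this hunk) appears to fill by placing equally spaced diagonals across the combined segments-plus-values decision path and locating each with the same merge-path bisection. A hedged host sketch of that partitioning, exploiting that B here is the implicit counting sequence of value offsets (Locate and every other name is hypothetical):

```cuda
#include <cstdio>
#include <algorithm>

// Hypothetical sketch of the per-block partitioning that fills d_block_idx.
// B is the counting sequence 0,1,2,... of value offsets, so b[j] == j and
// the bisection condition simplifies accordingly.
void Locate(int diagonal, const int* ends, int num_segments, int num_values,
            int& a_idx, int& b_idx)
{
    int lo = std::max(diagonal - num_values, 0);
    int hi = std::min(diagonal, num_segments);
    while (lo < hi)
    {
        int pivot = (lo + hi) >> 1;
        if (ends[pivot] <= diagonal - pivot - 1) lo = pivot + 1;
        else                                     hi = pivot;
    }
    a_idx = lo;
    b_idx = diagonal - lo;
}

int main()
{
    int ends[] = {2, 4, 9};                 // segment end-offsets
    const int num_segments = 3, num_values = 9;
    const int total = num_segments + num_values, grid_size = 3;
    int tile = (total + grid_size - 1) / grid_size;

    for (int block = 0; block <= grid_size; ++block)
    {
        int d = std::min(block * tile, total);
        int a, b;
        Locate(d, ends, num_segments, num_values, a, b);
        printf("diagonal %2d -> block boundary (a=%d, b=%d)\n", d, a, b);
    }
    return 0;
}
```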
- IndexPair next_block_idx = temp_storage.block_region_idx[1]; - - // Clamp the per-thread search range to within one work-tile of block's current indices - IndexPair next_tile_idx; - next_tile_idx.a_idx = CUB_MIN(next_block_idx.a_idx, block_idx.a_idx + TILE_ITEMS); - next_tile_idx.b_idx = CUB_MIN(next_block_idx.b_idx, block_idx.b_idx + TILE_ITEMS); - - // Have each thread search for the end-indices of its subranges within the segment and value inputs - IndexPair next_thread_idx; - if (USE_SMEM_SEGMENT_CACHE) - { - // Search in smem cache - OffsetT num_segments = next_tile_idx.a_idx - block_idx.a_idx; - - // Load global - SegmentOffset segment_offsets[ITEMS_PER_THREAD]; - LoadDirectStriped(threadIdx.x, d_segment_end_offsets + block_idx.a_idx, segment_offsets, num_segments, num_values); - - // Store to shared - StoreDirectStriped(threadIdx.x, temp_storage.cached_segment_end_offsets, segment_offsets); - - __syncthreads(); - - OffsetT next_thread_diagonal = block_diagonal + ((threadIdx.x + 1) * ITEMS_PER_THREAD); - - MergePathSearch( - next_thread_diagonal, // Next thread diagonal - temp_storage.cached_segment_end_offsets - block_idx.a_idx, // A (segment end-offsets) - d_value_offsets, // B (value offsets) - block_idx, // Start indices into A and B - next_tile_idx, // End indices into A and B - next_thread_idx); // [out] diagonal intersection indices into A and B - } - else - { - // Search in global - - OffsetT next_thread_diagonal = block_diagonal + ((threadIdx.x + 1) * ITEMS_PER_THREAD); - - MergePathSearch( - next_thread_diagonal, // Next thread diagonal - d_segment_end_offsets, // A (segment end-offsets) - d_value_offsets, // B (value offsets) - block_idx, // Start indices into A and B - next_tile_idx, // End indices into A and B - next_thread_idx); // [out] diagonal intersection indices into A and B - } - - // Share thread end-indices to get thread begin-indices and tile end-indices - IndexPair thread_idx; - - BlockShift(temp_storage.shift).Up( - next_thread_idx, // Input item - thread_idx, // [out] Output item - block_idx, // Prefix item to be provided to thread0 - next_tile_idx); // [out] Suffix item shifted out by the threadBLOCK_THREADS-1 to be provided to all threads - -// if (block_idx.a_idx == next_tile_idx.a_idx) -// { -// // There are no segment end-offsets in this tile. Perform a -// // simple block-wide reduction and accumulate the result into -// // the running total. -// SingleSegmentTile(next_tile_idx, block_idx); -// } -// else if (block_idx.b_idx == next_tile_idx.b_idx) -// { -// // There are no values in this tile (only empty segments). 
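The BlockShift step above distributes the searched indices: each thread's begin-indices are its predecessor's end-indices, with the tile's begin fed to thread 0 and the last thread's end becoming the tile's end. A trivial host rendering of that shift, under the assumption that this is all the experimental BlockShift::Up does here (names hypothetical):

```cuda
#include <cstdio>

// Host sketch of the shift: thread t consumes [thread_end[t-1], thread_end[t]),
// with thread 0 starting at the tile's begin index.
int main()
{
    const int threads = 4;
    int thread_end[threads] = {3, 5, 9, 12};   // per-thread merge-path ends
    int tile_begin = 0;

    int begin = tile_begin;
    for (int t = 0; t < threads; ++t)
    {
        printf("thread %d consumes [%d, %d)\n", t, begin, thread_end[t]);
        begin = thread_end[t];                  // shift: next begin = this end
    }
    // `begin` now holds the tile's end index.
    return 0;
}
```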
-// EmptySegmentsTile(next_tile_idx.a_idx, block_idx.a_idx); -// } -// else - if ((next_tile_idx.a_idx < num_segments) && (next_tile_idx.b_idx < num_values)) - { - // Merge the tile's segment and value indices (full tile) - MultiSegmentTile(block_idx, thread_idx, next_thread_idx, next_tile_idx); - } - else - { - // Merge the tile's segment and value indices (partially full tile) - MultiSegmentTile(block_idx, thread_idx, next_thread_idx, next_tile_idx); - } - - // Advance the block's indices in preparation for the next tile - block_idx = next_tile_idx; - - // Advance to the next region in the decision path - block_diagonal += TILE_ITEMS; - - // Barrier for smem reuse - __syncthreads(); - } - - // Get first and last tuples for the region - if (threadIdx.x == 0) - { - first_tuple = temp_storage.first_tuple; - last_tuple = prefix_op.running_total; - } - - } - - -}; - - - - - - - - -/****************************************************************************** - * Tuning policy types - ******************************************************************************/ - -/** - * Parameterizable tuning policy type for BlockSegReduceRegionByKey - */ -template < - int _BLOCK_THREADS, ///< Threads per thread block - int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) - BlockLoadAlgorithm _LOAD_ALGORITHM, ///< The BlockLoad algorithm to use - bool _LOAD_WARP_TIME_SLICING, ///< Whether or not only one warp's worth of shared memory should be allocated and time-sliced among block-warps during any load-related data transpositions (versus each warp having its own storage) - CacheLoadModifier _LOAD_MODIFIER, ///< Cache load modifier for reading input elements - BlockScanAlgorithm _SCAN_ALGORITHM> ///< The BlockScan algorithm to use -struct BlockSegReduceRegionByKeyPolicy -{ - enum - { - BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block - ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) - LOAD_WARP_TIME_SLICING = _LOAD_WARP_TIME_SLICING, ///< Whether or not only one warp's worth of shared memory should be allocated and time-sliced among block-warps during any load-related data transpositions (versus each warp having its own storage) }; - }; - - static const BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; ///< The BlockLoad algorithm to use - static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading input elements - static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; ///< The BlockScan algorithm to use -}; - - -/****************************************************************************** - * Persistent thread block types - ******************************************************************************/ - -/** - * \brief BlockSegReduceRegionByKey implements a stateful abstraction of CUDA thread blocks for participating in device-wide reduce-value-by-key. 
- */ -template < - typename BlockSegReduceRegionByKeyPolicy, ///< Parameterized BlockSegReduceRegionByKeyPolicy tuning policy - typename InputIteratorT, ///< Random-access iterator referencing key-value input tuples - typename OutputIteratorT, ///< Random-access iterator referencing segment output totals - typename ReductionOp> ///< Binary reduction operator type having member T operator()(const T &a, const T &b) -struct BlockSegReduceRegionByKey -{ - //--------------------------------------------------------------------- - // Types and constants - //--------------------------------------------------------------------- - - // Constants - enum - { - BLOCK_THREADS = BlockSegReduceRegionByKeyPolicy::BLOCK_THREADS, - ITEMS_PER_THREAD = BlockSegReduceRegionByKeyPolicy::ITEMS_PER_THREAD, - TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, - }; - - // KeyValuePair input type - typedef typename std::iterator_traits::value_type KeyValuePair; - - // Signed integer type for global offsets - typedef typename KeyValuePair::Key OffsetT; - - // Value type - typedef typename KeyValuePair::Value Value; - - // Head flag type - typedef int HeadFlag; - - // Input iterator wrapper type for loading KeyValuePair elements through cache - typedef CacheModifiedInputIterator< - BlockSegReduceRegionByKeyPolicy::LOAD_MODIFIER, - KeyValuePair, - OffsetT> - WrappedInputIteratorT; - - // Parameterized BlockLoad type - typedef BlockLoad< - WrappedInputIteratorT, - BLOCK_THREADS, - ITEMS_PER_THREAD, - BlockSegReduceRegionByKeyPolicy::LOAD_ALGORITHM, - BlockSegReduceRegionByKeyPolicy::LOAD_WARP_TIME_SLICING> - BlockLoad; - - // BlockScan scan operator for reduction-by-segment - typedef ReduceByKeyOp ReduceByKeyOp; - - // Stateful BlockScan prefix callback type for managing a running total while scanning consecutive tiles - typedef RunningBlockPrefixCallbackOp< - KeyValuePair, - ReduceByKeyOp> - RunningPrefixCallbackOp; - - // Parameterized BlockScan type for block-wide reduce-value-by-key - typedef BlockScan< - KeyValuePair, - BLOCK_THREADS, - BlockSegReduceRegionByKeyPolicy::SCAN_ALGORITHM> - BlockScan; - - // Parameterized BlockDiscontinuity type for identifying key discontinuities - typedef BlockDiscontinuity< - OffsetT, - BLOCK_THREADS> - BlockDiscontinuity; - - // Operator for detecting discontinuities in a list of segment identifiers. 
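
The NewSegmentOp functor defined just below supplies the inequality test that BlockDiscontinuity::FlagHeads applies to each adjacent pair of tile items. As a reading aid, a host-side model of that head-flagging step; the helper name and signature are illustrative, not part of the deleted file:

    #include <vector>

    // flags[i] == 1 iff item i begins a new run of equal segment IDs; pred_id plays
    // the role of the previous tile's running segment ID (the FlagHeads predecessor).
    std::vector<int> FlagHeadsHost(const std::vector<int> &segment_ids, int pred_id)
    {
        std::vector<int> flags(segment_ids.size());
        for (size_t i = 0; i < segment_ids.size(); ++i)
        {
            int prev = (i == 0) ? pred_id : segment_ids[i - 1];
            flags[i] = (segment_ids[i] != prev) ? 1 : 0;   // same test as NewSegmentOp
        }
        return flags;
    }
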
- struct NewSegmentOp - { - /// Returns true if row_b is the start of a new row - __device__ __forceinline__ bool operator()(const OffsetT& b, const OffsetT& a) - { - return (a != b); - } - }; - - // Shared memory type for this threadblock - struct _TempStorage - { - union - { - typename BlockLoad::TempStorage load; // Smem needed for tile loading - struct { - typename BlockScan::TempStorage scan; // Smem needed for reduce-value-by-segment scan - typename BlockDiscontinuity::TempStorage discontinuity; // Smem needed for head-flagging - }; - }; - }; - - // Alias wrapper allowing storage to be unioned - struct TempStorage : Uninitialized<_TempStorage> {}; - - - //--------------------------------------------------------------------- - // Thread fields - //--------------------------------------------------------------------- - - _TempStorage &temp_storage; ///< Reference to shared storage - WrappedInputIteratorT d_tuple_partials; ///< A sequence of partial reduction tuples to scan - OutputIteratorT d_output; ///< A sequence of segment totals - Value identity; ///< Identity value (for zero-length segments) - ReduceByKeyOp scan_op; ///< Reduce-by-key scan operator - RunningPrefixCallbackOp prefix_op; ///< Stateful running total for block-wide prefix scan of partial reduction tuples - - - //--------------------------------------------------------------------- - // Operations - //--------------------------------------------------------------------- - - /** - * Constructor - */ - __device__ __forceinline__ - BlockSegReduceRegionByKey( - TempStorage &temp_storage, ///< Reference to shared storage - InputIteratorT d_tuple_partials, ///< A sequence of partial reduction tuples to scan - OutputIteratorT d_output, ///< A sequence of segment totals - Value identity, ///< Identity value (for zero-length segments) - ReductionOp reduction_op) ///< Reduction operator - : - temp_storage(temp_storage.Alias()), - d_tuple_partials(d_tuple_partials), - d_output(d_output), - identity(identity), - scan_op(reduction_op), - prefix_op(scan_op) - {} - - - - /** - * Processes a reduce-value-by-key input tile, outputting reductions for each segment - */ - template - __device__ __forceinline__ - void ProcessTile( - OffsetT block_offset, - OffsetT first_segment_idx, - OffsetT last_segment_idx, - int guarded_items = TILE_ITEMS) - { - KeyValuePair partial_reductions[ITEMS_PER_THREAD]; - OffsetT segment_ids[ITEMS_PER_THREAD]; - HeadFlag head_flags[ITEMS_PER_THREAD]; - - // Load a tile of block partials from previous kernel - if (FULL_TILE) - { - // Full tile - BlockLoad(temp_storage.load).Load(d_tuple_partials + block_offset, partial_reductions); - } - else - { - KeyValuePair oob_default; - oob_default.key = last_segment_idx; // The last segment ID to be reduced - oob_default.value = identity; - - // Partially-full tile - BlockLoad(temp_storage.load).Load(d_tuple_partials + block_offset, partial_reductions, guarded_items, oob_default); - } - - // Barrier for shared memory reuse - __syncthreads(); - - // Copy the segment IDs for head-flagging - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - segment_ids[ITEM] = partial_reductions[ITEM].key; - } - - // FlagT segment heads by looking for discontinuities - BlockDiscontinuity(temp_storage.discontinuity).FlagHeads( - head_flags, // [out] Head flags - segment_ids, // Segment ids - NewSegmentOp(), // Functor for detecting start of new rows - prefix_op.running_total.key); // Last segment ID from previous tile to compare with first segment ID in this tile - - 
// Reduce-value-by-segment across partial_reductions using exclusive prefix scan - KeyValuePair block_aggregate; - BlockScan(temp_storage.scan).ExclusiveScan( - partial_reductions, // Scan input - partial_reductions, // Scan output - scan_op, // Scan operator - block_aggregate, // Block-wide total (unused) - prefix_op); // Prefix operator for seeding the block-wide scan with the running total - - // Scatter an accumulated reduction if it is the head of a valid segment - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) - { - if (head_flags[ITEM]) - { - d_output[partial_reductions[ITEM].key] = partial_reductions[ITEM].value; - } - } - } - - - /** - * Iterate over input tiles belonging to this thread block - */ - __device__ __forceinline__ - void ProcessRegion( - OffsetT block_offset, - OffsetT block_end, - OffsetT first_segment_idx, - OffsetT last_segment_idx) - { - if (threadIdx.x == 0) - { - // Initialize running prefix to the first segment index paired with identity - prefix_op.running_total.key = first_segment_idx; - prefix_op.running_total.value = identity; - } - - // Process full tiles - while (block_offset + TILE_ITEMS <= block_end) - { - ProcessTile(block_offset, first_segment_idx, last_segment_idx); - __syncthreads(); - - block_offset += TILE_ITEMS; - } - - // Process final value tile (if present) - int guarded_items = block_end - block_offset; - if (guarded_items) - { - ProcessTile(block_offset, first_segment_idx, last_segment_idx, guarded_items); - } - } -}; - - - -/****************************************************************************** - * Kernel entrypoints - ******************************************************************************/ - -/** - * Segmented reduce region kernel entry point (multi-block). - */ - -template < - typename SegmentOffsetIterator, ///< Random-access input iterator type for reading segment end-offsets - typename OffsetT> ///< Signed integer type for global offsets -__global__ void SegReducePartitionKernel( - SegmentOffsetIterator d_segment_end_offsets, ///< [in] A sequence of \p num_segments segment end-offsets - IndexPair *d_block_idx, - int num_partition_samples, - OffsetT num_values, ///< [in] Number of values to reduce - OffsetT num_segments, ///< [in] Number of segments being reduced - GridEvenShare even_share) ///< [in] Even-share descriptor for mapping an equal number of tiles onto each thread block -{ - // Segment offset type - typedef typename std::iterator_traits::value_type SegmentOffset; - - // Counting iterator type - typedef CountingInputIterator CountingIterator; - - // Cache-modified iterator for segment end-offsets - CacheModifiedInputIterator d_wrapped_segment_end_offsets(d_segment_end_offsets); - - // Counting iterator for value offsets - CountingIterator d_value_offsets(0); - - // Initialize even-share to tell us where to start and stop our tile-processing - int partition_id = (blockDim.x * blockIdx.x) + threadIdx.x; - even_share.Init(partition_id); - - // Search for block starting and ending indices - IndexPair start_idx = {0, 0}; - IndexPair end_idx = {num_segments, num_values}; - IndexPair block_idx; - - MergePathSearch( - even_share.block_offset, // Next thread diagonal - d_wrapped_segment_end_offsets, // A (segment end-offsets) - d_value_offsets, // B (value offsets) - start_idx, // Start indices into A and B - end_idx, // End indices into A and B - block_idx); // [out] diagonal intersection indices into A and B - - // Write output - if (partition_id < num_partition_samples) - { - 
d_block_idx[partition_id] = block_idx; - } -} - - -/** - * Segmented reduce region kernel entry point (multi-block). - */ -template < - typename BlockSegReduceRegionPolicy, ///< Parameterized BlockSegReduceRegionPolicy tuning policy - typename SegmentOffsetIterator, ///< Random-access input iterator type for reading segment end-offsets - typename ValueIterator, ///< Random-access input iterator type for reading values - typename OutputIteratorT, ///< Random-access output iterator type for writing segment reductions - typename ReductionOp, ///< Binary reduction operator type having member T operator()(const T &a, const T &b) - typename OffsetT, ///< Signed integer type for global offsets - typename Value> ///< Value type -__launch_bounds__ (BlockSegReduceRegionPolicy::BLOCK_THREADS) -__global__ void SegReduceRegionKernel( - SegmentOffsetIterator d_segment_end_offsets, ///< [in] A sequence of \p num_segments segment end-offsets - ValueIterator d_values, ///< [in] A sequence of \p num_values values - OutputIteratorT d_output, ///< [out] A sequence of \p num_segments segment totals - KeyValuePair *d_tuple_partials, ///< [out] A sequence of (gridDim.x * 2) partial reduction tuples - IndexPair *d_block_idx, - OffsetT num_values, ///< [in] Number of values to reduce - OffsetT num_segments, ///< [in] Number of segments being reduced - Value identity, ///< [in] Identity value (for zero-length segments) - ReductionOp reduction_op, ///< [in] Reduction operator - GridEvenShare even_share) ///< [in] Even-share descriptor for mapping an equal number of tiles onto each thread block -{ - typedef KeyValuePair KeyValuePair; - - // Specialize threadblock abstraction type for reducing a range of segmented values - typedef BlockSegReduceRegion< - BlockSegReduceRegionPolicy, - SegmentOffsetIterator, - ValueIterator, - OutputIteratorT, - ReductionOp, - OffsetT> - BlockSegReduceRegion; - - // Shared memory allocation - __shared__ typename BlockSegReduceRegion::TempStorage temp_storage; - - // Initialize threadblock even-share to tell us where to start and stop our tile-processing - even_share.BlockInit(); - - // Construct persistent thread block - BlockSegReduceRegion thread_block( - temp_storage, - d_segment_end_offsets, - d_values, - d_output, - d_block_idx, - num_values, - num_segments, - identity, - reduction_op); - - // First and last partial reduction tuples within the range (valid in thread-0) - KeyValuePair first_tuple, last_tuple; - - // Consume block's region of work - thread_block.ProcessRegion( - even_share.block_offset, - even_share.block_end, - first_tuple, - last_tuple); - - if (threadIdx.x == 0) - { - if (gridDim.x > 1) - { - // Special case where the first segment written and the carry-out are for the same segment - if (first_tuple.key == last_tuple.key) - { - first_tuple.value = identity; - } - - // Write the first and last partial products from this thread block so - // that they can be subsequently "fixed up" in the next kernel. - d_tuple_partials[blockIdx.x * 2] = first_tuple; - d_tuple_partials[(blockIdx.x * 2) + 1] = last_tuple; - } - } - -} - - -/** - * Segmented reduce region kernel entry point (single-block). 
- */
-template <
- typename BlockSegReduceRegionByKeyPolicy, ///< Parameterized BlockSegReduceRegionByKeyPolicy tuning policy
- typename InputIteratorT, ///< Random-access iterator referencing key-value input tuples
- typename OutputIteratorT, ///< Random-access iterator referencing segment output totals
- typename ReductionOp, ///< Binary reduction operator type having member T operator()(const T &a, const T &b)
- typename OffsetT, ///< Signed integer type for global offsets
- typename Value> ///< Value type
-__launch_bounds__ (BlockSegReduceRegionByKeyPolicy::BLOCK_THREADS, 1)
-__global__ void SegReduceRegionByKeyKernel(
- InputIteratorT d_tuple_partials, ///< [in] A sequence of partial reduction tuples
- OutputIteratorT d_output, ///< [out] A sequence of \p num_segments segment totals
- OffsetT num_segments, ///< [in] Number of segments in the \p d_output sequence
- int num_tuple_partials, ///< [in] Number of partial reduction tuples being reduced
- Value identity, ///< [in] Identity value (for zero-length segments)
- ReductionOp reduction_op) ///< [in] Reduction operator
-{
- // Specialize threadblock abstraction type for reducing a range of values by key
- typedef BlockSegReduceRegionByKey<
- BlockSegReduceRegionByKeyPolicy,
- InputIteratorT,
- OutputIteratorT,
- ReductionOp>
- BlockSegReduceRegionByKey;
-
- // Shared memory allocation
- __shared__ typename BlockSegReduceRegionByKey::TempStorage temp_storage;
-
- // Construct persistent thread block
- BlockSegReduceRegionByKey thread_block(
- temp_storage,
- d_tuple_partials,
- d_output,
- identity,
- reduction_op);
-
- // Process input tiles
- thread_block.ProcessRegion(
- 0, // Region start
- num_tuple_partials, // Region end
- 0, // First segment ID
- num_segments); // Last segment ID (one-past)
-}
-
-
-
-
-/******************************************************************************
- * Dispatch
- ******************************************************************************/
-
-/**
- * Utility class for dispatching the appropriately-tuned kernels for DeviceReduce
- */
-template <
- typename ValueIterator, ///< Random-access input iterator type for reading values
- typename SegmentOffsetIterator, ///< Random-access input iterator type for reading segment end-offsets
- typename OutputIteratorT, ///< Random-access output iterator type for writing segment reductions
- typename ReductionOp, ///< Binary reduction operator type having member T operator()(const T &a, const T &b)
- typename OffsetT> ///< Signed integer type for global offsets
-struct DeviceSegReduceDispatch
-{
- // Value type
- typedef typename std::iterator_traits<ValueIterator>::value_type Value;
-
- // Reduce-by-key data type tuple (segment-ID, value)
- typedef KeyValuePair<OffsetT, Value> KeyValuePair;
-
- // Index pair data type
- typedef IndexPair<OffsetT> IndexPair;
-
-
- /******************************************************************************
- * Tuning policies
- ******************************************************************************/
-
- /// SM35
- struct Policy350
- {
- // ReduceRegionPolicy
- typedef BlockSegReduceRegionPolicy<
- 128, ///< Threads per thread block
- 6, ///< Items per thread (per tile of input)
- true, ///< Whether or not to cache incoming segment offsets in shared memory before reducing each tile
- false, ///< Whether or not to cache incoming values in shared memory before reducing each tile
- LOAD_DEFAULT, ///< Cache load modifier for reading segment offsets
- LOAD_LDG, ///< Cache load modifier for reading values
- BLOCK_REDUCE_RAKING, ///< The BlockReduce algorithm
to use - BLOCK_SCAN_WARP_SCANS> ///< The BlockScan algorithm to use - SegReduceRegionPolicy; - - // ReduceRegionByKeyPolicy - typedef BlockSegReduceRegionByKeyPolicy< - 256, ///< Threads per thread block - 9, ///< Items per thread (per tile of input) - BLOCK_LOAD_DIRECT, ///< The BlockLoad algorithm to use - false, ///< Whether or not only one warp's worth of shared memory should be allocated and time-sliced among block-warps during any load-related data transpositions (versus each warp having its own storage) - LOAD_LDG, ///< Cache load modifier for reading input elements - BLOCK_SCAN_WARP_SCANS> ///< The BlockScan algorithm to use - SegReduceRegionByKeyPolicy; - }; - - - /// SM10 - struct Policy100 - { - // ReduceRegionPolicy - typedef BlockSegReduceRegionPolicy< - 128, ///< Threads per thread block - 3, ///< Items per thread (per tile of input) - false, ///< Whether or not to cache incoming segment offsets in shared memory before reducing each tile - false, ///< Whether or not to cache incoming values in shared memory before reducing each tile - LOAD_DEFAULT, ///< Cache load modifier for reading segment offsets - LOAD_DEFAULT, ///< Cache load modifier for reading values - BLOCK_REDUCE_RAKING, ///< The BlockReduce algorithm to use - BLOCK_SCAN_RAKING> ///< The BlockScan algorithm to use - SegReduceRegionPolicy; - - // ReduceRegionByKeyPolicy - typedef BlockSegReduceRegionByKeyPolicy< - 128, ///< Threads per thread block - 3, ///< Items per thread (per tile of input) - BLOCK_LOAD_WARP_TRANSPOSE, ///< The BlockLoad algorithm to use - false, ///< Whether or not only one warp's worth of shared memory should be allocated and time-sliced among block-warps during any load-related data transpositions (versus each warp having its own storage) - LOAD_DEFAULT, ///< Cache load modifier for reading input elements - BLOCK_SCAN_WARP_SCANS> ///< The BlockScan algorithm to use - SegReduceRegionByKeyPolicy; - }; - - - /****************************************************************************** - * Tuning policies of current PTX compiler pass - ******************************************************************************/ - -#if (CUB_PTX_ARCH >= 350) - typedef Policy350 PtxPolicy; -/* -#elif (CUB_PTX_ARCH >= 300) - typedef Policy300 PtxPolicy; - -#elif (CUB_PTX_ARCH >= 200) - typedef Policy200 PtxPolicy; - -#elif (CUB_PTX_ARCH >= 130) - typedef Policy130 PtxPolicy; -*/ -#else - typedef Policy100 PtxPolicy; - -#endif - - // "Opaque" policies (whose parameterizations aren't reflected in the type signature) - struct PtxSegReduceRegionPolicy : PtxPolicy::SegReduceRegionPolicy {}; - struct PtxSegReduceRegionByKeyPolicy : PtxPolicy::SegReduceRegionByKeyPolicy {}; - - - /****************************************************************************** - * Utilities - ******************************************************************************/ - - /** - * Initialize kernel dispatch configurations with the policies corresponding to the PTX assembly we will use - */ - template < - typename SegReduceKernelConfig, - typename SegReduceByKeyKernelConfig> - __host__ __device__ __forceinline__ - static void InitConfigs( - int ptx_version, - SegReduceKernelConfig &seg_reduce_region_config, - SegReduceByKeyKernelConfig &seg_reduce_region_by_key_config) - { - #if (CUB_PTX_ARCH > 0) - - // We're on the device, so initialize the kernel dispatch configurations with the current PTX policy - seg_reduce_region_config.Init(); - seg_reduce_region_by_key_config.Init(); - - #else - - // We're on the host, so lookup and initialize 
the kernel dispatch configurations with the policies that match the device's PTX version - if (ptx_version >= 350) - { - seg_reduce_region_config.template Init(); - seg_reduce_region_by_key_config.template Init(); - } -/* - else if (ptx_version >= 300) - { - seg_reduce_region_config.template Init(); - seg_reduce_region_by_key_config.template Init(); - } - else if (ptx_version >= 200) - { - seg_reduce_region_config.template Init(); - seg_reduce_region_by_key_config.template Init(); - } - else if (ptx_version >= 130) - { - seg_reduce_region_config.template Init(); - seg_reduce_region_by_key_config.template Init(); - } -*/ - else - { - seg_reduce_region_config.template Init(); - seg_reduce_region_by_key_config.template Init(); - } - - #endif - } - - - /** - * SegReduceRegionKernel kernel dispatch configuration - */ - struct SegReduceKernelConfig - { - int block_threads; - int items_per_thread; - bool use_smem_segment_cache; - bool use_smem_value_cache; - CacheLoadModifier load_modifier_segments; - CacheLoadModifier load_modifier_values; - BlockReduceAlgorithm reduce_algorithm; - BlockScanAlgorithm scan_algorithm; - - template - __host__ __device__ __forceinline__ - void Init() - { - block_threads = SegReduceRegionPolicy::BLOCK_THREADS; - items_per_thread = SegReduceRegionPolicy::ITEMS_PER_THREAD; - use_smem_segment_cache = SegReduceRegionPolicy::USE_SMEM_SEGMENT_CACHE; - use_smem_value_cache = SegReduceRegionPolicy::USE_SMEM_VALUE_CACHE; - load_modifier_segments = SegReduceRegionPolicy::LOAD_MODIFIER_SEGMENTS; - load_modifier_values = SegReduceRegionPolicy::LOAD_MODIFIER_VALUES; - reduce_algorithm = SegReduceRegionPolicy::REDUCE_ALGORITHM; - scan_algorithm = SegReduceRegionPolicy::SCAN_ALGORITHM; - } - }; - - /** - * SegReduceRegionByKeyKernel kernel dispatch configuration - */ - struct SegReduceByKeyKernelConfig - { - int block_threads; - int items_per_thread; - BlockLoadAlgorithm load_algorithm; - bool load_warp_time_slicing; - CacheLoadModifier load_modifier; - BlockScanAlgorithm scan_algorithm; - - template - __host__ __device__ __forceinline__ - void Init() - { - block_threads = SegReduceRegionByKeyPolicy::BLOCK_THREADS; - items_per_thread = SegReduceRegionByKeyPolicy::ITEMS_PER_THREAD; - load_algorithm = SegReduceRegionByKeyPolicy::LOAD_ALGORITHM; - load_warp_time_slicing = SegReduceRegionByKeyPolicy::LOAD_WARP_TIME_SLICING; - load_modifier = SegReduceRegionByKeyPolicy::LOAD_MODIFIER; - scan_algorithm = SegReduceRegionByKeyPolicy::SCAN_ALGORITHM; - } - }; - - - /****************************************************************************** - * Dispatch entrypoints - ******************************************************************************/ - - /** - * Internal dispatch routine for computing a device-wide segmented reduction. - */ - template < - typename SegReducePartitionKernelPtr, - typename SegReduceRegionKernelPtr, ///< Function type of cub::SegReduceRegionKernel - typename SegReduceRegionByKeyKernelPtr> ///< Function type of cub::SegReduceRegionByKeyKernel - __host__ __device__ __forceinline__ - static cudaError_t Dispatch( - void* d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is returned in \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation. 
- ValueIterator d_values, ///< [in] A sequence of \p num_values data to reduce - SegmentOffsetIterator d_segment_offsets, ///< [in] A sequence of (\p num_segments + 1) segment offsets - OutputIteratorT d_output, ///< [out] A sequence of \p num_segments segment totals - OffsetT num_values, ///< [in] Total number of values to reduce - OffsetT num_segments, ///< [in] Number of segments being reduced - Value identity, ///< [in] Identity value (for zero-length segments) - ReductionOp reduction_op, ///< [in] Reduction operator - cudaStream_t stream, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous, ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - int sm_version, ///< [in] SM version of target device to use when computing SM occupancy - SegReducePartitionKernelPtr seg_reduce_partition_kernel, ///< [in] Kernel function pointer to parameterization of cub::SegReduceRegionKernel - SegReduceRegionKernelPtr seg_reduce_region_kernel, ///< [in] Kernel function pointer to parameterization of cub::SegReduceRegionKernel - SegReduceRegionByKeyKernelPtr seg_reduce_region_by_key_kernel, ///< [in] Kernel function pointer to parameterization of cub::SegReduceRegionByKeyKernel - SegReduceKernelConfig &seg_reduce_region_config, ///< [in] Dispatch parameters that match the policy that \p seg_reduce_region_kernel was compiled for - SegReduceByKeyKernelConfig &seg_reduce_region_by_key_config) ///< [in] Dispatch parameters that match the policy that \p seg_reduce_region_by_key_kernel was compiled for - { -#ifndef CUB_RUNTIME_ENABLED - - // Kernel launch not supported from this device - return CubDebug(cudaErrorNotSupported ); - -#else - - cudaError error = cudaSuccess; - do - { - // Dispatch two kernels: (1) a multi-block segmented reduction - // to reduce regions by block, and (2) a single-block reduce-by-key kernel - // to "fix up" segments spanning more than one region. 
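
The work is apportioned by merge-path search over two sorted sequences: the segment end-offsets (list A) and the implicit value indices 0..num_values-1 (list B), so each thread block receives an equal share of the combined decision path; SegReducePartitionKernel above writes one IndexPair split per block. A host-side sketch of the standard merge-path binary search (one common tie-breaking convention; the helper name is illustrative, not the deleted file's API):

    // Merge-path search: on diagonal d of the merge grid, find the split (a_idx, b_idx)
    // with a_idx + b_idx == d such that A[0..a_idx) and B[0..b_idx) precede the split.
    template <typename AIteratorT, typename BIteratorT, typename OffsetT>
    void MergePathSearchHost(
        OffsetT diagonal,    // d, in [0, a_len + b_len]
        AIteratorT a,        // e.g. segment end-offsets
        BIteratorT b,        // e.g. counting iterator of value indices
        OffsetT a_len,
        OffsetT b_len,
        OffsetT &a_idx,
        OffsetT &b_idx)
    {
        OffsetT lo = (diagonal > b_len) ? diagonal - b_len : 0;
        OffsetT hi = (diagonal < a_len) ? diagonal : a_len;
        while (lo < hi)                          // binary search along the diagonal
        {
            OffsetT mid = (lo + hi) >> 1;
            if (a[mid] <= b[diagonal - mid - 1])
                lo = mid + 1;                    // A[mid] merges before B[d - mid - 1]
            else
                hi = mid;
        }
        a_idx = lo;
        b_idx = diagonal - lo;
    }

Kernel (2) then only has to repair segments that straddle a block boundary: each region block defers its first and last (segment-ID, partial) tuples to d_tuple_partials, and the single-block by-key pass head-flags runs of equal IDs, scans each run, and scatters the combined total. For example (illustrative values), a segment split across two regions with partials (4, 2) and (4, 7) still ends up with d_output[4] = 9.
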
-
- // Tile size of seg_reduce_region_kernel
- int tile_size = seg_reduce_region_config.block_threads * seg_reduce_region_config.items_per_thread;
-
- // Get device ordinal
- int device_ordinal;
- if (CubDebug(error = cudaGetDevice(&device_ordinal))) break;
-
- // Get SM count
- int sm_count;
- if (CubDebug(error = cudaDeviceGetAttribute (&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break;
-
- // Get SM occupancy for histogram_region_kernel
- int seg_reduce_region_sm_occupancy;
- if (CubDebug(error = MaxSmOccupancy(
- seg_reduce_region_sm_occupancy,
- sm_version,
- seg_reduce_region_kernel,
- seg_reduce_region_config.block_threads))) break;
-
- // Get device occupancy for histogram_region_kernel
- int seg_reduce_region_occupancy = seg_reduce_region_sm_occupancy * sm_count;
-
- // Even-share work distribution
- int num_diagonals = num_values + num_segments; // Total number of work items
- int subscription_factor = seg_reduce_region_sm_occupancy; // Amount of CTAs to oversubscribe the device beyond actively-resident (heuristic)
- int max_grid_size = seg_reduce_region_occupancy * subscription_factor;
- GridEvenShare<OffsetT> even_share(
- num_diagonals,
- max_grid_size,
- tile_size);
-
- // Get grid size for seg_reduce_region_kernel
- int seg_reduce_region_grid_size = even_share.grid_size;
-
- // Number of "fix-up" reduce-by-key tuples (2 per thread block)
- int num_tuple_partials = seg_reduce_region_grid_size * 2;
- int num_partition_samples = seg_reduce_region_grid_size + 1;
-
- // Temporary storage allocation requirements
- void* allocations[2];
- size_t allocation_sizes[2] =
- {
- num_tuple_partials * sizeof(KeyValuePair), // bytes needed for "fix-up" reduce-by-key tuples
- num_partition_samples * sizeof(IndexPair), // bytes needed for block indices
- };
-
- // Alias the temporary allocations from the single storage blob (or set the necessary size of the blob)
- if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break;
- if (d_temp_storage == NULL)
- {
- // Return if the caller is simply requesting the size of the storage allocation
- return cudaSuccess;
- }
-
- // Alias the allocations
- KeyValuePair *d_tuple_partials = (KeyValuePair*) allocations[0]; // "fix-up" tuples
- IndexPair *d_block_idx = (IndexPair *) allocations[1]; // block starting/ending indices
-
- // Array of segment end-offsets
- SegmentOffsetIterator d_segment_end_offsets = d_segment_offsets + 1;
-
- // Grid launch params for seg_reduce_partition_kernel
- int partition_block_size = 32;
- int partition_grid_size = (num_partition_samples + partition_block_size - 1) / partition_block_size;
-
- // Partition work among multiple thread blocks if necessary
- if (seg_reduce_region_grid_size > 1)
- {
- // Log seg_reduce_partition_kernel configuration
- if (debug_synchronous) _CubLog("Invoking seg_reduce_partition_kernel<<<%d, %d, 0, %lld>>>()\n",
- partition_grid_size, partition_block_size, (long long) stream);
-
- // Invoke seg_reduce_partition_kernel
- seg_reduce_partition_kernel<<<partition_grid_size, partition_block_size, 0, stream>>>(
- d_segment_end_offsets, ///< [in] A sequence of \p num_segments segment end-offsets
- d_block_idx,
- num_partition_samples,
- num_values, ///< [in] Number of values to reduce
- num_segments, ///< [in] Number of segments being reduced
- even_share); ///< [in] Even-share descriptor for mapping an equal number of tiles onto each thread block
-
- // Sync the stream if specified
- if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break;
- }
-
- // Log seg_reduce_region_kernel
configuration
- if (debug_synchronous) _CubLog("Invoking seg_reduce_region_kernel<<<%d, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n",
- seg_reduce_region_grid_size, seg_reduce_region_config.block_threads, (long long) stream, seg_reduce_region_config.items_per_thread, seg_reduce_region_sm_occupancy);
-
- // Mooch
- if (CubDebug(error = cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte))) break;
-
- // Invoke seg_reduce_region_kernel
- seg_reduce_region_kernel<<<seg_reduce_region_grid_size, seg_reduce_region_config.block_threads, 0, stream>>>(
- d_segment_end_offsets,
- d_values,
- d_output,
- d_tuple_partials,
- d_block_idx,
- num_values,
- num_segments,
- identity,
- reduction_op,
- even_share);
-
- // Sync the stream if specified
- if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break;
-/*
- // Perform "fix-up" of region partial reductions if grid size is greater than one thread block
- if (seg_reduce_region_grid_size > 1)
- {
- // Log seg_reduce_region_by_key_kernel configuration
- if (debug_synchronous) _CubLog("Invoking seg_reduce_region_by_key_kernel<<<%d, %d, 0, %lld>>>(), %d items per thread\n",
- 1, seg_reduce_region_by_key_config.block_threads, (long long) stream, seg_reduce_region_by_key_config.items_per_thread);
-
- // Invoke seg_reduce_region_by_key_kernel
- seg_reduce_region_by_key_kernel<<<1, seg_reduce_region_by_key_config.block_threads, 0, stream>>>(
- d_tuple_partials,
- d_output,
- num_segments,
- num_tuple_partials,
- identity,
- reduction_op);
-
- // Sync the stream if specified
- if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break;
- }
-*/
- }
-
- while (0);
-
- return error;
-
-#endif // CUB_RUNTIME_ENABLED
- }
-
-
- /**
- * Internal dispatch routine for computing a device-wide segmented reduction.
- */
- __host__ __device__ __forceinline__
- static cudaError_t Dispatch(
- void* d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is returned in \p temp_storage_bytes and no work is done.
- size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation.
- ValueIterator d_values, ///< [in] A sequence of \p num_values data to reduce
- SegmentOffsetIterator d_segment_offsets, ///< [in] A sequence of (\p num_segments + 1) segment offsets
- OutputIteratorT d_output, ///< [out] A sequence of \p num_segments segment totals
- OffsetT num_values, ///< [in] Total number of values to reduce
- OffsetT num_segments, ///< [in] Number of segments being reduced
- Value identity, ///< [in] Identity value (for zero-length segments)
- ReductionOp reduction_op, ///< [in] Reduction operator
- cudaStream_t stream, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0.
- bool debug_synchronous) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false.
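
Both Dispatch overloads follow CUB's usual double-call temporary-storage protocol documented in the parameter comments above. A hedged usage sketch against the public DeviceSegReduce::Sum entry point defined further below (the device buffers d_values, d_segment_offsets, and d_output are assumed to be allocated and populated already):

    // First call with d_temp_storage == NULL only sizes the temporary storage;
    // the second call performs the segmented reduction itself.
    void* d_temp_storage = NULL;
    size_t temp_storage_bytes = 0;
    DeviceSegReduce::Sum(d_temp_storage, temp_storage_bytes,
        d_values, d_segment_offsets, d_output, num_values, num_segments);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    DeviceSegReduce::Sum(d_temp_storage, temp_storage_bytes,
        d_values, d_segment_offsets, d_output, num_values, num_segments);
    cudaFree(d_temp_storage);
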
- { - cudaError error = cudaSuccess; - do - { - // Get PTX version - int ptx_version; - #if (CUB_PTX_ARCH == 0) - if (CubDebug(error = PtxVersion(ptx_version))) break; - #else - ptx_version = CUB_PTX_ARCH; - #endif - - // Get kernel kernel dispatch configurations - SegReduceKernelConfig seg_reduce_region_config; - SegReduceByKeyKernelConfig seg_reduce_region_by_key_config; - - InitConfigs(ptx_version, seg_reduce_region_config, seg_reduce_region_by_key_config); - - // Dispatch - if (CubDebug(error = Dispatch( - d_temp_storage, - temp_storage_bytes, - d_values, - d_segment_offsets, - d_output, - num_values, - num_segments, - identity, - reduction_op, - stream, - debug_synchronous, - ptx_version, // Use PTX version instead of SM version because, as a statically known quantity, this improves device-side launch dramatically but at the risk of imprecise occupancy calculation for mismatches - SegReducePartitionKernel, - SegReduceRegionKernel, - SegReduceRegionByKeyKernel, - seg_reduce_region_config, - seg_reduce_region_by_key_config))) break; - } - while (0); - - return error; - - } -}; - - - - -/****************************************************************************** - * DeviceSegReduce - *****************************************************************************/ - -/** - * \brief DeviceSegReduce provides operations for computing a device-wide, parallel segmented reduction across a sequence of data items residing within global memory. - * \ingroup DeviceModule - * - * \par Overview - * A reduction (or fold) - * uses a binary combining operator to compute a single aggregate from a list of input elements. - * - * \par Usage Considerations - * \cdp_class{DeviceReduce} - * - */ -struct DeviceSegReduce -{ - /** - * \brief Computes a device-wide segmented reduction using the specified binary \p reduction_op functor. - * - * \par - * Does not support non-commutative reduction operators. - * - * \devicestorage - * - * \cdp - * - * \iterator - * - * \tparam ValueIterator [inferred] Random-access input iterator type for reading values - * \tparam SegmentOffsetIterator [inferred] Random-access input iterator type for reading segment end-offsets - * \tparam OutputIteratorT [inferred] Random-access output iterator type for writing segment reductions - * \tparam Value [inferred] Value type - * \tparam ReductionOp [inferred] Binary reduction operator type having member T operator()(const T &a, const T &b) - */ - template < - typename ValueIterator, - typename SegmentOffsetIterator, - typename OutputIteratorT, - typename Value, - typename ReductionOp> - __host__ __device__ __forceinline__ - static cudaError_t Reduce( - void* d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is returned in \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation. - ValueIterator d_values, ///< [in] A sequence of \p num_values data to reduce - SegmentOffsetIterator d_segment_offsets, ///< [in] A sequence of (\p num_segments + 1) segment offsets - OutputIteratorT d_output, ///< [out] A sequence of \p num_segments segment totals - int num_values, ///< [in] Total number of values to reduce - int num_segments, ///< [in] Number of segments being reduced - Value identity, ///< [in] Identity value (for zero-length segments) - ReductionOp reduction_op, ///< [in] Reduction operator - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. 
- bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. - { - // Signed integer type for global offsets - typedef int OffsetT; - - typedef DeviceSegReduceDispatch< - ValueIterator, - SegmentOffsetIterator, - OutputIteratorT, - ReductionOp, - OffsetT> - DeviceSegReduceDispatch; - - return DeviceSegReduceDispatch::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_values, - d_segment_offsets, - d_output, - num_values, - num_segments, - identity, - reduction_op, - stream, - debug_synchronous); - } - - - /** - * \brief Computes a device-wide segmented sum using the addition ('+') operator. - * - * \par - * Does not support non-commutative summation. - * - * \devicestorage - * - * \cdp - * - * \iterator - * - * \tparam ValueIterator [inferred] Random-access input iterator type for reading values - * \tparam SegmentOffsetIterator [inferred] Random-access input iterator type for reading segment end-offsets - * \tparam OutputIteratorT [inferred] Random-access output iterator type for writing segment reductions - */ - template < - typename ValueIterator, - typename SegmentOffsetIterator, - typename OutputIteratorT> - __host__ __device__ __forceinline__ - static cudaError_t Sum( - void* d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is returned in \p temp_storage_bytes and no work is done. - size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation. - ValueIterator d_values, ///< [in] A sequence of \p num_values data to reduce - SegmentOffsetIterator d_segment_offsets, ///< [in] A sequence of (\p num_segments + 1) segment offsets - OutputIteratorT d_output, ///< [out] A sequence of \p num_segments segment totals - int num_values, ///< [in] Total number of values to reduce - int num_segments, ///< [in] Number of segments being reduced - cudaStream_t stream = 0, ///< [in] [optional] CUDA stream to launch kernels within. Default is stream0. - bool debug_synchronous = false) ///< [in] [optional] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
- { - // Signed integer type for global offsets - typedef int OffsetT; - - // Value type - typedef typename std::iterator_traits::value_type Value; - - Value identity = Value(); - cub::Sum reduction_op; - - typedef DeviceSegReduceDispatch< - ValueIterator, - SegmentOffsetIterator, - OutputIteratorT, - cub::Sum, - OffsetT> - DeviceSegReduceDispatch; - - return DeviceSegReduceDispatch::Dispatch( - d_temp_storage, - temp_storage_bytes, - d_values, - d_segment_offsets, - d_output, - num_values, - num_segments, - identity, - reduction_op, - stream, - debug_synchronous); - } -}; - - - - -//--------------------------------------------------------------------- -// Test generation -//--------------------------------------------------------------------- - -/** - * Initialize problem - */ -template -void Initialize( - GenMode gen_mode, - Value *h_values, - vector &segment_offsets, - int num_values, - int avg_segment_size) -{ - // Initialize values -// if (g_verbose) printf("Values: "); - for (int i = 0; i < num_values; ++i) - { - InitValue(gen_mode, h_values[i], i); -// if (g_verbose) std::cout << h_values[i] << ", "; - } -// if (g_verbose) printf("\n\n"); - - // Initialize segment lengths - const unsigned int MAX_INTEGER = -1u; - const unsigned int MAX_SEGMENT_LENGTH = avg_segment_size * 2; - const double SCALE_FACTOR = double(MAX_SEGMENT_LENGTH) / double(MAX_INTEGER); - - segment_offsets.push_back(0); - - OffsetT consumed = 0; - OffsetT remaining = num_values; - while (remaining > 0) - { - // Randomly sample a 32-bit unsigned int - unsigned int segment_length; - RandomBits(segment_length); - - // Scale to maximum segment length - segment_length = (unsigned int) (double(segment_length) * SCALE_FACTOR); - segment_length = CUB_MIN(segment_length, remaining); - - consumed += segment_length; - remaining -= segment_length; - - segment_offsets.push_back(consumed); - } -} - - -/** - * Compute reference answer - */ -template -void ComputeReference( - Value *h_values, - OffsetT *h_segment_offsets, - Value *h_reference, - int num_segments, - Value identity) -{ - if (g_verbose) printf("%d segment reductions: ", num_segments); - for (int segment = 0; segment < num_segments; ++segment) - { - h_reference[segment] = identity; - - for (int i = h_segment_offsets[segment]; i < h_segment_offsets[segment + 1]; ++i) - { - h_reference[segment] += h_values[i]; - } - if (g_verbose) std::cout << h_reference[segment] << ", "; - } - if (g_verbose) printf("\n\n"); -} - - -/** - * Simple test of device - */ -template < - bool CDP, - typename OffsetT, - typename Value, - typename ReductionOp> -void Test( - OffsetT num_values, - int avg_segment_size, - ReductionOp reduction_op, - Value identity, - char* type_string) -{ - Value *h_values = NULL; - Value *h_reference = NULL; - OffsetT *h_segment_offsets = NULL; - - printf("%d\n", num_values); - - // Initialize problem on host - h_values = new Value[num_values]; - vector segment_offsets; - Initialize(UNIFORM, h_values, segment_offsets, num_values, avg_segment_size); - - // Allocate simple offsets array and copy STL vector into it - h_segment_offsets = new OffsetT[segment_offsets.size()]; - for (int i = 0; i < segment_offsets.size(); ++i) - h_segment_offsets[i] = segment_offsets[i]; - - OffsetT num_segments = segment_offsets.size() - 1; - if (g_verbose) - { - printf("%d segment offsets: ", num_segments); - for (int i = 0; i < num_segments; ++i) - std::cout << h_segment_offsets[i] << "(" << h_segment_offsets[i + 1] - h_segment_offsets[i] << "), "; - if (g_verbose) std::cout << 
std::endl << std::endl; - } - - // Solve problem on host - h_reference = new Value[num_segments]; - ComputeReference(h_values, h_segment_offsets, h_reference, num_segments, identity); - - printf("\n\n%s cub::DeviceSegReduce::%s %d items (%d-byte %s), %d segments (%d-byte offset indices)\n", - (CDP) ? "CDP device invoked" : "Host-invoked", - (Equals::VALUE) ? "Sum" : "Reduce", - num_values, (int) sizeof(Value), type_string, - num_segments, (int) sizeof(OffsetT)); - fflush(stdout); - - // Allocate and initialize problem on device - Value *d_values = NULL; - OffsetT *d_segment_offsets = NULL; - Value *d_output = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_values, sizeof(Value) * num_values)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_segment_offsets, sizeof(OffsetT) * (num_segments + 1))); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_output, sizeof(Value) * num_segments)); - CubDebugExit(cudaMemcpy(d_values, h_values, sizeof(Value) * num_values, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemcpy(d_segment_offsets, h_segment_offsets, sizeof(OffsetT) * (num_segments + 1), cudaMemcpyHostToDevice)); - - // Request and allocate temporary storage - void *d_temp_storage = NULL; - size_t temp_storage_bytes = 0; - CubDebugExit(DeviceSegReduce::Sum(d_temp_storage, temp_storage_bytes, d_values, d_segment_offsets, d_output, num_values, num_segments, 0, false)); - CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); - - // Clear device output - CubDebugExit(cudaMemset(d_output, 0, sizeof(Value) * num_segments)); - - // Run warmup/correctness iteration - CubDebugExit(DeviceSegReduce::Sum(d_temp_storage, temp_storage_bytes, d_values, d_segment_offsets, d_output, num_values, num_segments, 0, true)); - - // Check for correctness (and display results, if specified) - int compare = CompareDeviceResults(h_reference, d_output, num_segments, true, g_verbose); - printf("\t%s", compare ? 
"FAIL" : "PASS"); - - // Flush any stdout/stderr - fflush(stdout); - fflush(stderr); - - // Performance - GpuTimer gpu_timer; - gpu_timer.Start(); - for (int i = 0; i < g_timing_iterations; ++i) - { - CubDebugExit(DeviceSegReduce::Sum(d_temp_storage, temp_storage_bytes, d_values, d_segment_offsets, d_output, num_values, num_segments, 0, false)); - } - gpu_timer.Stop(); - float elapsed_millis = gpu_timer.ElapsedMillis(); - - // Display performance - if (g_timing_iterations > 0) - { - float avg_millis = elapsed_millis / g_timing_iterations; - float giga_rate = float(num_values) / avg_millis / 1000.0 / 1000.0; - float giga_bandwidth = giga_rate * - printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s", avg_millis, giga_rate, giga_bandwidth); - } - - // Device cleanup - if (d_values) CubDebugExit(g_allocator.DeviceFree(d_values)); - if (d_segment_offsets) CubDebugExit(g_allocator.DeviceFree(d_segment_offsets)); - if (d_output) CubDebugExit(g_allocator.DeviceFree(d_output)); - if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); - - // Host cleanup - if (h_values) delete[] h_values; - if (h_segment_offsets) delete[] h_segment_offsets; - if (h_reference) delete[] h_reference; -} - - -/** - * Main - */ -int main(int argc, char** argv) -{ - int num_values = 32 * 1024 * 1024; - int avg_segment_size = 500; - - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - args.GetCmdLineArgument("n", num_values); - args.GetCmdLineArgument("ss", avg_segment_size); - args.GetCmdLineArgument("i", g_timing_iterations); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--device=] " - "[--v] " - "[--i=] " - "[--n=]\n" - "[--ss=]\n" - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - - Test((int) num_values, avg_segment_size, Sum(), (long long) 0, CUB_TYPE_STRING(long long)); - - return 0; -} - - - diff --git a/ml-xgboost/cub/experimental/histogram/histogram_cub.h b/ml-xgboost/cub/experimental/histogram/histogram_cub.h deleted file mode 100644 index 52556fc..0000000 --- a/ml-xgboost/cub/experimental/histogram/histogram_cub.h +++ /dev/null @@ -1,109 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- ******************************************************************************/
-
-#include <cub/cub.cuh>
-
-using namespace cub;
-
-template <
- int NUM_CHANNELS,
- int ACTIVE_CHANNELS,
- int NUM_BINS,
- typename PixelType>
-double run_cub_histogram(
- PixelType *d_image,
- int width,
- int height,
- unsigned int *d_hist,
- bool is_warmup)
-{
- enum {
- is_float = Equals<PixelType, float4>::VALUE,
- };
-
- typedef typename If<is_float, float, unsigned char>::Type SampleT; // Sample type
- typedef typename If<is_float, float, unsigned int>::Type LevelT; // Level type (uint32 for uchar)
-
- // Setup data structures
- unsigned int* d_histogram[ACTIVE_CHANNELS];
- int num_levels[ACTIVE_CHANNELS]; ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel i is num_levels[i] - 1.
- LevelT lower_level[ACTIVE_CHANNELS]; ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel.
- LevelT upper_level[ACTIVE_CHANNELS]; ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel.
-
- for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL)
- {
- d_histogram[CHANNEL] = d_hist + (CHANNEL * NUM_BINS);
- num_levels[CHANNEL] = NUM_BINS + 1;
- lower_level[CHANNEL] = 0;
- upper_level[CHANNEL] = (is_float) ? 1 : 256;
- }
-
- // Allocate temporary storage
- size_t temp_storage_bytes = 0;
- void *d_temp_storage = NULL;
-
- SampleT* d_image_samples = (SampleT*) d_image;
-
- // Get amount of temporary storage needed
- DeviceHistogram::MultiHistogramEven<NUM_CHANNELS, ACTIVE_CHANNELS>(
- d_temp_storage,
- temp_storage_bytes,
- d_image_samples,
- d_histogram,
- num_levels,
- lower_level,
- upper_level,
- width * height,
- (cudaStream_t) 0,
- is_warmup);
-
- cudaMalloc(&d_temp_storage, temp_storage_bytes);
-
- GpuTimer gpu_timer;
- gpu_timer.Start();
-
- // Compute histogram
- DeviceHistogram::MultiHistogramEven<NUM_CHANNELS, ACTIVE_CHANNELS>(
- d_temp_storage,
- temp_storage_bytes,
- d_image_samples,
- d_histogram,
- num_levels,
- lower_level,
- upper_level,
- width * height,
- (cudaStream_t) 0,
- is_warmup);
-
- gpu_timer.Stop();
- float elapsed_millis = gpu_timer.ElapsedMillis();
-
- cudaFree(d_temp_storage);
-
- return elapsed_millis;
-}
-
diff --git a/ml-xgboost/cub/experimental/histogram/histogram_gmem_atomics.h b/ml-xgboost/cub/experimental/histogram/histogram_gmem_atomics.h
deleted file mode 100644
index cfe9247..0000000
--- a/ml-xgboost/cub/experimental/histogram/histogram_gmem_atomics.h
+++ /dev/null
@@ -1,185 +0,0 @@
-/******************************************************************************
- * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -#include - -namespace histogram_gmem_atomics -{ - // Decode float4 pixel into bins - template - __device__ __forceinline__ void DecodePixel(float4 pixel, unsigned int (&bins)[ACTIVE_CHANNELS]) - { - float* samples = reinterpret_cast(&pixel); - - #pragma unroll - for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL) - bins[CHANNEL] = (unsigned int) (samples[CHANNEL] * float(NUM_BINS)); - } - - // Decode uchar4 pixel into bins - template - __device__ __forceinline__ void DecodePixel(uchar4 pixel, unsigned int (&bins)[ACTIVE_CHANNELS]) - { - unsigned char* samples = reinterpret_cast(&pixel); - - #pragma unroll - for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL) - bins[CHANNEL] = (unsigned int) (samples[CHANNEL]); - } - - // Decode uchar1 pixel into bins - template - __device__ __forceinline__ void DecodePixel(uchar1 pixel, unsigned int (&bins)[ACTIVE_CHANNELS]) - { - bins[0] = (unsigned int) pixel.x; - } - - // First-pass histogram kernel (binning into privatized counters) - template < - int NUM_PARTS, - int ACTIVE_CHANNELS, - int NUM_BINS, - typename PixelType> - __global__ void histogram_gmem_atomics( - const PixelType *in, - int width, - int height, - unsigned int *out) - { - // global position and size - int x = blockIdx.x * blockDim.x + threadIdx.x; - int y = blockIdx.y * blockDim.y + threadIdx.y; - int nx = blockDim.x * gridDim.x; - int ny = blockDim.y * gridDim.y; - - // threads in workgroup - int t = threadIdx.x + threadIdx.y * blockDim.x; // thread index in workgroup, linear in 0..nt-1 - int nt = blockDim.x * blockDim.y; // total threads in workgroup - - // group index in 0..ngroups-1 - int g = blockIdx.x + blockIdx.y * gridDim.x; - - // initialize smem - unsigned int *gmem = out + g * NUM_PARTS; - for (int i = t; i < ACTIVE_CHANNELS * NUM_BINS; i += nt) - gmem[i] = 0; - __syncthreads(); - - // process pixels (updates our group's partial histogram in gmem) - for (int col = x; col < width; col += nx) - { - for (int row = y; row < height; row += ny) - { - PixelType pixel = in[row * width + col]; - - unsigned int bins[ACTIVE_CHANNELS]; - DecodePixel(pixel, bins); - - #pragma unroll - for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL) - atomicAdd(&gmem[(NUM_BINS * CHANNEL) + bins[CHANNEL]], 1); - 
} - } - } - - // Second pass histogram kernel (accumulation) - template < - int NUM_PARTS, - int ACTIVE_CHANNELS, - int NUM_BINS> - __global__ void histogram_gmem_accum( - const unsigned int *in, - int n, - unsigned int *out) - { - int i = blockIdx.x * blockDim.x + threadIdx.x; - if (i > ACTIVE_CHANNELS * NUM_BINS) - return; // out of range - - unsigned int total = 0; - for (int j = 0; j < n; j++) - total += in[i + NUM_PARTS * j]; - - out[i] = total; - } - - -} // namespace histogram_gmem_atomics - - -template < - int ACTIVE_CHANNELS, - int NUM_BINS, - typename PixelType> -double run_gmem_atomics( - PixelType *d_image, - int width, - int height, - unsigned int *d_hist, - bool warmup) -{ - enum - { - NUM_PARTS = 1024 - }; - - cudaDeviceProp props; - cudaGetDeviceProperties(&props, 0); - - dim3 block(32, 4); - dim3 grid(16, 16); - int total_blocks = grid.x * grid.y; - - // allocate partial histogram - unsigned int *d_part_hist; - cudaMalloc(&d_part_hist, total_blocks * NUM_PARTS * sizeof(unsigned int)); - - dim3 block2(128); - dim3 grid2((3 * NUM_BINS + block.x - 1) / block.x); - - GpuTimer gpu_timer; - gpu_timer.Start(); - - histogram_gmem_atomics::histogram_gmem_atomics<<>>( - d_image, - width, - height, - d_part_hist); - - histogram_gmem_atomics::histogram_gmem_accum<<>>( - d_part_hist, - total_blocks, - d_hist); - - gpu_timer.Stop(); - float elapsed_millis = gpu_timer.ElapsedMillis(); - - cudaFree(d_part_hist); - - return elapsed_millis; -} - diff --git a/ml-xgboost/cub/experimental/histogram/histogram_smem_atomics.h b/ml-xgboost/cub/experimental/histogram/histogram_smem_atomics.h deleted file mode 100644 index 2addba1..0000000 --- a/ml-xgboost/cub/experimental/histogram/histogram_smem_atomics.h +++ /dev/null @@ -1,195 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- *
- ******************************************************************************/
-
-#include
-
-namespace histogram_smem_atomics
-{
-    // Decode float4 pixel into bins
-    template <int NUM_BINS, int ACTIVE_CHANNELS>
-    __device__ __forceinline__ void DecodePixel(float4 pixel, unsigned int (&bins)[ACTIVE_CHANNELS])
-    {
-        float* samples = reinterpret_cast<float*>(&pixel);
-
-        #pragma unroll
-        for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL)
-            bins[CHANNEL] = (unsigned int) (samples[CHANNEL] * float(NUM_BINS));
-    }
-
-    // Decode uchar4 pixel into bins
-    template <int NUM_BINS, int ACTIVE_CHANNELS>
-    __device__ __forceinline__ void DecodePixel(uchar4 pixel, unsigned int (&bins)[ACTIVE_CHANNELS])
-    {
-        unsigned char* samples = reinterpret_cast<unsigned char*>(&pixel);
-
-        #pragma unroll
-        for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL)
-            bins[CHANNEL] = (unsigned int) (samples[CHANNEL]);
-    }
-
-    // Decode uchar1 pixel into bins
-    template <int NUM_BINS, int ACTIVE_CHANNELS>
-    __device__ __forceinline__ void DecodePixel(uchar1 pixel, unsigned int (&bins)[ACTIVE_CHANNELS])
-    {
-        bins[0] = (unsigned int) pixel.x;
-    }
-
-    // First-pass histogram kernel (binning into privatized counters)
-    template <
-        int NUM_PARTS,
-        int ACTIVE_CHANNELS,
-        int NUM_BINS,
-        typename PixelType>
-    __global__ void histogram_smem_atomics(
-        const PixelType *in,
-        int width,
-        int height,
-        unsigned int *out)
-    {
-        // global position and size
-        int x = blockIdx.x * blockDim.x + threadIdx.x;
-        int y = blockIdx.y * blockDim.y + threadIdx.y;
-        int nx = blockDim.x * gridDim.x;
-        int ny = blockDim.y * gridDim.y;
-
-        // threads in workgroup
-        int t = threadIdx.x + threadIdx.y * blockDim.x; // thread index in workgroup, linear in 0..nt-1
-        int nt = blockDim.x * blockDim.y; // total threads in workgroup
-
-        // group index in 0..ngroups-1
-        int g = blockIdx.x + blockIdx.y * gridDim.x;
-
-        // initialize smem (each channel's slice is skewed by one counter, hence the +3 padding)
-        __shared__ unsigned int smem[ACTIVE_CHANNELS * NUM_BINS + 3];
-        for (int i = t; i < ACTIVE_CHANNELS * NUM_BINS + 3; i += nt)
-            smem[i] = 0;
-        __syncthreads();
-
-        // process pixels
-        // updates our group's partial histogram in smem
-        for (int col = x; col < width; col += nx)
-        {
-            for (int row = y; row < height; row += ny)
-            {
-                PixelType pixel = in[row * width + col];
-
-                unsigned int bins[ACTIVE_CHANNELS];
-                DecodePixel<NUM_BINS>(pixel, bins);
-
-                #pragma unroll
-                for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL)
-                    atomicAdd(&smem[(NUM_BINS * CHANNEL) + bins[CHANNEL] + CHANNEL], 1);
-            }
-        }
-
-        __syncthreads();
-
-        // move to our workgroup's slice of output
-        out += g * NUM_PARTS;
-
-        // store local output to global
-        for (int i = t; i < NUM_BINS; i += nt)
-        {
-            #pragma unroll
-            for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL)
-                out[i + NUM_BINS * CHANNEL] = smem[i + NUM_BINS * CHANNEL + CHANNEL];
-        }
-    }
-
-    // Second pass histogram kernel (accumulation)
-    template <
-        int NUM_PARTS,
-        int ACTIVE_CHANNELS,
-        int NUM_BINS>
-    __global__ void histogram_smem_accum(
-        const unsigned int *in,
-        int n,
-        unsigned int *out)
-    {
-        int i = blockIdx.x * blockDim.x + threadIdx.x;
-        if (i >= ACTIVE_CHANNELS * NUM_BINS) return; // out of range
-        unsigned int total = 0;
-        for (int j = 0; j < n; j++)
-            total += in[i + NUM_PARTS * j];
-        out[i] = total;
-    }
-
-} // namespace histogram_smem_atomics
-
-
-template <
-    int ACTIVE_CHANNELS,
-    int NUM_BINS,
-    typename PixelType>
-double run_smem_atomics(
-    PixelType *d_image,
-    int width,
-    int height,
-    unsigned int *d_hist,
-    bool warmup)
-{
-    enum
-    {
-        NUM_PARTS = 1024
-    };
-
-    cudaDeviceProp props;
-    cudaGetDeviceProperties(&props, 0);
-
-    dim3 block(32, 4);
-    dim3 grid(16, 16);
-    int total_blocks = grid.x * grid.y;
-
-    // allocate partial histogram
-    unsigned int *d_part_hist;
-    cudaMalloc(&d_part_hist, total_blocks * NUM_PARTS * sizeof(unsigned int));
-
-    dim3 block2(128);
-    dim3 grid2((ACTIVE_CHANNELS * NUM_BINS + block2.x - 1) / block2.x);
-
-    GpuTimer gpu_timer;
-    gpu_timer.Start();
-
-    histogram_smem_atomics::histogram_smem_atomics<NUM_PARTS, ACTIVE_CHANNELS, NUM_BINS><<<grid, block>>>(
-        d_image,
-        width,
-        height,
-        d_part_hist);
-
-    histogram_smem_atomics::histogram_smem_accum<NUM_PARTS, ACTIVE_CHANNELS, NUM_BINS><<<grid2, block2>>>(
-        d_part_hist,
-        total_blocks,
-        d_hist);
-
-    gpu_timer.Stop();
-    float elapsed_millis = gpu_timer.ElapsedMillis();
-
-    cudaFree(d_part_hist);
-
-    return elapsed_millis;
-}
-
diff --git a/ml-xgboost/cub/experimental/histogram_compare.cu b/ml-xgboost/cub/experimental/histogram_compare.cu
deleted file mode 100644
index 0bda006..0000000
--- a/ml-xgboost/cub/experimental/histogram_compare.cu
+++ /dev/null
@@ -1,635 +0,0 @@
-/******************************************************************************
- * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of the NVIDIA CORPORATION nor the
- *       names of its contributors may be used to endorse or promote products
- *       derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
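The run_smem_atomics path above is the classic privatized-histogram pattern: each thread block bins into its own shared-memory copy of the histogram via shared-memory atomics, and a second kernel reduces the per-block partials, so the heavily contended atomics never touch DRAM. A minimal single-channel sketch of the same two-pass pattern (all names here are illustrative, not part of the deleted file):

    // Pass 1: one privatized 256-bin histogram per block, built with smem atomics.
    __global__ void partial_hist(const unsigned char *in, int n, unsigned int *parts)
    {
        __shared__ unsigned int smem[256];
        for (int i = threadIdx.x; i < 256; i += blockDim.x)
            smem[i] = 0;
        __syncthreads();

        // grid-stride loop over the input; contended atomics stay in shared memory
        for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
            atomicAdd(&smem[in[i]], 1u);
        __syncthreads();

        // flush this block's partial histogram to its own slice of global memory
        for (int i = threadIdx.x; i < 256; i += blockDim.x)
            parts[blockIdx.x * 256 + i] = smem[i];
    }

    // Pass 2: sum the per-block partials into the final histogram.
    __global__ void accum_hist(const unsigned int *parts, int num_parts, unsigned int *out)
    {
        int bin = blockIdx.x * blockDim.x + threadIdx.x;
        if (bin >= 256) return;   // note the >=: bin index 256 is already out of range
        unsigned int total = 0;
        for (int p = 0; p < num_parts; ++p)
            total += parts[p * 256 + bin];
        out[bin] = total;
    }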
- * - ******************************************************************************/ - -#include -#include -#include -#include -#include -#include - -#include "histogram/histogram_gmem_atomics.h" -#include "histogram/histogram_smem_atomics.h" -#include "histogram/histogram_cub.h" - -#include -#include - -using namespace cub; - -//--------------------------------------------------------------------- -// Globals, constants, and type declarations -//--------------------------------------------------------------------- - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -bool g_verbose = false; // Whether to display input/output to console -bool g_report = false; // Whether to display a full report in CSV format -CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory - -struct less_than_value -{ - inline bool operator()( - const std::pair &a, - const std::pair &b) - { - return a.second < b.second; - } -}; - - -//--------------------------------------------------------------------- -// Targa (.tga) image file parsing -//--------------------------------------------------------------------- - -/** - * TGA image header info - */ -struct TgaHeader -{ - char idlength; - char colormaptype; - char datatypecode; - short colormaporigin; - short colormaplength; - char colormapdepth; - short x_origin; - short y_origin; - short width; - short height; - char bitsperpixel; - char imagedescriptor; - - void Parse (FILE *fptr) - { - idlength = fgetc(fptr); - colormaptype = fgetc(fptr); - datatypecode = fgetc(fptr); - fread(&colormaporigin, 2, 1, fptr); - fread(&colormaplength, 2, 1, fptr); - colormapdepth = fgetc(fptr); - fread(&x_origin, 2, 1, fptr); - fread(&y_origin, 2, 1, fptr); - fread(&width, 2, 1, fptr); - fread(&height, 2, 1, fptr); - bitsperpixel = fgetc(fptr); - imagedescriptor = fgetc(fptr); - } - - void Display (FILE *fptr) - { - fprintf(fptr, "ID length: %d\n", idlength); - fprintf(fptr, "Color map type: %d\n", colormaptype); - fprintf(fptr, "Image type: %d\n", datatypecode); - fprintf(fptr, "Color map offset: %d\n", colormaporigin); - fprintf(fptr, "Color map length: %d\n", colormaplength); - fprintf(fptr, "Color map depth: %d\n", colormapdepth); - fprintf(fptr, "X origin: %d\n", x_origin); - fprintf(fptr, "Y origin: %d\n", y_origin); - fprintf(fptr, "Width: %d\n", width); - fprintf(fptr, "Height: %d\n", height); - fprintf(fptr, "Bits per pixel: %d\n", bitsperpixel); - fprintf(fptr, "Descriptor: %d\n", imagedescriptor); - } -}; - - -/** - * Decode image byte data into pixel - */ -void ParseTgaPixel(uchar4 &pixel, unsigned char *tga_pixel, int bytes) -{ - if (bytes == 4) - { - pixel.x = tga_pixel[2]; - pixel.y = tga_pixel[1]; - pixel.z = tga_pixel[0]; - pixel.w = tga_pixel[3]; - } - else if (bytes == 3) - { - pixel.x = tga_pixel[2]; - pixel.y = tga_pixel[1]; - pixel.z = tga_pixel[0]; - pixel.w = 0; - } - else if (bytes == 2) - { - pixel.x = (tga_pixel[1] & 0x7c) << 1; - pixel.y = ((tga_pixel[1] & 0x03) << 6) | ((tga_pixel[0] & 0xe0) >> 2); - pixel.z = (tga_pixel[0] & 0x1f) << 3; - pixel.w = (tga_pixel[1] & 0x80); - } -} - - -/** - * Reads a .tga image file - */ -void ReadTga(uchar4* &pixels, int &width, int &height, const char *filename) -{ - // Open the file - FILE *fptr; - if ((fptr = fopen(filename, "rb")) == NULL) - { - fprintf(stderr, "File open failed\n"); - exit(-1); - } - - // Parse header - TgaHeader header; - header.Parse(fptr); -// header.Display(stdout); - width = header.width; - height = header.height; - - // Verify compatibility - 
if (header.datatypecode != 2 && header.datatypecode != 10) - { - fprintf(stderr, "Can only handle image type 2 and 10\n"); - exit(-1); - } - if (header.bitsperpixel != 16 && header.bitsperpixel != 24 && header.bitsperpixel != 32) - { - fprintf(stderr, "Can only handle pixel depths of 16, 24, and 32\n"); - exit(-1); - } - if (header.colormaptype != 0 && header.colormaptype != 1) - { - fprintf(stderr, "Can only handle color map types of 0 and 1\n"); - exit(-1); - } - - // Skip unnecessary header info - int skip_bytes = header.idlength + (header.colormaptype * header.colormaplength); - fseek(fptr, skip_bytes, SEEK_CUR); - - // Read the image - int pixel_bytes = header.bitsperpixel / 8; - - // Allocate and initialize pixel data - size_t image_bytes = width * height * sizeof(uchar4); - if ((pixels == NULL) && ((pixels = (uchar4*) malloc(image_bytes)) == NULL)) - { - fprintf(stderr, "malloc of image failed\n"); - exit(-1); - } - memset(pixels, 0, image_bytes); - - // Parse pixels - unsigned char tga_pixel[5]; - int current_pixel = 0; - while (current_pixel < header.width * header.height) - { - if (header.datatypecode == 2) - { - // Uncompressed - if (fread(tga_pixel, 1, pixel_bytes, fptr) != pixel_bytes) - { - fprintf(stderr, "Unexpected end of file at pixel %d (uncompressed)\n", current_pixel); - exit(-1); - } - ParseTgaPixel(pixels[current_pixel], tga_pixel, pixel_bytes); - current_pixel++; - } - else if (header.datatypecode == 10) - { - // Compressed - if (fread(tga_pixel, 1, pixel_bytes + 1, fptr) != pixel_bytes + 1) - { - fprintf(stderr, "Unexpected end of file at pixel %d (compressed)\n", current_pixel); - exit(-1); - } - int run_length = tga_pixel[0] & 0x7f; - ParseTgaPixel(pixels[current_pixel], &(tga_pixel[1]), pixel_bytes); - current_pixel++; - - if (tga_pixel[0] & 0x80) - { - // RLE chunk - for (int i = 0; i < run_length; i++) - { - ParseTgaPixel(pixels[current_pixel], &(tga_pixel[1]), pixel_bytes); - current_pixel++; - } - } - else - { - // Normal chunk - for (int i = 0; i < run_length; i++) - { - if (fread(tga_pixel, 1, pixel_bytes, fptr) != pixel_bytes) - { - fprintf(stderr, "Unexpected end of file at pixel %d (normal)\n", current_pixel); - exit(-1); - } - ParseTgaPixel(pixels[current_pixel], tga_pixel, pixel_bytes); - current_pixel++; - } - } - } - } - - // Close file - fclose(fptr); -} - - - -//--------------------------------------------------------------------- -// Random image generation -//--------------------------------------------------------------------- - -/** - * Generate a random image with specified entropy - */ -void GenerateRandomImage(uchar4* &pixels, int width, int height, int entropy_reduction) -{ - int num_pixels = width * height; - size_t image_bytes = num_pixels * sizeof(uchar4); - if ((pixels == NULL) && ((pixels = (uchar4*) malloc(image_bytes)) == NULL)) - { - fprintf(stderr, "malloc of image failed\n"); - exit(-1); - } - - for (int i = 0; i < num_pixels; ++i) - { - RandomBits(pixels[i].x, entropy_reduction); - RandomBits(pixels[i].y, entropy_reduction); - RandomBits(pixels[i].z, entropy_reduction); - RandomBits(pixels[i].w, entropy_reduction); - } -} - - - -//--------------------------------------------------------------------- -// Histogram verification -//--------------------------------------------------------------------- - -// Decode float4 pixel into bins -template -void DecodePixelGold(float4 pixel, unsigned int (&bins)[ACTIVE_CHANNELS]) -{ - float* samples = reinterpret_cast(&pixel); - - for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL) - 
bins[CHANNEL] = (unsigned int) (samples[CHANNEL] * float(NUM_BINS)); -} - -// Decode uchar4 pixel into bins -template -void DecodePixelGold(uchar4 pixel, unsigned int (&bins)[ACTIVE_CHANNELS]) -{ - unsigned char* samples = reinterpret_cast(&pixel); - - for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL) - bins[CHANNEL] = (unsigned int) (samples[CHANNEL]); -} - -// Decode uchar1 pixel into bins -template -void DecodePixelGold(uchar1 pixel, unsigned int (&bins)[ACTIVE_CHANNELS]) -{ - bins[0] = (unsigned int) pixel.x; -} - - -// Compute reference histogram. Specialized for uchar4 -template < - int ACTIVE_CHANNELS, - int NUM_BINS, - typename PixelType> -void HistogramGold(PixelType *image, int width, int height, unsigned int* hist) -{ - memset(hist, 0, ACTIVE_CHANNELS * NUM_BINS * sizeof(unsigned int)); - - for (int i = 0; i < width; i++) - { - for (int j = 0; j < height; j++) - { - PixelType pixel = image[i + j * width]; - - unsigned int bins[ACTIVE_CHANNELS]; - DecodePixelGold(pixel, bins); - - for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL) - { - hist[(NUM_BINS * CHANNEL) + bins[CHANNEL]]++; - } - } - } -} - - -//--------------------------------------------------------------------- -// Test execution -//--------------------------------------------------------------------- - -/** - * Run a specific histogram implementation - */ -template < - int ACTIVE_CHANNELS, - int NUM_BINS, - typename PixelType> -void RunTest( - std::vector >& timings, - PixelType* d_pixels, - const int width, - const int height, - unsigned int * d_hist, - unsigned int * h_hist, - int timing_iterations, - const char * long_name, - const char * short_name, - double (*f)(PixelType*, int, int, unsigned int*, bool)) -{ - if (!g_report) printf("%s ", long_name); fflush(stdout); - - // Run single test to verify (and code cache) - (*f)(d_pixels, width, height, d_hist, !g_report); - - int compare = CompareDeviceResults(h_hist, d_hist, ACTIVE_CHANNELS * NUM_BINS, true, g_verbose); - if (!g_report) printf("\t%s\n", compare ? 
"FAIL" : "PASS"); fflush(stdout); - - double elapsed_ms = 0; - for (int i = 0; i < timing_iterations; i++) - { - elapsed_ms += (*f)(d_pixels, width, height, d_hist, false); - } - double avg_us = (elapsed_ms / timing_iterations) * 1000; // average in us - timings.push_back(std::pair(short_name, avg_us)); - - if (!g_report) - { - printf("Avg time %.3f us (%d iterations)\n", avg_us, timing_iterations); fflush(stdout); - } - else - { - printf("%.3f, ", avg_us); fflush(stdout); - } - - AssertEquals(0, compare); -} - - -/** - * Evaluate corpus of histogram implementations - */ -template < - int NUM_CHANNELS, - int ACTIVE_CHANNELS, - int NUM_BINS, - typename PixelType> -void TestMethods( - PixelType* h_pixels, - int height, - int width, - int timing_iterations, - double bandwidth_GBs) -{ - // Copy data to gpu - PixelType* d_pixels; - size_t pixel_bytes = width * height * sizeof(PixelType); - CubDebugExit(g_allocator.DeviceAllocate((void**) &d_pixels, pixel_bytes)); - CubDebugExit(cudaMemcpy(d_pixels, h_pixels, pixel_bytes, cudaMemcpyHostToDevice)); - - if (g_report) printf("%.3f, ", double(pixel_bytes) / bandwidth_GBs / 1000); - - // Allocate results arrays on cpu/gpu - unsigned int *h_hist; - unsigned int *d_hist; - size_t histogram_bytes = NUM_BINS * ACTIVE_CHANNELS * sizeof(unsigned int); - h_hist = (unsigned int *) malloc(histogram_bytes); - g_allocator.DeviceAllocate((void **) &d_hist, histogram_bytes); - - // Compute reference cpu histogram - HistogramGold(h_pixels, width, height, h_hist); - - // Store timings - std::vector > timings; - - // Run experiments - RunTest(timings, d_pixels, width, height, d_hist, h_hist, timing_iterations, - "CUB", "CUB", run_cub_histogram); - RunTest(timings, d_pixels, width, height, d_hist, h_hist, timing_iterations, - "Shared memory atomics", "smem atomics", run_smem_atomics); - RunTest(timings, d_pixels, width, height, d_hist, h_hist, timing_iterations, - "Global memory atomics", "gmem atomics", run_gmem_atomics); - - // Report timings - if (!g_report) - { - std::sort(timings.begin(), timings.end(), less_than_value()); - printf("Timings (us):\n"); - for (int i = 0; i < timings.size(); i++) - { - double bandwidth = height * width * sizeof(PixelType) / timings[i].second / 1000; - printf("\t %.3f %s (%.3f GB/s, %.3f%% peak)\n", timings[i].second, timings[i].first.c_str(), bandwidth, bandwidth / bandwidth_GBs * 100); - } - printf("\n"); - } - - // Free data - CubDebugExit(g_allocator.DeviceFree(d_pixels)); - CubDebugExit(g_allocator.DeviceFree(d_hist)); - free(h_hist); -} - - -/** - * Test different problem genres - */ -void TestGenres( - uchar4* uchar4_pixels, - int height, - int width, - int timing_iterations, - double bandwidth_GBs) -{ - int num_pixels = width * height; - - { - if (!g_report) printf("1 channel uchar1 tests (256-bin):\n\n"); fflush(stdout); - - size_t image_bytes = num_pixels * sizeof(uchar1); - uchar1* uchar1_pixels = (uchar1*) malloc(image_bytes); - - // Convert to 1-channel (averaging first 3 channels) - for (int i = 0; i < num_pixels; ++i) - { - uchar1_pixels[i].x = (unsigned char) - (((unsigned int) uchar4_pixels[i].x + - (unsigned int) uchar4_pixels[i].y + - (unsigned int) uchar4_pixels[i].z) / 3); - } - - TestMethods<1, 1, 256>(uchar1_pixels, width, height, timing_iterations, bandwidth_GBs); - free(uchar1_pixels); - if (g_report) printf(", "); - } - - { - if (!g_report) printf("3/4 channel uchar4 tests (256-bin):\n\n"); fflush(stdout); - TestMethods<4, 3, 256>(uchar4_pixels, width, height, timing_iterations, bandwidth_GBs); - if 
(g_report) printf(", "); - } - - { - if (!g_report) printf("3/4 channel float4 tests (256-bin):\n\n"); fflush(stdout); - size_t image_bytes = num_pixels * sizeof(float4); - float4* float4_pixels = (float4*) malloc(image_bytes); - - // Convert to float4 with range [0.0, 1.0) - for (int i = 0; i < num_pixels; ++i) - { - float4_pixels[i].x = float(uchar4_pixels[i].x) / 256; - float4_pixels[i].y = float(uchar4_pixels[i].y) / 256; - float4_pixels[i].z = float(uchar4_pixels[i].z) / 256; - float4_pixels[i].w = float(uchar4_pixels[i].w) / 256; - } - TestMethods<4, 3, 256>(float4_pixels, width, height, timing_iterations, bandwidth_GBs); - free(float4_pixels); - if (g_report) printf("\n"); - } -} - - -/** - * Main - */ -int main(int argc, char **argv) -{ - // Initialize command line - CommandLineArgs args(argc, argv); - if (args.CheckCmdLineFlag("help")) - { - printf( - "%s " - "[--device=] " - "[--v] " - "[--i=] " - "\n\t" - "--file=<.tga filename> " - "\n\t" - "--entropy=<-1 (0%), 0 (100%), 1 (81%), 2 (54%), 3 (34%), 4 (20%), ..." - "[--height=] " - "[--width=] " - "\n", argv[0]); - exit(0); - } - - std::string filename; - int timing_iterations = 100; - int entropy_reduction = 0; - int height = 1080; - int width = 1920; - - g_verbose = args.CheckCmdLineFlag("v"); - g_report = args.CheckCmdLineFlag("report"); - args.GetCmdLineArgument("i", timing_iterations); - args.GetCmdLineArgument("file", filename); - args.GetCmdLineArgument("height", height); - args.GetCmdLineArgument("width", width); - args.GetCmdLineArgument("entropy", entropy_reduction); - - // Initialize device - CubDebugExit(args.DeviceInit()); - - // Get GPU device bandwidth (GB/s) - int device_ordinal, bus_width, mem_clock_khz; - CubDebugExit(cudaGetDevice(&device_ordinal)); - CubDebugExit(cudaDeviceGetAttribute(&bus_width, cudaDevAttrGlobalMemoryBusWidth, device_ordinal)); - CubDebugExit(cudaDeviceGetAttribute(&mem_clock_khz, cudaDevAttrMemoryClockRate, device_ordinal)); - double bandwidth_GBs = double(bus_width) * mem_clock_khz * 2 / 8 / 1000 / 1000; - - // Run test(s) - uchar4* uchar4_pixels = NULL; - if (!g_report) - { - if (!filename.empty()) - { - // Parse targa file - ReadTga(uchar4_pixels, width, height, filename.c_str()); - printf("File %s: width(%d) height(%d)\n\n", filename.c_str(), width, height); fflush(stdout); - } - else - { - // Generate image - GenerateRandomImage(uchar4_pixels, width, height, entropy_reduction); - printf("Random image: entropy-reduction(%d) width(%d) height(%d)\n\n", entropy_reduction, width, height); fflush(stdout); - } - - TestGenres(uchar4_pixels, height, width, timing_iterations, bandwidth_GBs); - } - else - { - // Run test suite - printf("Test, MIN, RLE CUB, SMEM, GMEM, , MIN, RLE_CUB, SMEM, GMEM, , MIN, RLE_CUB, SMEM, GMEM\n"); - - // Entropy reduction tests - for (entropy_reduction = 0; entropy_reduction < 5; ++entropy_reduction) - { - printf("entropy reduction %d, ", entropy_reduction); - GenerateRandomImage(uchar4_pixels, width, height, entropy_reduction); - TestGenres(uchar4_pixels, height, width, timing_iterations, bandwidth_GBs); - } - printf("entropy reduction -1, "); - GenerateRandomImage(uchar4_pixels, width, height, -1); - TestGenres(uchar4_pixels, height, width, timing_iterations, bandwidth_GBs); - printf("\n"); - - // File image tests - std::vector file_tests; - file_tests.push_back("animals"); - file_tests.push_back("apples"); - file_tests.push_back("sunset"); - file_tests.push_back("cheetah"); - file_tests.push_back("nature"); - file_tests.push_back("operahouse"); - 
file_tests.push_back("austin"); - file_tests.push_back("cityscape"); - - for (int i = 0; i < file_tests.size(); ++i) - { - printf("%s, ", file_tests[i].c_str()); - std::string filename = std::string("histogram/benchmark/") + file_tests[i] + ".tga"; - ReadTga(uchar4_pixels, width, height, filename.c_str()); - TestGenres(uchar4_pixels, height, width, timing_iterations, bandwidth_GBs); - } - } - - free(uchar4_pixels); - - CubDebugExit(cudaDeviceSynchronize()); - printf("\n\n"); - - return 0; -} diff --git a/ml-xgboost/cub/experimental/sparse_matrix.h b/ml-xgboost/cub/experimental/sparse_matrix.h deleted file mode 100644 index 7681c64..0000000 --- a/ml-xgboost/cub/experimental/sparse_matrix.h +++ /dev/null @@ -1,1244 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
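For reference, the bandwidth_GBs baseline that main() computes above comes straight from two device attributes: the memory bus width in bits and the memory clock in kHz, doubled for DDR. Restated as a standalone helper with worked numbers (the 384-bit / 3004000 kHz figures are assumptions for illustration):

    // Theoretical peak DRAM bandwidth from device attributes.
    // Example: 384-bit bus at 3004000 kHz -> 48 B/transfer * 3.004e9 Hz * 2 / 1e9 ~= 288 GB/s
    double peak_gbs(int bus_width_bits, int mem_clock_khz)
    {
        return double(bus_width_bits) / 8.0      // bus width: bits -> bytes per transfer
             * (double(mem_clock_khz) * 1000.0)  // clock: kHz -> Hz
             * 2.0                               // double data rate: two transfers per clock
             / 1.0e9;                            // bytes/s -> GB/s
    }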
- * - ******************************************************************************/ - -/****************************************************************************** - * Matrix data structures and parsing logic - ******************************************************************************/ - -#pragma once - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef CUB_MKL - #include - #include -#endif - -using namespace std; - -/****************************************************************************** - * COO matrix type - ******************************************************************************/ - -struct GraphStats -{ - int num_rows; - int num_cols; - int num_nonzeros; - - double diag_dist_mean; // mean - double diag_dist_std_dev; // sample std dev - double pearson_r; // coefficient of variation - - double row_length_mean; // mean - double row_length_std_dev; // sample std_dev - double row_length_variation; // coefficient of variation - double row_length_skewness; // skewness - - void Display(bool show_labels = true) - { - if (show_labels) - printf("\n" - "\t num_rows: %d\n" - "\t num_cols: %d\n" - "\t num_nonzeros: %d\n" - "\t diag_dist_mean: %.2f\n" - "\t diag_dist_std_dev: %.2f\n" - "\t pearson_r: %f\n" - "\t row_length_mean: %.5f\n" - "\t row_length_std_dev: %.5f\n" - "\t row_length_variation: %.5f\n" - "\t row_length_skewness: %.5f\n", - num_rows, - num_cols, - num_nonzeros, - diag_dist_mean, - diag_dist_std_dev, - pearson_r, - row_length_mean, - row_length_std_dev, - row_length_variation, - row_length_skewness); - else - printf( - "%d, " - "%d, " - "%d, " - "%.2f, " - "%.2f, " - "%f, " - "%.5f, " - "%.5f, " - "%.5f, " - "%.5f, ", - num_rows, - num_cols, - num_nonzeros, - diag_dist_mean, - diag_dist_std_dev, - pearson_r, - row_length_mean, - row_length_std_dev, - row_length_variation, - row_length_skewness); - } -}; - - - -/****************************************************************************** - * COO matrix type - ******************************************************************************/ - - -/** - * COO matrix type. A COO matrix is just a vector of edge tuples. Tuples are sorted - * first by row, then by column. 
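The "sorted first by row, then by column" invariant above is plain lexicographic order on (row, col), exactly what CooTuple::operator< below encodes. For a quick sanity check, std::pair's built-in comparison gives the same ordering (sketch only):

    #include <algorithm>
    #include <utility>
    #include <vector>

    int main()
    {
        std::vector<std::pair<int, int> > t = {{1, 2}, {0, 5}, {1, 0}, {0, 1}};
        std::sort(t.begin(), t.end());   // pair's operator< is exactly row-major order
        // result: (0,1) (0,5) (1,0) (1,2)
        return 0;
    }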
- */ -template -struct CooMatrix -{ - //--------------------------------------------------------------------- - // Type definitions and constants - //--------------------------------------------------------------------- - - // COO edge tuple - struct CooTuple - { - OffsetT row; - OffsetT col; - ValueT val; - - CooTuple() {} - CooTuple(OffsetT row, OffsetT col) : row(row), col(col) {} - CooTuple(OffsetT row, OffsetT col, ValueT val) : row(row), col(col), val(val) {} - - /** - * Comparator for sorting COO sparse format num_nonzeros - */ - bool operator<(const CooTuple &other) const - { - if ((row < other.row) || ((row == other.row) && (col < other.col))) - { - return true; - } - - return false; - } - }; - - - //--------------------------------------------------------------------- - // Data members - //--------------------------------------------------------------------- - - // Fields - int num_rows; - int num_cols; - int num_nonzeros; - CooTuple* coo_tuples; - - //--------------------------------------------------------------------- - // Methods - //--------------------------------------------------------------------- - - // Constructor - CooMatrix() : num_rows(0), num_cols(0), num_nonzeros(0), coo_tuples(NULL) {} - - - /** - * Clear - */ - void Clear() - { - if (coo_tuples) delete[] coo_tuples; - coo_tuples = NULL; - } - - - // Destructor - ~CooMatrix() - { - Clear(); - } - - - // Display matrix to stdout - void Display() - { - cout << "COO Matrix (" << num_rows << " rows, " << num_cols << " columns, " << num_nonzeros << " non-zeros):\n"; - cout << "Ordinal, Row, Column, Value\n"; - for (int i = 0; i < num_nonzeros; i++) - { - cout << '\t' << i << ',' << coo_tuples[i].row << ',' << coo_tuples[i].col << ',' << coo_tuples[i].val << "\n"; - } - } - - - /** - * Builds a symmetric COO sparse from an asymmetric CSR matrix. - */ - template - void InitCsrSymmetric(CsrMatrixT &csr_matrix) - { - if (coo_tuples) - { - fprintf(stderr, "Matrix already constructed\n"); - exit(1); - } - - num_rows = csr_matrix.num_cols; - num_cols = csr_matrix.num_rows; - num_nonzeros = csr_matrix.num_nonzeros * 2; - coo_tuples = new CooTuple[num_nonzeros]; - - for (OffsetT row = 0; row < csr_matrix.num_rows; ++row) - { - for (OffsetT nonzero = csr_matrix.row_offsets[row]; nonzero < csr_matrix.row_offsets[row + 1]; ++nonzero) - { - coo_tuples[nonzero].row = row; - coo_tuples[nonzero].col = csr_matrix.column_indices[nonzero]; - coo_tuples[nonzero].val = csr_matrix.values[nonzero]; - - coo_tuples[csr_matrix.num_nonzeros + nonzero].row = coo_tuples[nonzero].col; - coo_tuples[csr_matrix.num_nonzeros + nonzero].col = coo_tuples[nonzero].row; - coo_tuples[csr_matrix.num_nonzeros + nonzero].val = csr_matrix.values[nonzero]; - - } - } - - // Sort by rows, then columns - std::stable_sort(coo_tuples, coo_tuples + num_nonzeros); - } - - /** - * Builds a COO sparse from a relabeled CSR matrix. 
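InitCsrSymmetric above symmetrizes by emitting a transposed twin (col, row, val) for every stored nonzero and then letting one stable sort restore row-major order; note that diagonal entries are mirrored onto themselves and so end up duplicated. The core move, sketched on raw arrays (names illustrative):

    struct Tuple { int row, col; double val; };

    // Write the original nonzeros followed by their transposed twins into out,
    // which must have capacity 2 * nnz; the caller sorts by (row, col) afterwards.
    void mirror(const Tuple *in, int nnz, Tuple *out)
    {
        for (int i = 0; i < nnz; ++i)
        {
            out[i] = in[i];
            out[nnz + i].row = in[i].col;
            out[nnz + i].col = in[i].row;
            out[nnz + i].val = in[i].val;
        }
    }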
- */ - template - void InitCsrRelabel(CsrMatrixT &csr_matrix, OffsetT* relabel_indices) - { - if (coo_tuples) - { - fprintf(stderr, "Matrix already constructed\n"); - exit(1); - } - - num_rows = csr_matrix.num_rows; - num_cols = csr_matrix.num_cols; - num_nonzeros = csr_matrix.num_nonzeros; - coo_tuples = new CooTuple[num_nonzeros]; - - for (OffsetT row = 0; row < num_rows; ++row) - { - for (OffsetT nonzero = csr_matrix.row_offsets[row]; nonzero < csr_matrix.row_offsets[row + 1]; ++nonzero) - { - coo_tuples[nonzero].row = relabel_indices[row]; - coo_tuples[nonzero].col = relabel_indices[csr_matrix.column_indices[nonzero]]; - coo_tuples[nonzero].val = csr_matrix.values[nonzero]; - } - } - - // Sort by rows, then columns - std::stable_sort(coo_tuples, coo_tuples + num_nonzeros); - } - - - - /** - * Builds a METIS COO sparse from the given file. - */ - void InitMetis(const string &metis_filename) - { - if (coo_tuples) - { - fprintf(stderr, "Matrix already constructed\n"); - exit(1); - } - - // TODO - } - - - /** - * Builds a MARKET COO sparse from the given file. - */ - void InitMarket( - const string& market_filename, - ValueT default_value = 1.0, - bool verbose = false) - { - if (verbose) { - printf("Reading... "); fflush(stdout); - } - - if (coo_tuples) - { - fprintf(stderr, "Matrix already constructed\n"); - exit(1); - } - - std::ifstream ifs; - ifs.open(market_filename.c_str(), std::ifstream::in); - if (!ifs.good()) - { - fprintf(stderr, "Error opening file\n"); - exit(1); - } - - bool array = false; - bool symmetric = false; - bool skew = false; - int current_edge = -1; - char line[1024]; - - if (verbose) { - printf("Parsing... "); fflush(stdout); - } - - while (true) - { - ifs.getline(line, 1024); - if (!ifs.good()) - { - // Done - break; - } - - if (line[0] == '%') - { - // Comment - if (line[1] == '%') - { - // Banner - symmetric = (strstr(line, "symmetric") != NULL); - skew = (strstr(line, "skew") != NULL); - array = (strstr(line, "array") != NULL); - - if (verbose) { - printf("(symmetric: %d, skew: %d, array: %d) ", symmetric, skew, array); fflush(stdout); - } - } - } - else if (current_edge == -1) - { - // Problem description - int nparsed = sscanf(line, "%d %d %d", &num_rows, &num_cols, &num_nonzeros); - if ((!array) && (nparsed == 3)) - { - if (symmetric) - num_nonzeros *= 2; - - // Allocate coo matrix - coo_tuples = new CooTuple[num_nonzeros]; - current_edge = 0; - - } - else if (array && (nparsed == 2)) - { - // Allocate coo matrix - num_nonzeros = num_rows * num_cols; - coo_tuples = new CooTuple[num_nonzeros]; - current_edge = 0; - } - else - { - fprintf(stderr, "Error parsing MARKET matrix: invalid problem description: %s\n", line); - exit(1); - } - - } - else - { - // Edge - if (current_edge >= num_nonzeros) - { - fprintf(stderr, "Error parsing MARKET matrix: encountered more than %d num_nonzeros\n", num_nonzeros); - exit(1); - } - - int row, col; - double val; - - if (array) - { - if (sscanf(line, "%lf", &val) != 1) - { - fprintf(stderr, "Error parsing MARKET matrix: badly formed current_edge: '%s' at edge %d\n", line, current_edge); - exit(1); - } - col = (current_edge / num_rows); - row = (current_edge - (num_rows * col)); - - coo_tuples[current_edge] = CooTuple(row, col, val); // Convert indices to zero-based - } - else - { - // Parse nonzero (note: using strtol and strtod is 2x faster than sscanf or istream parsing) - char *l = line; - char *t = NULL; - - // parse row - row = strtol(l, &t, 0); - if (t == l) - { - fprintf(stderr, "Error parsing MARKET matrix: badly 
formed row at edge %d\n", current_edge); - exit(1); - } - l = t; - - // parse col - col = strtol(l, &t, 0); - if (t == l) - { - fprintf(stderr, "Error parsing MARKET matrix: badly formed col at edge %d\n", current_edge); - exit(1); - } - l = t; - - // parse val - val = strtod(l, &t); - if (t == l) - { - val = default_value; - } -/* - int nparsed = sscanf(line, "%d %d %lf", &row, &col, &val); - if (nparsed == 2) - { - // No value specified - val = default_value; - - } - else if (nparsed != 3) - { - fprintf(stderr, "Error parsing MARKET matrix 1: badly formed current_edge: %d parsed at edge %d\n", nparsed, current_edge); - exit(1); - } -*/ - - coo_tuples[current_edge] = CooTuple(row - 1, col - 1, val); // Convert indices to zero-based - - } - - current_edge++; - - if (symmetric && (row != col)) - { - coo_tuples[current_edge].row = coo_tuples[current_edge - 1].col; - coo_tuples[current_edge].col = coo_tuples[current_edge - 1].row; - coo_tuples[current_edge].val = coo_tuples[current_edge - 1].val * (skew ? -1 : 1); - current_edge++; - } - } - } - - // Adjust nonzero count (nonzeros along the diagonal aren't reversed) - num_nonzeros = current_edge; - - if (verbose) { - printf("done. Ordering..."); fflush(stdout); - } - - // Sort by rows, then columns - std::stable_sort(coo_tuples, coo_tuples + num_nonzeros); - - if (verbose) { - printf("done. "); fflush(stdout); - } - - ifs.close(); - } - - - /** - * Builds a dense matrix - */ - int InitDense( - OffsetT num_rows, - OffsetT num_cols, - ValueT default_value = 1.0, - bool verbose = false) - { - if (coo_tuples) - { - fprintf(stderr, "Matrix already constructed\n"); - exit(1); - } - - this->num_rows = num_rows; - this->num_cols = num_cols; - - num_nonzeros = num_rows * num_cols; - coo_tuples = new CooTuple[num_nonzeros]; - - for (OffsetT row = 0; row < num_rows; ++row) - { - for (OffsetT col = 0; col < num_cols; ++col) - { - coo_tuples[(row * num_cols) + col] = CooTuple(row, col, default_value); - } - } - - // Sort by rows, then columns - std::stable_sort(coo_tuples, coo_tuples + num_nonzeros); - - return 0; - } - - /** - * Builds a wheel COO sparse matrix having spokes spokes. - */ - int InitWheel( - OffsetT spokes, - ValueT default_value = 1.0, - bool verbose = false) - { - if (coo_tuples) - { - fprintf(stderr, "Matrix already constructed\n"); - exit(1); - } - - num_rows = spokes + 1; - num_cols = num_rows; - num_nonzeros = spokes * 2; - coo_tuples = new CooTuple[num_nonzeros]; - - // Add spoke num_nonzeros - int current_edge = 0; - for (OffsetT i = 0; i < spokes; i++) - { - coo_tuples[current_edge] = CooTuple(0, i + 1, default_value); - current_edge++; - } - - // Add rim - for (OffsetT i = 0; i < spokes; i++) - { - OffsetT dest = (i + 1) % spokes; - coo_tuples[current_edge] = CooTuple(i + 1, dest + 1, default_value); - current_edge++; - } - - // Sort by rows, then columns - std::stable_sort(coo_tuples, coo_tuples + num_nonzeros); - - return 0; - } - - - /** - * Builds a square 2D grid CSR matrix. Interior num_vertices have degree 5 when including - * a self-loop. - * - * Returns 0 on success, 1 on failure. 
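The MARKET reader above deliberately parses each data line with strtol/strtod rather than sscanf (the in-file comment puts the speedup at about 2x), falling back to a default value for pattern-only entries and converting the format's 1-based indices to 0-based. That per-line logic, isolated into a self-contained sketch (parse_edge is an illustrative name):

    #include <cstdlib>

    // Parse one MARKET data line of the form "row col [val]".
    // Returns false on a malformed line; a missing value falls back to default_val.
    bool parse_edge(const char *line, int &row, int &col, double &val, double default_val)
    {
        char *end = 0;
        row = (int) std::strtol(line, &end, 0);
        if (end == line) return false;           // badly formed row
        line = end;
        col = (int) std::strtol(line, &end, 0);
        if (end == line) return false;           // badly formed col
        line = end;
        val = std::strtod(line, &end);
        if (end == line) val = default_val;      // pattern-only entry: no value field
        --row; --col;                            // MARKET is 1-based; convert to 0-based
        return true;
    }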
- */ - int InitGrid2d(OffsetT width, bool self_loop, ValueT default_value = 1.0) - { - if (coo_tuples) - { - fprintf(stderr, "Matrix already constructed\n"); - exit(1); - } - - int interior_nodes = (width - 2) * (width - 2); - int edge_nodes = (width - 2) * 4; - int corner_nodes = 4; - num_rows = width * width; - num_cols = num_rows; - num_nonzeros = (interior_nodes * 4) + (edge_nodes * 3) + (corner_nodes * 2); - - if (self_loop) - num_nonzeros += num_rows; - - coo_tuples = new CooTuple[num_nonzeros]; - int current_edge = 0; - - for (OffsetT j = 0; j < width; j++) - { - for (OffsetT k = 0; k < width; k++) - { - OffsetT me = (j * width) + k; - - // West - OffsetT neighbor = (j * width) + (k - 1); - if (k - 1 >= 0) { - coo_tuples[current_edge] = CooTuple(me, neighbor, default_value); - current_edge++; - } - - // East - neighbor = (j * width) + (k + 1); - if (k + 1 < width) { - coo_tuples[current_edge] = CooTuple(me, neighbor, default_value); - current_edge++; - } - - // North - neighbor = ((j - 1) * width) + k; - if (j - 1 >= 0) { - coo_tuples[current_edge] = CooTuple(me, neighbor, default_value); - current_edge++; - } - - // South - neighbor = ((j + 1) * width) + k; - if (j + 1 < width) { - coo_tuples[current_edge] = CooTuple(me, neighbor, default_value); - current_edge++; - } - - if (self_loop) - { - neighbor = me; - coo_tuples[current_edge] = CooTuple(me, neighbor, default_value); - current_edge++; - } - } - } - - // Sort by rows, then columns, update dims - std::stable_sort(coo_tuples, coo_tuples + num_nonzeros); - - return 0; - } - - - /** - * Builds a square 3D grid COO sparse matrix. Interior num_vertices have degree 7 when including - * a self-loop. Values are unintialized, coo_tuples are sorted. - */ - int InitGrid3d(OffsetT width, bool self_loop, ValueT default_value = 1.0) - { - if (coo_tuples) - { - fprintf(stderr, "Matrix already constructed\n"); - return -1; - } - - OffsetT interior_nodes = (width - 2) * (width - 2) * (width - 2); - OffsetT face_nodes = (width - 2) * (width - 2) * 6; - OffsetT edge_nodes = (width - 2) * 12; - OffsetT corner_nodes = 8; - num_cols = width * width * width; - num_rows = num_cols; - num_nonzeros = (interior_nodes * 6) + (face_nodes * 5) + (edge_nodes * 4) + (corner_nodes * 3); - - if (self_loop) - num_nonzeros += num_rows; - - coo_tuples = new CooTuple[num_nonzeros]; - int current_edge = 0; - - for (OffsetT i = 0; i < width; i++) - { - for (OffsetT j = 0; j < width; j++) - { - for (OffsetT k = 0; k < width; k++) - { - - OffsetT me = (i * width * width) + (j * width) + k; - - // Up - OffsetT neighbor = (i * width * width) + (j * width) + (k - 1); - if (k - 1 >= 0) { - coo_tuples[current_edge] = CooTuple(me, neighbor, default_value); - current_edge++; - } - - // Down - neighbor = (i * width * width) + (j * width) + (k + 1); - if (k + 1 < width) { - coo_tuples[current_edge] = CooTuple(me, neighbor, default_value); - current_edge++; - } - - // West - neighbor = (i * width * width) + ((j - 1) * width) + k; - if (j - 1 >= 0) { - coo_tuples[current_edge] = CooTuple(me, neighbor, default_value); - current_edge++; - } - - // East - neighbor = (i * width * width) + ((j + 1) * width) + k; - if (j + 1 < width) { - coo_tuples[current_edge] = CooTuple(me, neighbor, default_value); - current_edge++; - } - - // North - neighbor = ((i - 1) * width * width) + (j * width) + k; - if (i - 1 >= 0) { - coo_tuples[current_edge] = CooTuple(me, neighbor, default_value); - current_edge++; - } - - // South - neighbor = ((i + 1) * width * width) + (j * width) + k; - if (i + 1 < 
width) { - coo_tuples[current_edge] = CooTuple(me, neighbor, default_value); - current_edge++; - } - - if (self_loop) - { - neighbor = me; - coo_tuples[current_edge] = CooTuple(me, neighbor, default_value); - current_edge++; - } - } - } - } - - // Sort by rows, then columns, update dims - std::stable_sort(coo_tuples, coo_tuples + num_nonzeros); - - return 0; - } -}; - - - -/****************************************************************************** - * COO matrix type - ******************************************************************************/ - - -/** - * CSR sparse format matrix - */ -template< - typename ValueT, - typename OffsetT> -struct CsrMatrix -{ - int num_rows; - int num_cols; - int num_nonzeros; - OffsetT* row_offsets; - OffsetT* column_indices; - ValueT* values; - bool numa_malloc; - - /** - * Constructor - */ - CsrMatrix() : num_rows(0), num_cols(0), num_nonzeros(0), row_offsets(NULL), column_indices(NULL), values(NULL) - { -#ifdef CUB_MKL - numa_malloc = ((numa_available() >= 0) && (numa_num_task_nodes() > 1)); -#else - numa_malloc = false; -#endif - } - - - /** - * Clear - */ - void Clear() - { -#ifdef CUB_MKL - if (numa_malloc) - { - numa_free(row_offsets, sizeof(OffsetT) * (num_rows + 1)); - numa_free(values, sizeof(ValueT) * num_nonzeros); - numa_free(column_indices, sizeof(OffsetT) * num_nonzeros); - } - else - { - if (row_offsets) mkl_free(row_offsets); - if (column_indices) mkl_free(column_indices); - if (values) mkl_free(values); - } - -#else - if (row_offsets) delete[] row_offsets; - if (column_indices) delete[] column_indices; - if (values) delete[] values; -#endif - - row_offsets = NULL; - column_indices = NULL; - values = NULL; - } - - /** - * Destructor - */ - ~CsrMatrix() - { - Clear(); - } - - GraphStats Stats() - { - GraphStats stats; - stats.num_rows = num_rows; - stats.num_cols = num_cols; - stats.num_nonzeros = num_nonzeros; - - // - // Compute diag-distance statistics - // - - OffsetT samples = 0; - double mean = 0.0; - double ss_tot = 0.0; - - for (OffsetT row = 0; row < num_rows; ++row) - { - OffsetT nz_idx_start = row_offsets[row]; - OffsetT nz_idx_end = row_offsets[row + 1]; - - for (int nz_idx = nz_idx_start; nz_idx < nz_idx_end; ++nz_idx) - { - OffsetT col = column_indices[nz_idx]; - double x = (col > row) ? 
col - row : row - col; - - samples++; - double delta = x - mean; - mean = mean + (delta / samples); - ss_tot += delta * (x - mean); - } - } - stats.diag_dist_mean = mean; - double variance = ss_tot / samples; - stats.diag_dist_std_dev = sqrt(variance); - - - // - // Compute deming statistics - // - - samples = 0; - double mean_x = 0.0; - double mean_y = 0.0; - double ss_x = 0.0; - double ss_y = 0.0; - - for (OffsetT row = 0; row < num_rows; ++row) - { - OffsetT nz_idx_start = row_offsets[row]; - OffsetT nz_idx_end = row_offsets[row + 1]; - - for (int nz_idx = nz_idx_start; nz_idx < nz_idx_end; ++nz_idx) - { - OffsetT col = column_indices[nz_idx]; - - samples++; - double x = col; - double y = row; - double delta; - - delta = x - mean_x; - mean_x = mean_x + (delta / samples); - ss_x += delta * (x - mean_x); - - delta = y - mean_y; - mean_y = mean_y + (delta / samples); - ss_y += delta * (y - mean_y); - } - } - - samples = 0; - double s_xy = 0.0; - double s_xxy = 0.0; - double s_xyy = 0.0; - for (OffsetT row = 0; row < num_rows; ++row) - { - OffsetT nz_idx_start = row_offsets[row]; - OffsetT nz_idx_end = row_offsets[row + 1]; - - for (int nz_idx = nz_idx_start; nz_idx < nz_idx_end; ++nz_idx) - { - OffsetT col = column_indices[nz_idx]; - - samples++; - double x = col; - double y = row; - - double xy = (x - mean_x) * (y - mean_y); - double xxy = (x - mean_x) * (x - mean_x) * (y - mean_y); - double xyy = (x - mean_x) * (y - mean_y) * (y - mean_y); - double delta; - - delta = xy - s_xy; - s_xy = s_xy + (delta / samples); - - delta = xxy - s_xxy; - s_xxy = s_xxy + (delta / samples); - - delta = xyy - s_xyy; - s_xyy = s_xyy + (delta / samples); - } - } - - double s_xx = ss_x / num_nonzeros; - double s_yy = ss_y / num_nonzeros; - - double deming_slope = (s_yy - s_xx + sqrt(((s_yy - s_xx) * (s_yy - s_xx)) + (4 * s_xy * s_xy))) / (2 * s_xy); - - stats.pearson_r = (num_nonzeros * s_xy) / (sqrt(ss_x) * sqrt(ss_y)); - - - // - // Compute row-length statistics - // - - // Sample mean - stats.row_length_mean = double(num_nonzeros) / num_rows; - variance = 0.0; - stats.row_length_skewness = 0.0; - for (OffsetT row = 0; row < num_rows; ++row) - { - OffsetT length = row_offsets[row + 1] - row_offsets[row]; - double delta = double(length) - stats.row_length_mean; - variance += (delta * delta); - stats.row_length_skewness += (delta * delta * delta); - } - variance /= num_rows; - stats.row_length_std_dev = sqrt(variance); - stats.row_length_skewness = (stats.row_length_skewness / num_rows) / pow(stats.row_length_std_dev, 3.0); - stats.row_length_variation = stats.row_length_std_dev / stats.row_length_mean; - - return stats; - } - - /** - * Build CSR matrix from sorted COO matrix - */ - void FromCoo(const CooMatrix &coo_matrix) - { - num_rows = coo_matrix.num_rows; - num_cols = coo_matrix.num_cols; - num_nonzeros = coo_matrix.num_nonzeros; - -#ifdef CUB_MKL - - if (numa_malloc) - { - numa_set_strict(1); -// numa_set_bind_policy(1); - -// values = (ValueT*) numa_alloc_interleaved(sizeof(ValueT) * num_nonzeros); -// row_offsets = (OffsetT*) numa_alloc_interleaved(sizeof(OffsetT) * (num_rows + 1)); -// column_indices = (OffsetT*) numa_alloc_interleaved(sizeof(OffsetT) * num_nonzeros); - - row_offsets = (OffsetT*) numa_alloc_onnode(sizeof(OffsetT) * (num_rows + 1), 0); - column_indices = (OffsetT*) numa_alloc_onnode(sizeof(OffsetT) * num_nonzeros, 0); - values = (ValueT*) numa_alloc_onnode(sizeof(ValueT) * num_nonzeros, 1); - } - else - { - values = (ValueT*) mkl_malloc(sizeof(ValueT) * num_nonzeros, 4096); - 
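The running mean and std-dev updates in Stats() above (mean += delta / samples; ss += delta * (x - mean)) are Welford's online algorithm, which produces a numerically stable variance in a single pass. Pulled out into a minimal standalone helper (a sketch, not part of the deleted file):

    // Welford's online mean/variance: push one sample at a time.
    struct Welford
    {
        long long n;
        double mean, m2;

        Welford() : n(0), mean(0.0), m2(0.0) {}

        void push(double x)
        {
            ++n;
            double delta = x - mean;
            mean += delta / n;
            m2 += delta * (x - mean);   // deliberately uses the *updated* mean
        }

        // population variance (divides by n), matching ss_tot / samples in Stats()
        double variance() const { return (n > 0) ? m2 / n : 0.0; }
    };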
row_offsets = (OffsetT*) mkl_malloc(sizeof(OffsetT) * (num_rows + 1), 4096); - column_indices = (OffsetT*) mkl_malloc(sizeof(OffsetT) * num_nonzeros, 4096); - - } - -#else - row_offsets = new OffsetT[num_rows + 1]; - column_indices = new OffsetT[num_nonzeros]; - values = new ValueT[num_nonzeros]; -#endif - - OffsetT prev_row = -1; - for (OffsetT current_edge = 0; current_edge < num_nonzeros; current_edge++) - { - OffsetT current_row = coo_matrix.coo_tuples[current_edge].row; - - // Fill in rows up to and including the current row - for (OffsetT row = prev_row + 1; row <= current_row; row++) - { - row_offsets[row] = current_edge; - } - prev_row = current_row; - - column_indices[current_edge] = coo_matrix.coo_tuples[current_edge].col; - values[current_edge] = coo_matrix.coo_tuples[current_edge].val; - } - - // Fill out any trailing edgeless vertices (and the end-of-list element) - for (OffsetT row = prev_row + 1; row <= num_rows; row++) - { - row_offsets[row] = num_nonzeros; - } - } - - - /** - * Display log-histogram to stdout - */ - void DisplayHistogram() - { - // Initialize - int log_counts[9]; - for (int i = 0; i < 9; i++) - { - log_counts[i] = 0; - } - - // Scan - int max_log_length = -1; - for (OffsetT row = 0; row < num_rows; row++) - { - OffsetT length = row_offsets[row + 1] - row_offsets[row]; - - int log_length = -1; - while (length > 0) - { - length /= 10; - log_length++; - } - if (log_length > max_log_length) - { - max_log_length = log_length; - } - - log_counts[log_length + 1]++; - } - printf("CSR matrix (%d rows, %d columns, %d non-zeros):\n", (int) num_rows, (int) num_cols, (int) num_nonzeros); - for (int i = -1; i < max_log_length + 1; i++) - { - printf("\tDegree 1e%d: \t%d (%.2f%%)\n", i, log_counts[i + 1], (float) log_counts[i + 1] * 100.0 / num_cols); - } - fflush(stdout); - } - - - /** - * Display matrix to stdout - */ - void Display() - { - printf("Input Matrix:\n"); - for (OffsetT row = 0; row < num_rows; row++) - { - printf("%d [@%d, #%d]: ", row, row_offsets[row], row_offsets[row + 1] - row_offsets[row]); - for (OffsetT current_edge = row_offsets[row]; current_edge < row_offsets[row + 1]; current_edge++) - { - printf("%d (%f), ", column_indices[current_edge], values[current_edge]); - } - printf("\n"); - } - fflush(stdout); - } - - -}; - - - -/****************************************************************************** - * Matrix transformations - ******************************************************************************/ - -// Comparator for ordering rows by degree (lowest first), then by row-id (lowest first) -template -struct OrderByLow -{ - OffsetT* row_degrees; - OrderByLow(OffsetT* row_degrees) : row_degrees(row_degrees) {} - - bool operator()(const OffsetT &a, const OffsetT &b) - { - if (row_degrees[a] < row_degrees[b]) - return true; - else if (row_degrees[a] > row_degrees[b]) - return false; - else - return (a < b); - } -}; - -// Comparator for ordering rows by degree (highest first), then by row-id (lowest first) -template -struct OrderByHigh -{ - OffsetT* row_degrees; - OrderByHigh(OffsetT* row_degrees) : row_degrees(row_degrees) {} - - bool operator()(const OffsetT &a, const OffsetT &b) - { - if (row_degrees[a] > row_degrees[b]) - return true; - else if (row_degrees[a] < row_degrees[b]) - return false; - else - return (a < b); - } -}; - - - -/** - * Reverse Cuthill-McKee - */ -template -void RcmRelabel( - CsrMatrix& matrix, - OffsetT* relabel_indices) -{ - // Initialize row degrees - OffsetT* row_degrees_in = new OffsetT[matrix.num_rows]; - OffsetT* 
row_degrees_out = new OffsetT[matrix.num_rows]; - for (OffsetT row = 0; row < matrix.num_rows; ++row) - { - row_degrees_in[row] = 0; - row_degrees_out[row] = matrix.row_offsets[row + 1] - matrix.row_offsets[row]; - } - for (OffsetT nonzero = 0; nonzero < matrix.num_nonzeros; ++nonzero) - { - row_degrees_in[matrix.column_indices[nonzero]]++; - } - - // Initialize unlabeled set - typedef std::set > UnlabeledSet; - typename UnlabeledSet::key_compare unlabeled_comp(row_degrees_in); - UnlabeledSet unlabeled(unlabeled_comp); - for (OffsetT row = 0; row < matrix.num_rows; ++row) - { - relabel_indices[row] = -1; - unlabeled.insert(row); - } - - // Initialize queue set - std::deque q; - - // Process unlabeled vertices (traverse connected components) - OffsetT relabel_idx = 0; - while (!unlabeled.empty()) - { - // Seed the unvisited frontier queue with the unlabeled vertex of lowest-degree - OffsetT vertex = *unlabeled.begin(); - q.push_back(vertex); - - while (!q.empty()) - { - vertex = q.front(); - q.pop_front(); - - if (relabel_indices[vertex] == -1) - { - // Update this vertex - unlabeled.erase(vertex); - relabel_indices[vertex] = relabel_idx; - relabel_idx++; - - // Sort neighbors by degree - OrderByLow neighbor_comp(row_degrees_in); - std::sort( - matrix.column_indices + matrix.row_offsets[vertex], - matrix.column_indices + matrix.row_offsets[vertex + 1], - neighbor_comp); - - // Inspect neighbors, adding to the out frontier if unlabeled - for (OffsetT neighbor_idx = matrix.row_offsets[vertex]; - neighbor_idx < matrix.row_offsets[vertex + 1]; - ++neighbor_idx) - { - OffsetT neighbor = matrix.column_indices[neighbor_idx]; - q.push_back(neighbor); - } - } - } - } - -/* - // Reverse labels - for (int row = 0; row < matrix.num_rows; ++row) - { - relabel_indices[row] = matrix.num_rows - relabel_indices[row] - 1; - } -*/ - - // Cleanup - if (row_degrees_in) delete[] row_degrees_in; - if (row_degrees_out) delete[] row_degrees_out; -} - - -/** - * Reverse Cuthill-McKee - */ -template -void RcmRelabel( - CsrMatrix& matrix, - bool verbose = false) -{ - // Do not process if not square - if (matrix.num_cols != matrix.num_rows) - { - if (verbose) { - printf("RCM transformation ignored (not square)\n"); fflush(stdout); - } - return; - } - - // Initialize relabel indices - OffsetT* relabel_indices = new OffsetT[matrix.num_rows]; - - if (verbose) { - printf("RCM relabeling... "); fflush(stdout); - } - - RcmRelabel(matrix, relabel_indices); - - if (verbose) { - printf("done. Reconstituting... "); fflush(stdout); - } - - // Create a COO matrix from the relabel indices - CooMatrix coo_matrix; - coo_matrix.InitCsrRelabel(matrix, relabel_indices); - - // Reconstitute the CSR matrix from the sorted COO tuples - if (relabel_indices) delete[] relabel_indices; - matrix.Clear(); - matrix.FromCoo(coo_matrix); - - if (verbose) { - printf("done. "); fflush(stdout); - } -} - - - - diff --git a/ml-xgboost/cub/experimental/spmv_compare.cu b/ml-xgboost/cub/experimental/spmv_compare.cu deleted file mode 100644 index 5554ecb..0000000 --- a/ml-xgboost/cub/experimental/spmv_compare.cu +++ /dev/null @@ -1,917 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. 
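Worth noting: as written, RcmRelabel computes a Cuthill-McKee ordering, because the label reversal that puts the "Reverse" in RCM is the commented-out loop near the end. Re-enabling it amounts to:

    // Flip Cuthill-McKee labels to obtain the Reverse Cuthill-McKee ordering.
    template <typename OffsetT>
    void ReverseLabels(OffsetT *relabel_indices, OffsetT num_rows)
    {
        for (OffsetT row = 0; row < num_rows; ++row)
            relabel_indices[row] = num_rows - relabel_indices[row] - 1;
    }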
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of the NVIDIA CORPORATION nor the
- *       names of its contributors may be used to endorse or promote products
- *       derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- ******************************************************************************/
-
-//---------------------------------------------------------------------
-// SpMV comparison tool
-//---------------------------------------------------------------------
-
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-
-#include "sparse_matrix.h"
-
-// Ensure printing of CUDA runtime errors to console
-#define CUB_STDERR
-
-#include
-#include
-#include
-#include
-
-using namespace cub;
-
-
-//---------------------------------------------------------------------
-// Globals, constants, and type declarations
-//---------------------------------------------------------------------
-
-bool g_quiet = false;                       // Whether to display stats in CSV format
-bool g_verbose = false;                     // Whether to display output to console
-bool g_verbose2 = false;                    // Whether to display input to console
-CachingDeviceAllocator g_allocator(true);   // Caching allocator for device memory
-
-
-//---------------------------------------------------------------------
-// SpMV verification
-//---------------------------------------------------------------------
-
-// Compute reference SpMV y = Ax
-template <
-    typename ValueT,
-    typename OffsetT>
-void SpmvGold(
-    CsrMatrix<ValueT, OffsetT>& a,
-    ValueT* vector_x,
-    ValueT* vector_y_in,
-    ValueT* vector_y_out,
-    ValueT alpha,
-    ValueT beta)
-{
-    for (OffsetT row = 0; row < a.num_rows; ++row)
-    {
-        ValueT partial = beta * vector_y_in[row];
-        for (
-            OffsetT offset = a.row_offsets[row];
-            offset < a.row_offsets[row + 1];
-            ++offset)
-        {
-            partial += alpha * a.values[offset] * vector_x[a.column_indices[offset]];
-        }
-        vector_y_out[row] = partial;
-    }
-}
-
-
-//---------------------------------------------------------------------
-// GPU I/O proxy
-//---------------------------------------------------------------------
-
-/**
- * Read every matrix nonzero value, read every corresponding vector value
- */
-template <
-    int BLOCK_THREADS,
-    int ITEMS_PER_THREAD,
-    typename ValueT,
-    typename OffsetT,
-    typename VectorItr>
-__launch_bounds__ (int(BLOCK_THREADS)) -__global__ void NonZeroIoKernel( - SpmvParams params, - VectorItr d_vector_x) -{ - enum - { - TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, - }; - - - ValueT nonzero = 0.0; - - int tile_idx = blockIdx.x; - - OffsetT block_offset = tile_idx * TILE_ITEMS; - - OffsetT column_indices[ITEMS_PER_THREAD]; - ValueT values[ITEMS_PER_THREAD]; - - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - OffsetT nonzero_idx = block_offset + (ITEM * BLOCK_THREADS) + threadIdx.x; - - OffsetT* ci = params.d_column_indices + nonzero_idx; - ValueT*a = params.d_values + nonzero_idx; - - column_indices[ITEM] = (nonzero_idx < params.num_nonzeros) ? *ci : 0; - values[ITEM] = (nonzero_idx < params.num_nonzeros) ? *a : 0.0; - } - - __syncthreads(); - - // Read vector - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - ValueT vector_value = ThreadLoad(params.d_vector_x + column_indices[ITEM]); - nonzero += vector_value * values[ITEM]; - } - - __syncthreads(); - - if (block_offset < params.num_rows) - { - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - OffsetT row_idx = block_offset + (ITEM * BLOCK_THREADS) + threadIdx.x; - if (row_idx < params.num_rows) - { - OffsetT row_end_offset = ThreadLoad(params.d_row_end_offsets + row_idx); - - if ((row_end_offset >= 0) && (nonzero == nonzero)) - params.d_vector_y[row_idx] = nonzero; - } - } - } - -} - - -/** - * Run GPU I/O proxy - */ -template < - typename ValueT, - typename OffsetT> -float TestGpuCsrIoProxy( - SpmvParams& params, - int timing_iterations) -{ - enum { - BLOCK_THREADS = 128, - ITEMS_PER_THREAD = 7, - TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD, - }; - -// size_t smem = 1024 * 16; - size_t smem = 1024 * 0; - - unsigned int nonzero_blocks = (params.num_nonzeros + TILE_SIZE - 1) / TILE_SIZE; - unsigned int row_blocks = (params.num_rows + TILE_SIZE - 1) / TILE_SIZE; - unsigned int blocks = std::max(nonzero_blocks, row_blocks); - - typedef TexRefInputIterator TexItr; - TexItr x_itr; - CubDebugExit(x_itr.BindTexture(params.d_vector_x)); - - // Get device ordinal - int device_ordinal; - CubDebugExit(cudaGetDevice(&device_ordinal)); - - // Get device SM version - int sm_version; - CubDebugExit(SmVersion(sm_version, device_ordinal)); - - void (*kernel)(SpmvParams, TexItr) = NonZeroIoKernel; - - - int spmv_sm_occupancy; - CubDebugExit(MaxSmOccupancy(spmv_sm_occupancy, kernel, BLOCK_THREADS, smem)); - - if (!g_quiet) - printf("NonZeroIoKernel<%d,%d><<<%d, %d>>>, sm occupancy %d\n", BLOCK_THREADS, ITEMS_PER_THREAD, blocks, BLOCK_THREADS, spmv_sm_occupancy); - - // Warmup - NonZeroIoKernel<<>>(params, x_itr); - - // Check for failures - CubDebugExit(cudaPeekAtLastError()); - CubDebugExit(SyncStream(0)); - - // Timing - GpuTimer timer; - float elapsed_millis = 0.0; - timer.Start(); - for (int it = 0; it < timing_iterations; ++it) - { - NonZeroIoKernel<<>>(params, x_itr); - } - timer.Stop(); - elapsed_millis += timer.ElapsedMillis(); - - CubDebugExit(x_itr.UnbindTexture()); - - return elapsed_millis / timing_iterations; -} - - - -//--------------------------------------------------------------------- -// cuSparse HybMV -//--------------------------------------------------------------------- - -/** - * Run cuSparse HYB SpMV (specialized for fp32) - */ -template < - typename OffsetT> -float TestCusparseHybmv( - float* vector_y_in, - float* reference_vector_y_out, - SpmvParams& params, - int timing_iterations, - cusparseHandle_t cusparse) -{ - CpuTimer cpu_timer; 
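NonZeroIoKernel above is a bandwidth proxy rather than a real SpMV: it reads every value, column index, row offset, and the corresponding x entries once, so its runtime approximates the compulsory-traffic floor for CsrMV. One common way to turn a measured time into an effective-bandwidth figure is to count those bytes directly; the following is a sketch under the assumption of one x read per nonzero and no cache reuse. (Separately, note that the fp64 test paths below reset d_vector_y with sizeof(float) rather than sizeof(double).)

    // Minimum bytes a CSR SpMV must move, in GB (assumes no cache reuse of x).
    template <typename ValueT, typename OffsetT>
    double spmv_min_gbytes(OffsetT num_rows, OffsetT num_nonzeros)
    {
        double bytes =
            double(num_nonzeros) * (sizeof(ValueT) + sizeof(OffsetT))   // matrix values + column indices
          + double(num_rows + 1) * sizeof(OffsetT)                      // row offsets
          + double(num_nonzeros) * sizeof(ValueT)                       // gathers from vector x
          + double(num_rows) * sizeof(ValueT);                          // writes to vector y
        return bytes / 1.0e9;
    }
    // effective GB/s ~= spmv_min_gbytes<double, int>(rows, nnz) / (elapsed_ms / 1000.0)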
-    cpu_timer.Start();
-
-    // Construct Hyb matrix
-    cusparseMatDescr_t mat_desc;
-    cusparseHybMat_t hyb_desc;
-    AssertEquals(CUSPARSE_STATUS_SUCCESS, cusparseCreateMatDescr(&mat_desc));
-    AssertEquals(CUSPARSE_STATUS_SUCCESS, cusparseCreateHybMat(&hyb_desc));
-    cusparseStatus_t status = cusparseScsr2hyb(
-        cusparse,
-        params.num_rows, params.num_cols,
-        mat_desc,
-        params.d_values, params.d_row_end_offsets, params.d_column_indices,
-        hyb_desc,
-        0,
-        CUSPARSE_HYB_PARTITION_AUTO);
-    AssertEquals(CUSPARSE_STATUS_SUCCESS, status);
-
-    cudaDeviceSynchronize();
-    cpu_timer.Stop();
-    float elapsed_millis = cpu_timer.ElapsedMillis();
-    printf("HYB setup ms, %.5f, ", elapsed_millis);
-
-    // Reset input/output vector y
-    CubDebugExit(cudaMemcpy(params.d_vector_y, vector_y_in, sizeof(float) * params.num_rows, cudaMemcpyHostToDevice));
-
-    // Warmup
-    AssertEquals(CUSPARSE_STATUS_SUCCESS, cusparseShybmv(
-        cusparse,
-        CUSPARSE_OPERATION_NON_TRANSPOSE,
-        &params.alpha, mat_desc,
-        hyb_desc,
-        params.d_vector_x, &params.beta, params.d_vector_y));
-
-    if (!g_quiet)
-    {
-        int compare = CompareDeviceResults(reference_vector_y_out, params.d_vector_y, params.num_rows, true, g_verbose);
-        printf("\t%s\n", compare ? "FAIL" : "PASS"); fflush(stdout);
-    }
-
-    // Timing
-    elapsed_millis = 0.0;
-    GpuTimer timer;
-
-    timer.Start();
-    for(int it = 0; it < timing_iterations; ++it)
-    {
-        AssertEquals(CUSPARSE_STATUS_SUCCESS, cusparseShybmv(
-            cusparse,
-            CUSPARSE_OPERATION_NON_TRANSPOSE,
-            &params.alpha, mat_desc,
-            hyb_desc,
-            params.d_vector_x, &params.beta, params.d_vector_y));
-    }
-    timer.Stop();
-    elapsed_millis += timer.ElapsedMillis();
-
-    // Cleanup
-    AssertEquals(CUSPARSE_STATUS_SUCCESS, cusparseDestroyHybMat(hyb_desc));
-    AssertEquals(CUSPARSE_STATUS_SUCCESS, cusparseDestroyMatDescr(mat_desc));
-
-    return elapsed_millis / timing_iterations;
-}
-
-
-/**
- * Run cuSparse HYB SpMV (specialized for fp64)
- */
-template <
-    typename OffsetT>
-float TestCusparseHybmv(
-    double* vector_y_in,
-    double* reference_vector_y_out,
-    SpmvParams<double, OffsetT>& params,
-    int timing_iterations,
-    cusparseHandle_t cusparse)
-{
-    CpuTimer cpu_timer;
-    cpu_timer.Start();
-
-    // Construct Hyb matrix
-    cusparseMatDescr_t mat_desc;
-    cusparseHybMat_t hyb_desc;
-    AssertEquals(CUSPARSE_STATUS_SUCCESS, cusparseCreateMatDescr(&mat_desc));
-    AssertEquals(CUSPARSE_STATUS_SUCCESS, cusparseCreateHybMat(&hyb_desc));
-    AssertEquals(CUSPARSE_STATUS_SUCCESS, cusparseDcsr2hyb(
-        cusparse,
-        params.num_rows, params.num_cols,
-        mat_desc,
-        params.d_values, params.d_row_end_offsets, params.d_column_indices,
-        hyb_desc,
-        0,
-        CUSPARSE_HYB_PARTITION_AUTO));
-
-    cudaDeviceSynchronize();
-    cpu_timer.Stop();
-    float elapsed_millis = cpu_timer.ElapsedMillis();
-    printf("HYB setup ms, %.5f, ", elapsed_millis);
-
-    // Reset input/output vector y
-    CubDebugExit(cudaMemcpy(params.d_vector_y, vector_y_in, sizeof(double) * params.num_rows, cudaMemcpyHostToDevice));
-
-    // Warmup
-    AssertEquals(CUSPARSE_STATUS_SUCCESS, cusparseDhybmv(
-        cusparse,
-        CUSPARSE_OPERATION_NON_TRANSPOSE,
-        &params.alpha, mat_desc,
-        hyb_desc,
-        params.d_vector_x, &params.beta, params.d_vector_y));
-
-    if (!g_quiet)
-    {
-        int compare = CompareDeviceResults(reference_vector_y_out, params.d_vector_y, params.num_rows, true, g_verbose);
-        printf("\t%s\n", compare ? "FAIL" : "PASS"); fflush(stdout);
-    }
-
-    // Timing
-    elapsed_millis = 0.0;
-    GpuTimer timer;
-
-    timer.Start();
-    for(int it = 0; it < timing_iterations; ++it)
-    {
-        AssertEquals(CUSPARSE_STATUS_SUCCESS, cusparseDhybmv(
-            cusparse,
-            CUSPARSE_OPERATION_NON_TRANSPOSE,
-            &params.alpha, mat_desc,
-            hyb_desc,
-            params.d_vector_x, &params.beta, params.d_vector_y));
-    }
-    timer.Stop();
-    elapsed_millis += timer.ElapsedMillis();
-
-    // Cleanup
-    AssertEquals(CUSPARSE_STATUS_SUCCESS, cusparseDestroyHybMat(hyb_desc));
-    AssertEquals(CUSPARSE_STATUS_SUCCESS, cusparseDestroyMatDescr(mat_desc));
-
-    return elapsed_millis / timing_iterations;
-}
-
-
-
-//---------------------------------------------------------------------
-// cuSparse CsrMV
-//---------------------------------------------------------------------
-
-/**
- * Run cuSparse SpMV (specialized for fp32)
- */
-template <
-    typename OffsetT>
-float TestCusparseCsrmv(
-    float* vector_y_in,
-    float* reference_vector_y_out,
-    SpmvParams<float, OffsetT>& params,
-    int timing_iterations,
-    cusparseHandle_t cusparse)
-{
-    cusparseMatDescr_t desc;
-    AssertEquals(CUSPARSE_STATUS_SUCCESS, cusparseCreateMatDescr(&desc));
-
-    // Reset input/output vector y
-    CubDebugExit(cudaMemcpy(params.d_vector_y, vector_y_in, sizeof(float) * params.num_rows, cudaMemcpyHostToDevice));
-
-    // Warmup
-    AssertEquals(CUSPARSE_STATUS_SUCCESS, cusparseScsrmv(
-        cusparse, CUSPARSE_OPERATION_NON_TRANSPOSE,
-        params.num_rows, params.num_cols, params.num_nonzeros, &params.alpha, desc,
-        params.d_values, params.d_row_end_offsets, params.d_column_indices,
-        params.d_vector_x, &params.beta, params.d_vector_y));
-
-    if (!g_quiet)
-    {
-        int compare = CompareDeviceResults(reference_vector_y_out, params.d_vector_y, params.num_rows, true, g_verbose);
-        printf("\t%s\n", compare ? "FAIL" : "PASS"); fflush(stdout);
-    }
-
-    // Timing
-    float elapsed_millis = 0.0;
-    GpuTimer timer;
-
-    timer.Start();
-    for(int it = 0; it < timing_iterations; ++it)
-    {
-        AssertEquals(CUSPARSE_STATUS_SUCCESS, cusparseScsrmv(
-            cusparse, CUSPARSE_OPERATION_NON_TRANSPOSE,
-            params.num_rows, params.num_cols, params.num_nonzeros, &params.alpha, desc,
-            params.d_values, params.d_row_end_offsets, params.d_column_indices,
-            params.d_vector_x, &params.beta, params.d_vector_y));
-    }
-    timer.Stop();
-    elapsed_millis += timer.ElapsedMillis();
-
-    AssertEquals(CUSPARSE_STATUS_SUCCESS, cusparseDestroyMatDescr(desc));
-    return elapsed_millis / timing_iterations;
-}
-
-
-/**
- * Run cuSparse SpMV (specialized for fp64)
- */
-template <
-    typename OffsetT>
-float TestCusparseCsrmv(
-    double* vector_y_in,
-    double* reference_vector_y_out,
-    SpmvParams<double, OffsetT>& params,
-    int timing_iterations,
-    cusparseHandle_t cusparse)
-{
-    cusparseMatDescr_t desc;
-    AssertEquals(CUSPARSE_STATUS_SUCCESS, cusparseCreateMatDescr(&desc));
-
-    // Reset input/output vector y
-    CubDebugExit(cudaMemcpy(params.d_vector_y, vector_y_in, sizeof(double) * params.num_rows, cudaMemcpyHostToDevice));
-
-    // Warmup
-    AssertEquals(CUSPARSE_STATUS_SUCCESS, cusparseDcsrmv(
-        cusparse, CUSPARSE_OPERATION_NON_TRANSPOSE,
-        params.num_rows, params.num_cols, params.num_nonzeros, &params.alpha, desc,
-        params.d_values, params.d_row_end_offsets, params.d_column_indices,
-        params.d_vector_x, &params.beta, params.d_vector_y));
-
-    if (!g_quiet)
-    {
-        int compare = CompareDeviceResults(reference_vector_y_out, params.d_vector_y, params.num_rows, true, g_verbose);
-        printf("\t%s\n", compare ? "FAIL" : "PASS"); fflush(stdout);
-    }
-
-    // Timing
-    float elapsed_millis = 0.0;
-    GpuTimer timer;
-    timer.Start();
-    for(int it = 0; it < timing_iterations; ++it)
-    {
-        AssertEquals(CUSPARSE_STATUS_SUCCESS, cusparseDcsrmv(
-            cusparse, CUSPARSE_OPERATION_NON_TRANSPOSE,
-            params.num_rows, params.num_cols, params.num_nonzeros, &params.alpha, desc,
-            params.d_values, params.d_row_end_offsets, params.d_column_indices,
-            params.d_vector_x, &params.beta, params.d_vector_y));
-
-    }
-    timer.Stop();
-    elapsed_millis += timer.ElapsedMillis();
-
-    AssertEquals(CUSPARSE_STATUS_SUCCESS, cusparseDestroyMatDescr(desc));
-    return elapsed_millis / timing_iterations;
-}
-
-//---------------------------------------------------------------------
-// GPU Merge-based SpMV
-//---------------------------------------------------------------------
-
-/**
- * Run CUB SpMV
- */
-template <
-    typename ValueT,
-    typename OffsetT>
-float TestGpuMergeCsrmv(
-    ValueT* vector_y_in,
-    ValueT* reference_vector_y_out,
-    SpmvParams<ValueT, OffsetT>& params,
-    int timing_iterations)
-{
-    // Allocate temporary storage
-    size_t temp_storage_bytes = 0;
-    void *d_temp_storage = NULL;
-
-    // Get amount of temporary storage needed
-    CubDebugExit(DeviceSpmv::CsrMV(
-        d_temp_storage, temp_storage_bytes,
-        params.d_values, params.d_row_end_offsets, params.d_column_indices,
-        params.d_vector_x, params.d_vector_y,
-        params.num_rows, params.num_cols, params.num_nonzeros,
-//        params.alpha, params.beta,
-        (cudaStream_t) 0, false));
-
-    // Allocate
-    CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
-
-    // Reset input/output vector y
-    CubDebugExit(cudaMemcpy(params.d_vector_y, vector_y_in, sizeof(ValueT) * params.num_rows, cudaMemcpyHostToDevice));
-
-    // Warmup
-    CubDebugExit(DeviceSpmv::CsrMV(
-        d_temp_storage, temp_storage_bytes,
-        params.d_values, params.d_row_end_offsets, params.d_column_indices,
-        params.d_vector_x, params.d_vector_y,
-        params.num_rows, params.num_cols, params.num_nonzeros,
-//        params.alpha, params.beta,
-        (cudaStream_t) 0, !g_quiet));
-
-    if (!g_quiet)
-    {
-        int compare = CompareDeviceResults(reference_vector_y_out, params.d_vector_y, params.num_rows, true, g_verbose);
-        printf("\t%s\n", compare ? "FAIL" : "PASS"); fflush(stdout);
-    }
-
-    // Timing
-    GpuTimer timer;
-    float elapsed_millis = 0.0;
-
-    timer.Start();
-    for(int it = 0; it < timing_iterations; ++it)
-    {
-        CubDebugExit(DeviceSpmv::CsrMV(
-            d_temp_storage, temp_storage_bytes,
-            params.d_values, params.d_row_end_offsets, params.d_column_indices,
-            params.d_vector_x, params.d_vector_y,
-            params.num_rows, params.num_cols, params.num_nonzeros,
-//            params.alpha, params.beta,
-            (cudaStream_t) 0, false));
-    }
-    timer.Stop();
-    elapsed_millis += timer.ElapsedMillis();
-
-    return elapsed_millis / timing_iterations;
-}
-
-//---------------------------------------------------------------------
-// Test generation
-//---------------------------------------------------------------------
-
-/**
- * Display perf
- */
-template <
-    typename ValueT,
-    typename OffsetT>
-void DisplayPerf(
-    float device_giga_bandwidth,
-    double avg_millis,
-    CsrMatrix<ValueT, OffsetT>& csr_matrix)
-{
-    double nz_throughput, effective_bandwidth;
-    size_t total_bytes = (csr_matrix.num_nonzeros * (sizeof(ValueT) * 2 + sizeof(OffsetT))) +
-        (csr_matrix.num_rows) * (sizeof(OffsetT) + sizeof(ValueT));
-
-    nz_throughput = double(csr_matrix.num_nonzeros) / avg_millis / 1.0e6;
-    effective_bandwidth = double(total_bytes) / avg_millis / 1.0e6;
-
-    if (!g_quiet)
-        printf("fp%d: %.4f avg ms, %.5f gflops, %.3lf effective GB/s (%.2f%% peak)\n",
-            (int) sizeof(ValueT) * 8,
-            avg_millis,
-            2 * nz_throughput,
-            effective_bandwidth,
-            effective_bandwidth / device_giga_bandwidth * 100);
-    else
-        printf("%.5f, %.6f, %.3lf, %.2f%%, ",
-            avg_millis,
-            2 * nz_throughput,
-            effective_bandwidth,
-            effective_bandwidth / device_giga_bandwidth * 100);
-
-    fflush(stdout);
-}
-
-
-
-/**
- * Run tests
- */
-template <
-    typename ValueT,
-    typename OffsetT>
-void RunTest(
-    bool rcm_relabel,
-    ValueT alpha,
-    ValueT beta,
-    CooMatrix<ValueT, OffsetT>& coo_matrix,
-    int timing_iterations,
-    CommandLineArgs& args)
-{
-    // Adaptive timing iterations: run 16 billion nonzeros through
-    if (timing_iterations == -1)
-        timing_iterations = std::min(50000ull, std::max(100ull, ((16ull << 30) / coo_matrix.num_nonzeros)));
-
-    if (!g_quiet)
-        printf("\t%d timing iterations\n", timing_iterations);
-
-    // Convert to CSR
-    CsrMatrix<ValueT, OffsetT> csr_matrix;
-    csr_matrix.FromCoo(coo_matrix);
-    if (!args.CheckCmdLineFlag("csrmv"))
-        coo_matrix.Clear();
-
-    // Relabel
-    if (rcm_relabel)
-    {
-        if (!g_quiet)
-        {
-            csr_matrix.Stats().Display();
-            printf("\n");
-            csr_matrix.DisplayHistogram();
-            printf("\n");
-            if (g_verbose2)
-                csr_matrix.Display();
-            printf("\n");
-        }
-
-        RcmRelabel(csr_matrix, !g_quiet);
-
-        if (!g_quiet) printf("\n");
-    }
-
-    // Display matrix info
-    csr_matrix.Stats().Display(!g_quiet);
-    if (!g_quiet)
-    {
-        printf("\n");
-        csr_matrix.DisplayHistogram();
-        printf("\n");
-        if (g_verbose2)
-            csr_matrix.Display();
-        printf("\n");
-    }
-    fflush(stdout);
-
-    // Allocate input and output vectors
-    ValueT* vector_x = new ValueT[csr_matrix.num_cols];
-    ValueT* vector_y_in = new ValueT[csr_matrix.num_rows];
-    ValueT* vector_y_out = new ValueT[csr_matrix.num_rows];
-
-    for (int col = 0; col < csr_matrix.num_cols; ++col)
-        vector_x[col] = 1.0;
-
-    for (int row = 0; row < csr_matrix.num_rows; ++row)
-        vector_y_in[row] = 1.0;
-
-    // Compute reference answer
-    SpmvGold(csr_matrix, vector_x, vector_y_in, vector_y_out, alpha, beta);
-
-    float avg_millis;
-
-    if (g_quiet) {
-        printf("%s, %s, ", args.deviceProp.name, (sizeof(ValueT) > 4) ? "fp64" : "fp32"); fflush(stdout);
-    }
-
-    // Get GPU device bandwidth (GB/s)
-    float device_giga_bandwidth = args.device_giga_bandwidth;
-
-    // Allocate and initialize GPU problem
-    SpmvParams<ValueT, OffsetT> params;
-
-    CubDebugExit(g_allocator.DeviceAllocate((void **) &params.d_values, sizeof(ValueT) * csr_matrix.num_nonzeros));
-    CubDebugExit(g_allocator.DeviceAllocate((void **) &params.d_row_end_offsets, sizeof(OffsetT) * (csr_matrix.num_rows + 1)));
-    CubDebugExit(g_allocator.DeviceAllocate((void **) &params.d_column_indices, sizeof(OffsetT) * csr_matrix.num_nonzeros));
-    CubDebugExit(g_allocator.DeviceAllocate((void **) &params.d_vector_x, sizeof(ValueT) * csr_matrix.num_cols));
-    CubDebugExit(g_allocator.DeviceAllocate((void **) &params.d_vector_y, sizeof(ValueT) * csr_matrix.num_rows));
-    params.num_rows = csr_matrix.num_rows;
-    params.num_cols = csr_matrix.num_cols;
-    params.num_nonzeros = csr_matrix.num_nonzeros;
-    params.alpha = alpha;
-    params.beta = beta;
-
-    CubDebugExit(cudaMemcpy(params.d_values, csr_matrix.values, sizeof(ValueT) * csr_matrix.num_nonzeros, cudaMemcpyHostToDevice));
-    CubDebugExit(cudaMemcpy(params.d_row_end_offsets, csr_matrix.row_offsets, sizeof(OffsetT) * (csr_matrix.num_rows + 1), cudaMemcpyHostToDevice));
-    CubDebugExit(cudaMemcpy(params.d_column_indices, csr_matrix.column_indices, sizeof(OffsetT) * csr_matrix.num_nonzeros, cudaMemcpyHostToDevice));
-    CubDebugExit(cudaMemcpy(params.d_vector_x, vector_x, sizeof(ValueT) * csr_matrix.num_cols, cudaMemcpyHostToDevice));
-
-    if (!g_quiet) printf("\n\n");
-    printf("GPU CSR I/O Proxy, "); fflush(stdout);
-    avg_millis = TestGpuCsrIoProxy(params, timing_iterations);
-    DisplayPerf(device_giga_bandwidth, avg_millis, csr_matrix);
-
-    if (args.CheckCmdLineFlag("csrmv"))
-    {
-        if (!g_quiet) printf("\n\n");
-        printf("CUB, "); fflush(stdout);
-        avg_millis = TestGpuMergeCsrmv(vector_y_in, vector_y_out, params, timing_iterations);
-        DisplayPerf(device_giga_bandwidth, avg_millis, csr_matrix);
-    }
-
-    // Initialize cuSparse
-    cusparseHandle_t cusparse;
-    AssertEquals(CUSPARSE_STATUS_SUCCESS, cusparseCreate(&cusparse));
-
-    if (args.CheckCmdLineFlag("csrmv"))
-    {
-        if (!g_quiet) printf("\n\n");
-        printf("Cusparse CsrMV, "); fflush(stdout);
-        avg_millis = TestCusparseCsrmv(vector_y_in, vector_y_out, params, timing_iterations, cusparse);
-        DisplayPerf(device_giga_bandwidth, avg_millis, csr_matrix);
-    }
-
-    if (args.CheckCmdLineFlag("hybmv"))
-    {
-        if (!g_quiet) printf("\n\n");
-        printf("Cusparse HybMV, "); fflush(stdout);
-
-        avg_millis = TestCusparseHybmv(vector_y_in, vector_y_out, params, timing_iterations, cusparse);
-        DisplayPerf(device_giga_bandwidth, avg_millis, csr_matrix);
-    }
-
-
-    // Cleanup
-    if (params.d_values) CubDebugExit(g_allocator.DeviceFree(params.d_values));
-    if (params.d_row_end_offsets) CubDebugExit(g_allocator.DeviceFree(params.d_row_end_offsets));
-    if (params.d_column_indices) CubDebugExit(g_allocator.DeviceFree(params.d_column_indices));
-    if (params.d_vector_x) CubDebugExit(g_allocator.DeviceFree(params.d_vector_x));
-    if (params.d_vector_y) CubDebugExit(g_allocator.DeviceFree(params.d_vector_y));
-
-    if (vector_x) delete[] vector_x;
-    if (vector_y_in) delete[] vector_y_in;
-    if (vector_y_out) delete[] vector_y_out;
-}
-
-/**
- * Run tests
- */
-template <
-    typename ValueT,
-    typename OffsetT>
-void RunTests(
-    bool rcm_relabel,
-    ValueT alpha,
-    ValueT beta,
-    const std::string& mtx_filename,
-    int grid2d,
-    int grid3d,
-    int wheel,
-    int dense,
-    int timing_iterations,
-    CommandLineArgs& args)
-{
-    // Initialize matrix in COO form
-    CooMatrix<ValueT, OffsetT> coo_matrix;
-
-    if (!mtx_filename.empty())
-    {
-        // Parse matrix market file
-        printf("%s, ", mtx_filename.c_str()); fflush(stdout);
-        coo_matrix.InitMarket(mtx_filename, 1.0, !g_quiet);
-
-        if ((coo_matrix.num_rows == 1) || (coo_matrix.num_cols == 1) || (coo_matrix.num_nonzeros == 1))
-        {
-            if (!g_quiet) printf("Trivial dataset\n");
-            exit(0);
-        }
-    }
-    else if (grid2d > 0)
-    {
-        // Generate 2D lattice
-        printf("grid2d_%d, ", grid2d); fflush(stdout);
-        coo_matrix.InitGrid2d(grid2d, false);
-    }
-    else if (grid3d > 0)
-    {
-        // Generate 3D lattice
-        printf("grid3d_%d, ", grid3d); fflush(stdout);
-        coo_matrix.InitGrid3d(grid3d, false);
-    }
-    else if (wheel > 0)
-    {
-        // Generate wheel graph
-        printf("wheel_%d, ", wheel); fflush(stdout);
-        coo_matrix.InitWheel(wheel);
-    }
-    else if (dense > 0)
-    {
-        // Generate dense graph
-        OffsetT size = 1 << 24; // 16M nnz
-        args.GetCmdLineArgument("size", size);
-
-        OffsetT rows = size / dense;
-        printf("dense_%d_x_%d, ", rows, dense); fflush(stdout);
-        coo_matrix.InitDense(rows, dense);
-    }
-    else
-    {
-        fprintf(stderr, "No graph type specified.\n");
-        exit(1);
-    }
-
-    RunTest(
-        rcm_relabel,
-        alpha,
-        beta,
-        coo_matrix,
-        timing_iterations,
-        args);
-}
-
-
-
-/**
- * Main
- */
-int main(int argc, char **argv)
-{
-    // Initialize command line
-    CommandLineArgs args(argc, argv);
-    if (args.CheckCmdLineFlag("help"))
-    {
-        printf(
-            "%s "
-            "[--csrmv | --hybmv | --bsrmv ] "
-            "[--device=<device-id>] "
-            "[--quiet] "
-            "[--v] "
-            "[--i=<timing iterations>] "
-            "[--fp64] "
-            "[--rcm] "
-            "[--alpha=<alpha scalar (default: 1.0)>] "
-            "[--beta=<beta scalar (default: 0.0)>] "
-            "\n\t"
-                "--mtx=<matrix market file> "
-            "\n\t"
-                "--dense=<cols>"
-            "\n\t"
-                "--grid2d=<width>"
-            "\n\t"
-                "--grid3d=<width>"
-            "\n\t"
-                "--wheel=<spokes>"
-            "\n", argv[0]);
-        exit(0);
-    }
-
-    bool fp64;
-    bool rcm_relabel;
-    std::string mtx_filename;
-    int grid2d = -1;
-    int grid3d = -1;
-    int wheel = -1;
-    int dense = -1;
-    int timing_iterations = -1;
-    float alpha = 1.0;
-    float beta = 0.0;
-
-    g_verbose = args.CheckCmdLineFlag("v");
-    g_verbose2 = args.CheckCmdLineFlag("v2");
-    g_quiet = args.CheckCmdLineFlag("quiet");
-    fp64 = args.CheckCmdLineFlag("fp64");
-    rcm_relabel = args.CheckCmdLineFlag("rcm");
-    args.GetCmdLineArgument("i", timing_iterations);
-    args.GetCmdLineArgument("mtx", mtx_filename);
-    args.GetCmdLineArgument("grid2d", grid2d);
-    args.GetCmdLineArgument("grid3d", grid3d);
-    args.GetCmdLineArgument("wheel", wheel);
-    args.GetCmdLineArgument("dense", dense);
-    args.GetCmdLineArgument("alpha", alpha);
-    args.GetCmdLineArgument("beta", beta);
-
-    // Initialize device
-    CubDebugExit(args.DeviceInit());
-
-    // Run test(s)
-    if (fp64)
-    {
-        RunTests<double, int>(rcm_relabel, alpha, beta, mtx_filename, grid2d, grid3d, wheel, dense, timing_iterations, args);
-    }
-    else
-    {
-        RunTests<float, int>(rcm_relabel, alpha, beta, mtx_filename, grid2d, grid3d, wheel, dense, timing_iterations, args);
-    }
-
-    CubDebugExit(cudaDeviceSynchronize());
-    printf("\n");
-
-    return 0;
-}
diff --git a/ml-xgboost/cub/experimental/spmv_script.sh b/ml-xgboost/cub/experimental/spmv_script.sh
deleted file mode 100644
index f432043..0000000
--- a/ml-xgboost/cub/experimental/spmv_script.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-
-for i in 1 2 4 8 16 32 64 128 256 512 1024 2048 4096 8192 16384 32768 65536 131072 262144 524288 1048576 2097152 4194304 8388608 16777216
-do
-    echo `date`, `$1 --dense=$i $2 $3 $4 $5 $6 $7`
-done
-
-echo
-echo
-
-for i in `ls /home/dumerrill/graphs/spmv/*.mtx`
-do
-    if [[ ( "`head -n 50 $i | grep complex`" = "" ) && ( "`head -n 50 $i | grep array`" = "" ) ]]
-    then
-        echo `date`, `$1 --mtx=$i $2 $3 $4 $5 $6 $7
2>/dev/null` - fi -done - -echo -echo - -for i in `ls /scratch/dumerrill/graphs/mtx/*.mtx` -#for i in `ls /cygdrive/w/Dev/UFget/mtx/*.mtx` -do - if [[ ( "`head -n 50 $i | grep complex`" = "" ) && ( "`head -n 50 $i | grep array`" = "" ) ]] - then - echo `date`, `$1 --mtx=$i $2 $3 $4 $5 $6 $7 2>/dev/null` - fi -done - diff --git a/ml-xgboost/cub/test/.gitignore b/ml-xgboost/cub/test/.gitignore deleted file mode 100644 index 51c0abb..0000000 --- a/ml-xgboost/cub/test/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/bin -/link_main.obj diff --git a/ml-xgboost/cub/test/Makefile b/ml-xgboost/cub/test/Makefile deleted file mode 100644 index 90585f6..0000000 --- a/ml-xgboost/cub/test/Makefile +++ /dev/null @@ -1,453 +0,0 @@ -#/****************************************************************************** -# * Copyright (c) 2011, Duane Merrill. All rights reserved. -# * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. -# * -# * Redistribution and use in source and binary forms, with or without -# * modification, are permitted provided that the following conditions are met: -# * * Redistributions of source code must retain the above copyright -# * notice, this list of conditions and the following disclaimer. -# * * Redistributions in binary form must reproduce the above copyright -# * notice, this list of conditions and the following disclaimer in the -# * documentation and/or other materials provided with the distribution. -# * * Neither the name of the NVIDIA CORPORATION nor the -# * names of its contributors may be used to endorse or promote products -# * derived from this software without specific prior written permission. -# * -# * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY -# * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -# * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-# *
-#******************************************************************************/
-
-
-#-------------------------------------------------------------------------------
-#
-# Makefile usage
-#
-# make <target> [sm=<XXX,...>] [cdp=<0|1>] [force32=<0|1>] [abi=<0|1>] [open64=<0|1>] [verbose=<0|1>] [keep=<0|1>] [quicktest=<0|1>] [quickertest=<0|1>]
-#
-#-------------------------------------------------------------------------------
-
-include ../common.mk
-
-#-------------------------------------------------------------------------------
-# Commandline Options
-#-------------------------------------------------------------------------------
-
-# Testing mode option (quick/thorough)
-ifeq ($(quickertest), 1)
-	NVCCFLAGS += -DQUICKER_TEST
-	TEST_SUFFIX = quicker
-else ifeq ($(quicktest), 1)
-	NVCCFLAGS += -DQUICK_TEST
-	TEST_SUFFIX = quick
-else
-	TEST_SUFFIX = thorough
-	NPPI =
-endif
-
-
-# CUDA memcheck (enabled by default)
-ifeq ($(memcheck), 0)
-	MEMCHECK =
-else
-	MEMCHECK = cuda-memcheck
-endif
-
-
-#-------------------------------------------------------------------------------
-# Compiler and compilation platform
-#-------------------------------------------------------------------------------
-
-# Includes
-INC += -I$(CUB_DIR) -I$(CUB_DIR)test
-
-# Suffix to append to each binary
-SUFFIX = $(BIN_SUFFIX)_$(TEST_SUFFIX)
-
-# Define test arch
-DEFINES += -DTEST_ARCH=$(TEST_ARCH)
-
-
-#-------------------------------------------------------------------------------
-# Dependency Lists
-#-------------------------------------------------------------------------------
-
-rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d))
-
-DEPS =	$(CUB_DEPS) \
-	$(CUB_DIR)test/Makefile \
-	$(CUB_DIR)test/test_util.h \
-	$(CUB_DIR)test/mersenne.h \
-
-BLOCK_REDUCE =	test_block_reduce_raking \
-	test_block_reduce_warp_reductions
-
-
-BLOCK_SCAN =	test_block_scan_raking \
-	test_block_scan_raking_memoize \
-	test_block_scan_warp_scans
-
-
-BLOCK_RADIX_SORT =	test_block_radix_sort_keys \
-	test_block_radix_sort_pairs
-
-
-ALL =	link \
-	test_iterator \
-	test_allocator \
-	test_warp_scan \
-	test_warp_reduce \
-	$(BLOCK_REDUCE) \
-	$(BLOCK_SCAN) \
-	$(BLOCK_RADIX_SORT) \
-	test_block_load_store \
-	test_block_histogram \
-	test_device_reduce \
-	test_device_histogram \
-	test_device_scan \
-	test_device_radix_sort \
-	test_device_reduce_by_key\
-	test_device_run_length_encode\
-	test_device_select_unique \
-	test_device_select_if
-
-#	test_grid_barrier \ fails on sm110
-#	test_device_seg_reduce
-
-
-
-#-------------------------------------------------------------------------------
-# make default
-#-------------------------------------------------------------------------------
-
-default:
-
-
-#-------------------------------------------------------------------------------
-# make clean
-#-------------------------------------------------------------------------------
-
-clean :
-	rm -f bin/*$(CPU_ARCH_SUFFIX)*
-	rm -f *.i* *.cubin *.cu.c *.cudafe* *.fatbin.c *.ptx *.hash *.cu.cpp *.o
-
-
-#-------------------------------------------------------------------------------
-# make all
-#-------------------------------------------------------------------------------
-
-all : $(ALL)
-
-
-#-------------------------------------------------------------------------------
-# make run
-#-------------------------------------------------------------------------------
-
-run :
-	for i in $(ALL); do $(MEMCHECK) ./bin/$${i}_$(SUFFIX) --device=$(device) || exit 1; done
-
-run_block_reduce :
-	for i in
$(BLOCK_REDUCE); do $(MEMCHECK) ./bin/$${i}_$(SUFFIX) --device=$(device) || exit 1; done - -run_block_scan : - for i in $(BLOCK_SCAN); do $(MEMCHECK) ./bin/$${i}_$(SUFFIX) --device=$(device) || exit 1; done - -run_block_radix_sort : - for i in $(BLOCK_RADIX_SORT); do $(MEMCHECK) ./bin/$${i}_$(SUFFIX) --device=$(device) || exit 1; done - - - -#------------------------------------------------------------------------------- -# make link -#------------------------------------------------------------------------------- - -link : bin/link_$(SUFFIX) - -bin/link_$(SUFFIX) : link_a.cu link_b.cu link_main.cpp $(DEPS) - mkdir -p bin - $(NVCC) $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(DEFINES) $(SM_TARGETS) link_a.cu -c -o bin/link_a.obj - $(NVCC) $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(DEFINES) $(SM_TARGETS) link_b.cu -c -o bin/link_b.obj - $(NVCC) $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(DEFINES) $(SM_TARGETS) link_main.cpp bin/link_a.obj bin/link_b.obj -o bin/link_$(SUFFIX) - - -#------------------------------------------------------------------------------- -# make test_iterator -#------------------------------------------------------------------------------- - -test_iterator: bin/test_iterator_$(SUFFIX) - -bin/test_iterator_$(SUFFIX) : test_iterator.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/test_iterator_$(SUFFIX) test_iterator.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make test_allocator -#------------------------------------------------------------------------------- - -test_allocator: bin/test_allocator_$(SUFFIX) - -bin/test_allocator_$(SUFFIX) : test_allocator.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/test_allocator_$(SUFFIX) test_allocator.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make test_grid_barrier -#------------------------------------------------------------------------------- - -test_grid_barrier: bin/test_grid_barrier_$(SUFFIX) - -bin/test_grid_barrier_$(SUFFIX) : test_grid_barrier.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/test_grid_barrier_$(SUFFIX) test_grid_barrier.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make test_warp_scan -#------------------------------------------------------------------------------- - -test_warp_scan: bin/test_warp_scan_$(SUFFIX) - -bin/test_warp_scan_$(SUFFIX) : test_warp_scan.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/test_warp_scan_$(SUFFIX) test_warp_scan.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make test_warp_reduce -#------------------------------------------------------------------------------- - -test_warp_reduce: bin/test_warp_reduce_$(SUFFIX) - -bin/test_warp_reduce_$(SUFFIX) : test_warp_reduce.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/test_warp_reduce_$(SUFFIX) test_warp_reduce.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make test_block_reduce_raking -#------------------------------------------------------------------------------- - -test_block_reduce_raking: bin/test_block_reduce_raking_$(SUFFIX) - -bin/test_block_reduce_raking_$(SUFFIX) : test_block_reduce.cu $(DEPS) - mkdir -p 
bin - $(NVCC) $(DEFINES) -DTEST_RAKING $(SM_TARGETS) -o bin/test_block_reduce_raking_$(SUFFIX) test_block_reduce.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make test_block_reduce_warp_reductions -#------------------------------------------------------------------------------- - -test_block_reduce_warp_reductions: bin/test_block_reduce_warp_reductions_$(SUFFIX) - -bin/test_block_reduce_warp_reductions_$(SUFFIX) : test_block_reduce.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) -DTEST_WARP_REDUCTIONS $(SM_TARGETS) -o bin/test_block_reduce_warp_reductions_$(SUFFIX) test_block_reduce.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make test_block_reduce -#------------------------------------------------------------------------------- - -test_block_reduce: $(BLOCK_REDUCE) - - -#------------------------------------------------------------------------------- -# make test_block_scan_raking -#------------------------------------------------------------------------------- - -test_block_scan_raking: bin/test_block_scan_raking_$(SUFFIX) - -bin/test_block_scan_raking_$(SUFFIX) : test_block_scan.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) -DTEST_RAKING $(SM_TARGETS) -o bin/test_block_scan_raking_$(SUFFIX) test_block_scan.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make test_block_scan_raking_memoize -#------------------------------------------------------------------------------- - -test_block_scan_raking_memoize: bin/test_block_scan_raking_memoize_$(SUFFIX) - -bin/test_block_scan_raking_memoize_$(SUFFIX) : test_block_scan.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) -DTEST_RAKING_MEMOIZE $(SM_TARGETS) -o bin/test_block_scan_raking_memoize_$(SUFFIX) test_block_scan.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make test_block_scan_warp_scans -#------------------------------------------------------------------------------- - -test_block_scan_warp_scans: bin/test_block_scan_warp_scans_$(SUFFIX) - -bin/test_block_scan_warp_scans_$(SUFFIX) : test_block_scan.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) -DTEST_WARP_SCANS $(SM_TARGETS) -o bin/test_block_scan_warp_scans_$(SUFFIX) test_block_scan.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make test_block_scan -#------------------------------------------------------------------------------- - -test_block_scan: $(BLOCK_SCAN) - - -#------------------------------------------------------------------------------- -# make test_block_load_store -#------------------------------------------------------------------------------- - -test_block_load_store: bin/test_block_load_store_$(SUFFIX) - -bin/test_block_load_store_$(SUFFIX) : test_block_load_store.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/test_block_load_store_$(SUFFIX) test_block_load_store.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make test_block_radix_sort_keys -#------------------------------------------------------------------------------- - -test_block_radix_sort_keys: bin/test_block_radix_sort_keys_$(SUFFIX) - 
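-# (Note: each test_* rule compiles a single .cu source into bin/, optionally with
-# a -D define such as -DTEST_KEYS_ONLY selecting the variant under test.)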
-bin/test_block_radix_sort_keys_$(SUFFIX) : test_block_radix_sort.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) -DTEST_KEYS_ONLY $(SM_TARGETS) -o bin/test_block_radix_sort_keys_$(SUFFIX) test_block_radix_sort.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - -#------------------------------------------------------------------------------- -# make test_block_radix_sort_pairs -#------------------------------------------------------------------------------- - -test_block_radix_sort_pairs: bin/test_block_radix_sort_pairs_$(SUFFIX) - -bin/test_block_radix_sort_pairs_$(SUFFIX) : test_block_radix_sort.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/test_block_radix_sort_pairs_$(SUFFIX) test_block_radix_sort.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make test_block_radix_sort -#------------------------------------------------------------------------------- - -test_block_radix_sort : $(BLOCK_RADIX_SORT) - - -#------------------------------------------------------------------------------- -# make test_block_histogram -#------------------------------------------------------------------------------- - -test_block_histogram: bin/test_block_histogram_$(SUFFIX) - -bin/test_block_histogram_$(SUFFIX) : test_block_histogram.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/test_block_histogram_$(SUFFIX) test_block_histogram.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make test_device_reduce -#------------------------------------------------------------------------------- - -test_device_reduce: bin/test_device_reduce_$(SUFFIX) - -bin/test_device_reduce_$(SUFFIX) : test_device_reduce.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/test_device_reduce_$(SUFFIX) test_device_reduce.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make test_device_histogram -#------------------------------------------------------------------------------- - -test_device_histogram: bin/test_device_histogram_$(SUFFIX) - -bin/test_device_histogram_$(SUFFIX) : test_device_histogram.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/test_device_histogram_$(SUFFIX) test_device_histogram.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) $(NPPI) -O3 - - -#------------------------------------------------------------------------------- -# make test_device_scan -#------------------------------------------------------------------------------- - -test_device_scan: bin/test_device_scan_$(SUFFIX) - -bin/test_device_scan_$(SUFFIX) : test_device_scan.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/test_device_scan_$(SUFFIX) test_device_scan.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make test_device_radix_sort -#------------------------------------------------------------------------------- - -test_device_radix_sort: bin/test_device_radix_sort_$(SUFFIX) - -bin/test_device_radix_sort_$(SUFFIX) : test_device_radix_sort.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/test_device_radix_sort_$(SUFFIX) test_device_radix_sort.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 - - -#------------------------------------------------------------------------------- -# make test_device_select_unique 
-#-------------------------------------------------------------------------------
-
-test_device_select_unique: bin/test_device_select_unique_$(SUFFIX)
-
-bin/test_device_select_unique_$(SUFFIX) : test_device_select_unique.cu $(DEPS)
-	mkdir -p bin
-	$(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/test_device_select_unique_$(SUFFIX) test_device_select_unique.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3
-
-
-#-------------------------------------------------------------------------------
-# make test_device_select_if
-#-------------------------------------------------------------------------------
-
-test_device_select_if: bin/test_device_select_if_$(SUFFIX)
-
-bin/test_device_select_if_$(SUFFIX) : test_device_select_if.cu $(DEPS)
-	mkdir -p bin
-	$(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/test_device_select_if_$(SUFFIX) test_device_select_if.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3
-
-#-------------------------------------------------------------------------------
-# make test_device_reduce_by_key
-#-------------------------------------------------------------------------------
-
-test_device_reduce_by_key: bin/test_device_reduce_by_key_$(SUFFIX)
-
-bin/test_device_reduce_by_key_$(SUFFIX) : test_device_reduce_by_key.cu $(DEPS)
-	mkdir -p bin
-	$(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/test_device_reduce_by_key_$(SUFFIX) test_device_reduce_by_key.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3
-
-#-------------------------------------------------------------------------------
-# make test_device_run_length_encode
-#-------------------------------------------------------------------------------
-
-test_device_run_length_encode: bin/test_device_run_length_encode_$(SUFFIX)
-
-bin/test_device_run_length_encode_$(SUFFIX) : test_device_run_length_encode.cu $(DEPS)
-	mkdir -p bin
-	$(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/test_device_run_length_encode_$(SUFFIX) test_device_run_length_encode.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3
-
-
-
-
-#-------------------------------------------------------------------------------
-# make test_device_seg_reduce
-#-------------------------------------------------------------------------------
-#
-#test_device_seg_reduce: bin/test_device_seg_reduce_$(SUFFIX)
-#
-#bin/test_device_seg_reduce_$(SUFFIX) : test_device_seg_reduce.cu $(DEPS)
-#	mkdir -p bin
-#	$(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/test_device_seg_reduce_$(SUFFIX) test_device_seg_reduce.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3
-
-
diff --git a/ml-xgboost/cub/test/link_a.cu b/ml-xgboost/cub/test/link_a.cu
deleted file mode 100644
index 8a9b19f..0000000
--- a/ml-xgboost/cub/test/link_a.cu
+++ /dev/null
@@ -1,11 +0,0 @@
-#include <cub/cub.cuh>
-
-void a()
-{
-    printf("a() called\n");
-
-    cub::DoubleBuffer<unsigned int> d_keys;
-    cub::DoubleBuffer<unsigned int> d_values;
-    size_t temp_storage_bytes = 0;
-    cub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_keys, d_values, 1024);
-}
diff --git a/ml-xgboost/cub/test/link_b.cu b/ml-xgboost/cub/test/link_b.cu
deleted file mode 100644
index a19ec40..0000000
--- a/ml-xgboost/cub/test/link_b.cu
+++ /dev/null
@@ -1,11 +0,0 @@
-#include <cub/cub.cuh>
-
-void b()
-{
-    printf("b() called\n");
-
-    cub::DoubleBuffer<unsigned int> d_keys;
-    cub::DoubleBuffer<unsigned int> d_values;
-    size_t temp_storage_bytes = 0;
-    cub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_keys, d_values, 1024);
-}
diff --git a/ml-xgboost/cub/test/link_main.cpp b/ml-xgboost/cub/test/link_main.cpp
deleted file mode 100644
index ef677ee..0000000
--- a/ml-xgboost/cub/test/link_main.cpp
+++ /dev/null
@@ -1,10 +0,0 @@
-#include <stdio.h>
-
-extern void a();
-extern
void b();
-
-int main()
-{
-    printf("hello world\n");
-    return 0;
-}
diff --git a/ml-xgboost/cub/test/mersenne.h b/ml-xgboost/cub/test/mersenne.h
deleted file mode 100644
index 76aae80..0000000
--- a/ml-xgboost/cub/test/mersenne.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
-   A C-program for MT19937, with initialization improved 2002/1/26.
-   Coded by Takuji Nishimura and Makoto Matsumoto.
-
-   Before using, initialize the state by using init_genrand(seed)
-   or init_by_array(init_key, key_length).
-
-   Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
-   All rights reserved.
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions
-   are met:
-
-   1. Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-
-   2. Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in the
-      documentation and/or other materials provided with the distribution.
-
-   3. The names of its contributors may not be used to endorse or promote
-      products derived from this software without specific prior written
-      permission.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-   CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-   Any feedback is very welcome.
-   http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
-   email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space)
- */
-
-#include <stdio.h>
-
-namespace mersenne {
-
-/* Period parameters */
-const unsigned int N          = 624;
-const unsigned int M          = 397;
-const unsigned int MATRIX_A   = 0x9908b0df; /* constant vector a */
-const unsigned int UPPER_MASK = 0x80000000; /* most significant w-r bits */
-const unsigned int LOWER_MASK = 0x7fffffff; /* least significant r bits */
-
-static unsigned int mt[N]; /* the array for the state vector */
-static int mti = N + 1;    /* mti==N+1 means mt[N] is not initialized */
-
-/* initializes mt[N] with a seed */
-void init_genrand(unsigned int s)
-{
-    mt[0] = s & 0xffffffff;
-    for (mti = 1; mti < N; mti++)
-    {
-        mt[mti] = (1812433253 * (mt[mti - 1] ^ (mt[mti - 1] >> 30)) + mti);
-
-        /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
-        /* In the previous versions, MSBs of the seed affect   */
-        /* only MSBs of the array mt[].                        */
-        /* 2002/01/09 modified by Makoto Matsumoto             */
-
-        mt[mti] &= 0xffffffff;
-        /* for >32 bit machines */
-    }
-}
-
-/* initialize by an array with array-length */
-/* init_key is the array for initializing keys */
-/* key_length is its length */
-/* slight change for C++, 2004/2/26 */
-void init_by_array(unsigned int init_key[], int key_length)
-{
-    int i, j, k;
-    init_genrand(19650218);
-    i = 1;
-    j = 0;
-    k = (N > key_length ? N : key_length);
-    for (; k; k--)
-    {
-        mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >> 30)) * 1664525))
-            + init_key[j] + j; /* non linear */
-        mt[i] &= 0xffffffff;   /* for WORDSIZE > 32 machines */
-        i++;
-        j++;
-        if (i >= N)
-        {
-            mt[0] = mt[N - 1];
-            i = 1;
-        }
-        if (j >= key_length) j = 0;
-    }
-    for (k = N - 1; k; k--)
-    {
-        mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >> 30)) * 1566083941))
-            - i; /* non linear */
-        mt[i] &= 0xffffffff; /* for WORDSIZE > 32 machines */
-        i++;
-        if (i >= N)
-        {
-            mt[0] = mt[N - 1];
-            i = 1;
-        }
-    }
-
-    mt[0] = 0x80000000; /* MSB is 1; assuring non-zero initial array */
-}
-
-/* generates a random number on [0,0xffffffff]-interval */
-unsigned int genrand_int32(void)
-{
-    unsigned int y;
-    static unsigned int mag01[2] = { 0x0, MATRIX_A };
-
-    /* mag01[x] = x * MATRIX_A  for x=0,1 */
-
-    if (mti >= N)
-    { /* generate N words at one time */
-        int kk;
-
-        if (mti == N + 1)       /* if init_genrand() has not been called, */
-            init_genrand(5489); /* a default initial seed is used         */
-
-        for (kk = 0; kk < N - M; kk++)
-        {
-            y = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK);
-            mt[kk] = mt[kk + M] ^ (y >> 1) ^ mag01[y & 0x1];
-        }
-        for (; kk < N - 1; kk++)
-        {
-            y = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK);
-            mt[kk] = mt[kk + (M - N)] ^ (y >> 1) ^ mag01[y & 0x1];
-        }
-        y = (mt[N - 1] & UPPER_MASK) | (mt[0] & LOWER_MASK);
-        mt[N - 1] = mt[M - 1] ^ (y >> 1) ^ mag01[y & 0x1];
-
-        mti = 0;
-    }
-
-    y = mt[mti++];
-
-    /* Tempering */
-    y ^= (y >> 11);
-    y ^= (y << 7) & 0x9d2c5680;
-    y ^= (y << 15) & 0xefc60000;
-    y ^= (y >> 18);
-
-    return y;
-}
-
-
-
-} // namespace mersenne
diff --git a/ml-xgboost/cub/test/test_allocator.cu b/ml-xgboost/cub/test/test_allocator.cu
deleted file mode 100644
index 94d9df1..0000000
--- a/ml-xgboost/cub/test/test_allocator.cu
+++ /dev/null
@@ -1,459 +0,0 @@
-/******************************************************************************
- * Copyright (c) 2011, Duane Merrill.  All rights reserved.
- * Copyright (c) 2011-2016, NVIDIA CORPORATION.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of the NVIDIA CORPORATION nor the
- *       names of its contributors may be used to endorse or promote products
- *       derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- ******************************************************************************/
-
-/******************************************************************************
- * Test evaluation for caching allocator of device memory
- ******************************************************************************/
-
-// Ensure printing of CUDA runtime errors to console
-#define CUB_STDERR
-
-#include <stdio.h>
-
-#include <cub/util_allocator.cuh>
-#include "test_util.h"
-
-using namespace cub;
-
-
-//---------------------------------------------------------------------
-// Main
-//---------------------------------------------------------------------
-
-/**
- * Main
- */
-int main(int argc, char** argv)
-{
-    // Initialize command line
-    CommandLineArgs args(argc, argv);
-
-    // Print usage
-    if (args.CheckCmdLineFlag("help"))
-    {
-        printf("%s "
-            "[--device=<device-id>]"
-            "[--bytes=<timing bytes>]"
-            "[--i=<timing iterations>]"
-            "\n", argv[0]);
-        exit(0);
-    }
-
-#if (CUB_PTX_ARCH == 0)
-
-    // Initialize device
-    CubDebugExit(args.DeviceInit());
-
-    // Get number of GPUs and current GPU
-    int num_gpus;
-    int initial_gpu;
-    int timing_iterations = 10000;
-    int timing_bytes = 1024 * 1024;
-
-    if (CubDebug(cudaGetDeviceCount(&num_gpus))) exit(1);
-    if (CubDebug(cudaGetDevice(&initial_gpu))) exit(1);
-    args.GetCmdLineArgument("i", timing_iterations);
-    args.GetCmdLineArgument("bytes", timing_bytes);
-
-    // Create default allocator (caches up to 6MB in device allocations per GPU)
-    CachingDeviceAllocator allocator;
-    allocator.debug = true;
-
-    printf("Running single-gpu tests...\n"); fflush(stdout);
-
-    //
-    // Test0
-    //
-
-    // Create a new stream
-    cudaStream_t other_stream;
-    CubDebugExit(cudaStreamCreate(&other_stream));
-
-    // Allocate 999 bytes on the current gpu in stream0
-    char *d_999B_stream0_a;
-    char *d_999B_stream0_b;
-    CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_a, 999, 0));
-
-    // Run some big kernel in stream 0
-    EmptyKernel<void><<<32000, 512, 1024 * 8, 0>>>();
-
-    // Free d_999B_stream0_a
-    CubDebugExit(allocator.DeviceFree(d_999B_stream0_a));
-
-    // Allocate another 999 bytes in stream 0
-    CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_b, 999, 0));
-
-    // Check that we have 1 live block on the initial GPU
-    AssertEquals(allocator.live_blocks.size(), 1);
-
-    // Check that we have no cached blocks on the initial GPU
-    AssertEquals(allocator.cached_blocks.size(), 0);
-
-    // Run some big kernel in stream 0
-    EmptyKernel<void><<<32000, 512, 1024 * 8, 0>>>();
-
-    // Free d_999B_stream0_b
-    CubDebugExit(allocator.DeviceFree(d_999B_stream0_b));
-
-    // Allocate 999 bytes on the current gpu in other_stream
-    char *d_999B_stream_other_a;
-    char *d_999B_stream_other_b;
-    allocator.DeviceAllocate((void **) &d_999B_stream_other_a, 999, other_stream);
-
-    // Check that we have 1 live block on the initial GPU (a new one was allocated
-    // because d_999B_stream0_b is only available for stream 0 until it becomes idle)
-    AssertEquals(allocator.live_blocks.size(), 1);
-
-    // Check that we have one cached block on the initial GPU
-    AssertEquals(allocator.cached_blocks.size(), 1);
-
-    // Run some big kernel in other_stream
-    EmptyKernel<void><<<32000, 512, 1024 * 8, other_stream>>>();
-
-    // Free d_999B_stream_other
-    CubDebugExit(allocator.DeviceFree(d_999B_stream_other_a));
-
-    // Check that we can now use both allocations in stream 0 after synchronizing the device
-    CubDebugExit(cudaDeviceSynchronize());
-    CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_a, 999, 0));
-    CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_b, 999, 0));
-
-    // Check that we have 2 live blocks on the initial GPU
-    AssertEquals(allocator.live_blocks.size(), 2);
-
-    // Check that we have no cached blocks on the initial GPU
-    AssertEquals(allocator.cached_blocks.size(), 0);
-
-    // Free d_999B_stream0_a and d_999B_stream0_b
-    CubDebugExit(allocator.DeviceFree(d_999B_stream0_a));
-    CubDebugExit(allocator.DeviceFree(d_999B_stream0_b));
-
-    // Check that we can now use both allocations in other_stream
-    CubDebugExit(cudaDeviceSynchronize());
-    CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream_other_a, 999, other_stream));
-    CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream_other_b, 999, other_stream));
-
-    // Check that we have 2 live blocks on the initial GPU
-    AssertEquals(allocator.live_blocks.size(), 2);
-
-    // Check that we have no cached blocks on the initial GPU
-    AssertEquals(allocator.cached_blocks.size(), 0);
-
-    // Run some big kernel in other_stream
-    EmptyKernel<void><<<32000, 512, 1024 * 8, other_stream>>>();
-
-    // Free d_999B_stream_other_a and d_999B_stream_other_b
-    CubDebugExit(allocator.DeviceFree(d_999B_stream_other_a));
-    CubDebugExit(allocator.DeviceFree(d_999B_stream_other_b));
-
-    // Check that we can now use both allocations in stream 0 after synchronizing the device and destroying the other stream
-    CubDebugExit(cudaDeviceSynchronize());
-    CubDebugExit(cudaStreamDestroy(other_stream));
-    CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_a, 999, 0));
-    CubDebugExit(allocator.DeviceAllocate((void **) &d_999B_stream0_b, 999, 0));
-
-    // Check that we have 2 live blocks on the initial GPU
-    AssertEquals(allocator.live_blocks.size(), 2);
-
-    // Check that we have no cached blocks on the initial GPU
-    AssertEquals(allocator.cached_blocks.size(), 0);
-
-    // Free d_999B_stream0_a and d_999B_stream0_b
-    CubDebugExit(allocator.DeviceFree(d_999B_stream0_a));
-    CubDebugExit(allocator.DeviceFree(d_999B_stream0_b));
-
-    // Free all cached
-    CubDebugExit(allocator.FreeAllCached());
-
-    //
-    // Test1
-    //
-
-    // Allocate 5 bytes on the current gpu
-    char *d_5B;
-    CubDebugExit(allocator.DeviceAllocate((void **) &d_5B, 5));
-
-    // Check that we have zero free bytes cached on the initial GPU
-    AssertEquals(allocator.cached_bytes[initial_gpu].free, 0);
-
-    // Check that we have 1 live block on the initial GPU
-    AssertEquals(allocator.live_blocks.size(), 1);
-
-    //
-    // Test2
-    //
-
-    // Allocate 4096 bytes on the current gpu
-    char *d_4096B;
-    CubDebugExit(allocator.DeviceAllocate((void **) &d_4096B, 4096));
-
-    // Check that we have 2 live blocks on the initial GPU
-    AssertEquals(allocator.live_blocks.size(), 2);
-
-    //
-    // Test3
-    //
-
-    // DeviceFree d_5B
-    CubDebugExit(allocator.DeviceFree(d_5B));
-
-    // Check that we have min_bin_bytes free bytes cached on the initial gpu
-    AssertEquals(allocator.cached_bytes[initial_gpu].free, allocator.min_bin_bytes);
-
-    // Check that we have 1 live block on the initial GPU
-    AssertEquals(allocator.live_blocks.size(), 1);
-
-    // Check that we have 1 cached block on the initial GPU
-    AssertEquals(allocator.cached_blocks.size(), 1);
-
-    //
-    // Test4
-    //
-
-    // DeviceFree d_4096B
-    CubDebugExit(allocator.DeviceFree(d_4096B));
-
-    // Check that we have the 4096 + min_bin free bytes cached on the initial gpu
-    AssertEquals(allocator.cached_bytes[initial_gpu].free, allocator.min_bin_bytes + 4096);
-
-    // Check that we have 0 live blocks on the initial GPU
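-    // (Both blocks have been returned to the allocator at this point; they are
-    // parked in its cache rather than released with cudaFree, which is what the
-    // next two checks verify.)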
AssertEquals(allocator.live_blocks.size(), 0);
-
-    // Check that we have 2 cached blocks on the initial GPU
-    AssertEquals(allocator.cached_blocks.size(), 2);
-
-    //
-    // Test5
-    //
-
-    // Allocate 768 bytes on the current gpu
-    char *d_768B;
-    CubDebugExit(allocator.DeviceAllocate((void **) &d_768B, 768));
-
-    // Check that we have the min_bin free bytes cached on the initial gpu (4096 was reused)
-    AssertEquals(allocator.cached_bytes[initial_gpu].free, allocator.min_bin_bytes);
-
-    // Check that we have 1 live block on the initial GPU
-    AssertEquals(allocator.live_blocks.size(), 1);
-
-    // Check that we have 1 cached block on the initial GPU
-    AssertEquals(allocator.cached_blocks.size(), 1);
-
-    //
-    // Test6
-    //
-
-    // Allocate max_cached_bytes on the current gpu
-    char *d_max_cached;
-    CubDebugExit(allocator.DeviceAllocate((void **) &d_max_cached, allocator.max_cached_bytes));
-
-    // DeviceFree d_max_cached
-    CubDebugExit(allocator.DeviceFree(d_max_cached));
-
-    // Check that we have the min_bin free bytes cached on the initial gpu (max cached was not returned because we went over)
-    AssertEquals(allocator.cached_bytes[initial_gpu].free, allocator.min_bin_bytes);
-
-    // Check that we have 1 live block on the initial GPU
-    AssertEquals(allocator.live_blocks.size(), 1);
-
-    // Check that we still have 1 cached block on the initial GPU
-    AssertEquals(allocator.cached_blocks.size(), 1);
-
-    //
-    // Test7
-    //
-
-    // Free all cached blocks on all GPUs
-    CubDebugExit(allocator.FreeAllCached());
-
-    // Check that we have 0 bytes cached on the initial GPU
-    AssertEquals(allocator.cached_bytes[initial_gpu].free, 0);
-
-    // Check that we have 0 cached blocks across all GPUs
-    AssertEquals(allocator.cached_blocks.size(), 0);
-
-    // Check that we still have 1 live block across all GPUs
-    AssertEquals(allocator.live_blocks.size(), 1);
-
-    //
-    // Test8
-    //
-
-    // Allocate max cached bytes + 1 on the current gpu
-    char *d_max_cached_plus;
-    CubDebugExit(allocator.DeviceAllocate((void **) &d_max_cached_plus, allocator.max_cached_bytes + 1));
-
-    // DeviceFree max cached bytes
-    CubDebugExit(allocator.DeviceFree(d_max_cached_plus));
-
-    // DeviceFree d_768B
-    CubDebugExit(allocator.DeviceFree(d_768B));
-
-    unsigned int power;
-    size_t rounded_bytes;
-    allocator.NearestPowerOf(power, rounded_bytes, allocator.bin_growth, 768);
-
-    // Check that we have 4096 free bytes cached on the initial gpu
-    AssertEquals(allocator.cached_bytes[initial_gpu].free, rounded_bytes);
-
-    // Check that we have 1 cached block across all GPUs
-    AssertEquals(allocator.cached_blocks.size(), 1);
-
-    // Check that we still have 0 live blocks across all GPUs
-    AssertEquals(allocator.live_blocks.size(), 0);
-
-#ifndef CUB_CDP
-    // BUG: find out why these tests fail when one GPU is CDP compliant and the other is not
-
-    if (num_gpus > 1)
-    {
-        printf("\nRunning multi-gpu tests...\n"); fflush(stdout);
-
-        //
-        // Test9
-        //
-
-        // Allocate 768 bytes on the next gpu
-        int next_gpu = (initial_gpu + 1) % num_gpus;
-        char *d_768B_2;
-        CubDebugExit(allocator.DeviceAllocate(next_gpu, (void **) &d_768B_2, 768));
-
-        // DeviceFree d_768B on the next gpu
-        CubDebugExit(allocator.DeviceFree(next_gpu, d_768B_2));
-
-        // Re-allocate 768 bytes on the next gpu
-        CubDebugExit(allocator.DeviceAllocate(next_gpu, (void **) &d_768B_2, 768));
-
-        // Re-free d_768B on the next gpu
-        CubDebugExit(allocator.DeviceFree(next_gpu, d_768B_2));
-
-        // Check that we have 4096 free bytes cached on the initial gpu
-        AssertEquals(allocator.cached_bytes[initial_gpu].free, rounded_bytes);
-
-        // Check that we have 4096 free bytes cached on the second gpu
-        AssertEquals(allocator.cached_bytes[next_gpu].free, rounded_bytes);
-
-        // Check that we have 2 cached blocks across all GPUs
-        AssertEquals(allocator.cached_blocks.size(), 2);
-
-        // Check that we still have 0 live blocks across all GPUs
-        AssertEquals(allocator.live_blocks.size(), 0);
-    }
-#endif  // CUB_CDP
-
-    //
-    // Performance
-    //
-
-    printf("\nCPU Performance (%d timing iterations, %d bytes):\n", timing_iterations, timing_bytes);
-    fflush(stdout); fflush(stderr);
-
-    // CPU performance comparisons vs. cached: allocate and free a 1MB block timing_iterations times
-    CpuTimer cpu_timer;
-    char *d_1024MB = NULL;
-    allocator.debug = false;
-
-    // Prime the caching allocator and the kernel
-    CubDebugExit(allocator.DeviceAllocate((void **) &d_1024MB, timing_bytes));
-    CubDebugExit(allocator.DeviceFree(d_1024MB));
-    cub::EmptyKernel<void><<<1, 32>>>();
-
-    // CUDA
-    cpu_timer.Start();
-    for (int i = 0; i < timing_iterations; ++i)
-    {
-        CubDebugExit(cudaMalloc((void **) &d_1024MB, timing_bytes));
-        CubDebugExit(cudaFree(d_1024MB));
-    }
-    cpu_timer.Stop();
-    float cuda_malloc_elapsed_millis = cpu_timer.ElapsedMillis();
-
-    // CUB
-    cpu_timer.Start();
-    for (int i = 0; i < timing_iterations; ++i)
-    {
-        CubDebugExit(allocator.DeviceAllocate((void **) &d_1024MB, timing_bytes));
-        CubDebugExit(allocator.DeviceFree(d_1024MB));
-    }
-    cpu_timer.Stop();
-    float cub_calloc_elapsed_millis = cpu_timer.ElapsedMillis();
-
-    printf("\t CUB CachingDeviceAllocator allocation CPU speedup: %.2f (avg cudaMalloc %.4f ms vs. avg DeviceAllocate %.4f ms)\n",
-        cuda_malloc_elapsed_millis / cub_calloc_elapsed_millis,
-        cuda_malloc_elapsed_millis / timing_iterations,
-        cub_calloc_elapsed_millis / timing_iterations);
-
-    // GPU performance comparisons: allocate and free a 1MB block timing_iterations times
-    GpuTimer gpu_timer;
-
-    printf("\nGPU Performance (%d timing iterations, %d bytes):\n", timing_iterations, timing_bytes);
-    fflush(stdout); fflush(stderr);
-
-    // Kernel-only
-    gpu_timer.Start();
-    for (int i = 0; i < timing_iterations; ++i)
-    {
-        cub::EmptyKernel<void><<<1, 32>>>();
-    }
-    gpu_timer.Stop();
-    float cuda_empty_elapsed_millis = gpu_timer.ElapsedMillis();
-
-    // CUDA
-    gpu_timer.Start();
-    for (int i = 0; i < timing_iterations; ++i)
-    {
-        CubDebugExit(cudaMalloc((void **) &d_1024MB, timing_bytes));
-        cub::EmptyKernel<void><<<1, 32>>>();
-        CubDebugExit(cudaFree(d_1024MB));
-    }
-    gpu_timer.Stop();
-    cuda_malloc_elapsed_millis = gpu_timer.ElapsedMillis() - cuda_empty_elapsed_millis;
-
-    // CUB
-    gpu_timer.Start();
-    for (int i = 0; i < timing_iterations; ++i)
-    {
-        CubDebugExit(allocator.DeviceAllocate((void **) &d_1024MB, timing_bytes));
-        cub::EmptyKernel<void><<<1, 32>>>();
-        CubDebugExit(allocator.DeviceFree(d_1024MB));
-    }
-    gpu_timer.Stop();
-    cub_calloc_elapsed_millis = gpu_timer.ElapsedMillis() - cuda_empty_elapsed_millis;
-
-    printf("\t CUB CachingDeviceAllocator allocation GPU speedup: %.2f (avg cudaMalloc %.4f ms vs.
avg DeviceAllocate %.4f ms)\n", - cuda_malloc_elapsed_millis / cub_calloc_elapsed_millis, - cuda_malloc_elapsed_millis / timing_iterations, - cub_calloc_elapsed_millis / timing_iterations); - - -#endif - - printf("Success\n"); - - return 0; -} - diff --git a/ml-xgboost/cub/test/test_block_histogram.cu b/ml-xgboost/cub/test/test_block_histogram.cu deleted file mode 100644 index 4a8ab45..0000000 --- a/ml-xgboost/cub/test/test_block_histogram.cu +++ /dev/null @@ -1,310 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/****************************************************************************** - * Test of BlockHistogram utilities - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "test_util.h" - -using namespace cub; - - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -bool g_verbose = false; -int g_timing_iterations = 0; -int g_repeat = 0; -CachingDeviceAllocator g_allocator(true); - - -//--------------------------------------------------------------------- -// Test kernels -//--------------------------------------------------------------------- - -/** - * BlockHistogram test kernel. 
- */ -template < - int BINS, - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - BlockHistogramAlgorithm ALGORITHM, - typename T, - typename HistoCounter> -__global__ void BlockHistogramKernel( - T *d_samples, - HistoCounter *d_histogram) -{ - // Parameterize BlockHistogram type for our thread block - typedef BlockHistogram BlockHistogram; - - // Allocate temp storage in shared memory - __shared__ typename BlockHistogram::TempStorage temp_storage; - - // Per-thread tile data - T data[ITEMS_PER_THREAD]; - LoadDirectStriped(threadIdx.x, d_samples, data); - - // Test histo (writing directly to histogram buffer in global) - BlockHistogram(temp_storage).Histogram(data, d_histogram); -} - - -/** - * Initialize problem (and solution) - */ -template < - int BINS, - typename SampleT> -void Initialize( - GenMode gen_mode, - SampleT *h_samples, - int *h_histograms_linear, - int num_samples) -{ - // Init bins - for (int bin = 0; bin < BINS; ++bin) - { - h_histograms_linear[bin] = 0; - } - - if (g_verbose) printf("Samples: \n"); - - // Initialize interleaved channel samples and histogram them correspondingly - for (int i = 0; i < num_samples; ++i) - { - InitValue(gen_mode, h_samples[i], i); - h_samples[i] %= BINS; - - if (g_verbose) std::cout << CoutCast(h_samples[i]) << ", "; - - h_histograms_linear[h_samples[i]]++; - } - - if (g_verbose) printf("\n\n"); -} - - -/** - * Test BlockHistogram - */ -template < - typename SampleT, - int BINS, - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - BlockHistogramAlgorithm ALGORITHM> -void Test( - GenMode gen_mode) -{ - int num_samples = BLOCK_THREADS * ITEMS_PER_THREAD; - - printf("cub::BlockHistogram %s %d %s samples (%dB), %d bins, %d threads, gen-mode %s\n", - (ALGORITHM == BLOCK_HISTO_SORT) ? "BLOCK_HISTO_SORT" : "BLOCK_HISTO_ATOMIC", - num_samples, - typeid(SampleT).name(), - (int) sizeof(SampleT), - BINS, - BLOCK_THREADS, - (gen_mode == RANDOM) ? "RANDOM" : (gen_mode == INTEGER_SEED) ? "SEQUENTIAL" : "HOMOGENOUS"); - fflush(stdout); - - // Allocate host arrays - SampleT *h_samples = new SampleT[num_samples]; - int *h_reference = new int[BINS]; - - // Initialize problem - Initialize(gen_mode, h_samples, h_reference, num_samples); - - // Allocate problem device arrays - SampleT *d_samples = NULL; - int *d_histogram = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_samples, sizeof(SampleT) * num_samples)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_histogram, sizeof(int) * BINS)); - - // Initialize/clear device arrays - CubDebugExit(cudaMemcpy(d_samples, h_samples, sizeof(SampleT) * num_samples, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemset(d_histogram, 0, sizeof(int) * BINS)); - - // Run kernel - BlockHistogramKernel<<<1, BLOCK_THREADS>>>( - d_samples, - d_histogram); - - // Check for correctness (and display results, if specified) - int compare = CompareDeviceResults((int*) h_reference, d_histogram, BINS, g_verbose, g_verbose); - printf("\t%s\n\n", compare ? 
"FAIL" : "PASS"); - - // Flush any stdout/stderr - CubDebugExit(cudaPeekAtLastError()); - CubDebugExit(cudaDeviceSynchronize()); - fflush(stdout); - fflush(stderr); - - // Cleanup - if (h_samples) delete[] h_samples; - if (h_reference) delete[] h_reference; - if (d_samples) CubDebugExit(g_allocator.DeviceFree(d_samples)); - if (d_histogram) CubDebugExit(g_allocator.DeviceFree(d_histogram)); - - // Correctness asserts - AssertEquals(0, compare); -} - - -/** - * Test different sample distributions - */ -template < - typename SampleT, - int BINS, - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - BlockHistogramAlgorithm ALGORITHM> -void Test() -{ - Test(UNIFORM); - Test(INTEGER_SEED); - Test(RANDOM); -} - - -/** - * Test different ALGORITHM - */ -template < - typename SampleT, - int BINS, - int BLOCK_THREADS, - int ITEMS_PER_THREAD> -void Test() -{ - Test(); - Test(); -} - - -/** - * Test different ITEMS_PER_THREAD - */ -template < - typename SampleT, - int BINS, - int BLOCK_THREADS> -void Test() -{ - Test(); - Test(); -} - - -/** - * Test different BLOCK_THREADS - */ -template < - typename SampleT, - int BINS> -void Test() -{ - Test(); - Test(); - Test(); -} - - - - - -//--------------------------------------------------------------------- -// Main -//--------------------------------------------------------------------- - -/** - * Main - */ -int main(int argc, char** argv) -{ - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - args.GetCmdLineArgument("repeat", g_repeat); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--n= " - "[--device=] " - "[--repeat=]" - "[--v] " - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - -#ifdef QUICK_TEST - - // Compile/run quick tests - Test(RANDOM); - Test(RANDOM); - -#else - - // Compile/run thorough tests - for (int i = 0; i <= g_repeat; ++i) - { - Test(); - Test(); - Test(); - } - -#endif - - return 0; -} - - - diff --git a/ml-xgboost/cub/test/test_block_load_store.cu b/ml-xgboost/cub/test/test_block_load_store.cu deleted file mode 100644 index 4bb139a..0000000 --- a/ml-xgboost/cub/test/test_block_load_store.cu +++ /dev/null @@ -1,549 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/****************************************************************************** - * Test of BlockLoad and BlockStore utilities - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "test_util.h" - -using namespace cub; - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -bool g_verbose = false; -CachingDeviceAllocator g_allocator(true); - - -//--------------------------------------------------------------------- -// Test kernels -//--------------------------------------------------------------------- - - -/** - * Test load/store kernel. - */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - BlockLoadAlgorithm LOAD_ALGORITHM, - BlockStoreAlgorithm STORE_ALGORITHM, - typename InputIteratorT, - typename OutputIteratorT> -__launch_bounds__ (BLOCK_THREADS, 1) -__global__ void Kernel( - InputIteratorT d_in, - OutputIteratorT d_out_unguarded, - OutputIteratorT d_out_guarded, - int num_items) -{ - enum - { - TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD - }; - - // The input value type - typedef typename std::iterator_traits::value_type InputT; - - // The output value type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type OutputT; // ... 
else the output iterator's value type - - // Threadblock load/store abstraction types - typedef BlockLoad BlockLoad; - typedef BlockStore BlockStore; - - // Shared memory type for this threadblock - union TempStorage - { - typename BlockLoad::TempStorage load; - typename BlockStore::TempStorage store; - }; - - // Allocate temp storage in shared memory - __shared__ TempStorage temp_storage; - - // Threadblock work bounds - int block_offset = blockIdx.x * TILE_SIZE; - int guarded_elements = num_items - block_offset; - - // Tile of items - OutputT data[ITEMS_PER_THREAD]; - - // Load data - BlockLoad(temp_storage.load).Load(d_in + block_offset, data); - - __syncthreads(); - - // Store data - BlockStore(temp_storage.store).Store(d_out_unguarded + block_offset, data); - - __syncthreads(); - - // reset data - #pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - data[ITEM] = OutputT(); - - __syncthreads(); - - // Load data - BlockLoad(temp_storage.load).Load(d_in + block_offset, data, guarded_elements); - - __syncthreads(); - - // Store data - BlockStore(temp_storage.store).Store(d_out_guarded + block_offset, data, guarded_elements); -} - - -//--------------------------------------------------------------------- -// Host testing subroutines -//--------------------------------------------------------------------- - - -/** - * Test load/store variants - */ -template < - typename T, - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - BlockLoadAlgorithm LOAD_ALGORITHM, - BlockStoreAlgorithm STORE_ALGORITHM, - typename InputIteratorT, - typename OutputIteratorT> -void TestKernel( - T *h_in, - InputIteratorT d_in, - OutputIteratorT d_out_unguarded_itr, - OutputIteratorT d_out_guarded_itr, - T *d_out_unguarded_ptr, - T *d_out_guarded_ptr, - int grid_size, - int guarded_elements) -{ - int compare; - - int unguarded_elements = grid_size * BLOCK_THREADS * ITEMS_PER_THREAD; - - // Test with discard output iterator - typedef typename std::iterator_traits::difference_type OffsetT; - DiscardOutputIterator discard_itr; - - Kernel - <<>>( - d_in, - discard_itr, - discard_itr, - guarded_elements); - - // Test with regular output iterator - Kernel - <<>>( - d_in, - d_out_unguarded_itr, - d_out_guarded_itr, - guarded_elements); - - CubDebugExit(cudaPeekAtLastError()); - CubDebugExit(cudaDeviceSynchronize()); - - // Check results - compare = CompareDeviceResults(h_in, d_out_guarded_ptr, guarded_elements, g_verbose, g_verbose); - printf("\tGuarded: %s\n", (compare) ? "FAIL" : "PASS"); - AssertEquals(0, compare); - - // Check results - compare = CompareDeviceResults(h_in, d_out_unguarded_ptr, unguarded_elements, g_verbose, g_verbose); - printf("\tUnguarded: %s\n", (compare) ? "FAIL" : "PASS"); - AssertEquals(0, compare); -} - - -/** - * Test native pointer. 
Specialized for sufficient resources - */ -template < - typename T, - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - BlockLoadAlgorithm LOAD_ALGORITHM, - BlockStoreAlgorithm STORE_ALGORITHM> -void TestNative( - int grid_size, - float fraction_valid, - Int2Type sufficient_resources) -{ - int unguarded_elements = grid_size * BLOCK_THREADS * ITEMS_PER_THREAD; - int guarded_elements = int(fraction_valid * float(unguarded_elements)); - - // Allocate host arrays - T *h_in = (T*) malloc(unguarded_elements * sizeof(T)); - - // Allocate device arrays - T *d_in = NULL; - T *d_out_unguarded = NULL; - T *d_out_guarded = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * unguarded_elements)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out_unguarded, sizeof(T) * unguarded_elements)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out_guarded, sizeof(T) * guarded_elements)); - CubDebugExit(cudaMemset(d_out_unguarded, 0, sizeof(T) * unguarded_elements)); - CubDebugExit(cudaMemset(d_out_guarded, 0, sizeof(T) * guarded_elements)); - - // Initialize problem on host and device - for (int i = 0; i < unguarded_elements; ++i) - { - InitValue(INTEGER_SEED, h_in[i], i); - } - CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * unguarded_elements, cudaMemcpyHostToDevice)); - - printf("TestNative " - "grid_size(%d) " - "guarded_elements(%d) " - "unguarded_elements(%d) " - "BLOCK_THREADS(%d) " - "ITEMS_PER_THREAD(%d) " - "LOAD_ALGORITHM(%d) " - "STORE_ALGORITHM(%d) " - "sizeof(T)(%d)\n", - grid_size, guarded_elements, unguarded_elements, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM, (int) sizeof(T)); - - TestKernel( - h_in, - (T const *) d_in, // Test const - d_out_unguarded, - d_out_guarded, - d_out_unguarded, - d_out_guarded, - grid_size, - guarded_elements); - - // Cleanup - if (h_in) free(h_in); - if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); - if (d_out_unguarded) CubDebugExit(g_allocator.DeviceFree(d_out_unguarded)); - if (d_out_guarded) CubDebugExit(g_allocator.DeviceFree(d_out_guarded)); -} - - -/** - * Test native pointer. Specialized for insufficient resources - */ -template < - typename T, - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - BlockLoadAlgorithm LOAD_ALGORITHM, - BlockStoreAlgorithm STORE_ALGORITHM> -void TestNative( - int grid_size, - float fraction_valid, - Int2Type sufficient_resources) -{} - - -/** - * Test iterator. Specialized for sufficient resources. 
- */ -template < - typename T, - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - BlockLoadAlgorithm LOAD_ALGORITHM, - BlockStoreAlgorithm STORE_ALGORITHM, - CacheLoadModifier LOAD_MODIFIER, - CacheStoreModifier STORE_MODIFIER> -void TestIterator( - int grid_size, - float fraction_valid, - Int2Type sufficient_resources) -{ - int unguarded_elements = grid_size * BLOCK_THREADS * ITEMS_PER_THREAD; - int guarded_elements = int(fraction_valid * float(unguarded_elements)); - - // Allocate host arrays - T *h_in = (T*) malloc(unguarded_elements * sizeof(T)); - - // Allocate device arrays - T *d_in = NULL; - T *d_out_unguarded = NULL; - T *d_out_guarded = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * unguarded_elements)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out_unguarded, sizeof(T) * unguarded_elements)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out_guarded, sizeof(T) * guarded_elements)); - CubDebugExit(cudaMemset(d_out_unguarded, 0, sizeof(T) * unguarded_elements)); - CubDebugExit(cudaMemset(d_out_guarded, 0, sizeof(T) * guarded_elements)); - - // Initialize problem on host and device - for (int i = 0; i < unguarded_elements; ++i) - { - InitValue(INTEGER_SEED, h_in[i], i); - } - CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * unguarded_elements, cudaMemcpyHostToDevice)); - - printf("TestIterator " - "grid_size(%d) " - "guarded_elements(%d) " - "unguarded_elements(%d) " - "BLOCK_THREADS(%d) " - "ITEMS_PER_THREAD(%d) " - "LOAD_ALGORITHM(%d) " - "STORE_ALGORITHM(%d) " - "LOAD_MODIFIER(%d) " - "STORE_MODIFIER(%d) " - "sizeof(T)(%d)\n", - grid_size, guarded_elements, unguarded_elements, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM, LOAD_MODIFIER, STORE_MODIFIER, (int) sizeof(T)); - - TestKernel( - h_in, - CacheModifiedInputIterator(d_in), - CacheModifiedOutputIterator(d_out_unguarded), - CacheModifiedOutputIterator(d_out_guarded), - d_out_unguarded, - d_out_guarded, - grid_size, - guarded_elements); - - // Cleanup - if (h_in) free(h_in); - if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); - if (d_out_unguarded) CubDebugExit(g_allocator.DeviceFree(d_out_unguarded)); - if (d_out_guarded) CubDebugExit(g_allocator.DeviceFree(d_out_guarded)); -} - -/** - * Test iterator. Specialized for insufficient resources. 
- */ -template < - typename T, - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - BlockLoadAlgorithm LOAD_ALGORITHM, - BlockStoreAlgorithm STORE_ALGORITHM, - CacheLoadModifier LOAD_MODIFIER, - CacheStoreModifier STORE_MODIFIER> -void TestIterator( - int grid_size, - float fraction_valid, - Int2Type sufficient_resources) -{} - - -/** - * Evaluate different pointer access types - */ -template < - typename T, - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - BlockLoadAlgorithm LOAD_ALGORITHM, - BlockStoreAlgorithm STORE_ALGORITHM> -void TestPointerType( - int grid_size, - float fraction_valid) -{ - // Threadblock load/store abstraction types - typedef BlockLoad BlockLoad; - typedef BlockStore BlockStore; - -#if defined(SM100) || defined(SM110) || defined(SM130) - static const bool sufficient_load_smem = sizeof(typename BlockLoad::TempStorage) <= 1024 * 16; - static const bool sufficient_store_smem = sizeof(typename BlockStore::TempStorage) <= 1024 * 16; - static const bool sufficient_threads = BLOCK_THREADS <= 512; -#else - static const bool sufficient_load_smem = sizeof(typename BlockLoad::TempStorage) <= 1024 * 48; - static const bool sufficient_store_smem = sizeof(typename BlockStore::TempStorage) <= 1024 * 48; - static const bool sufficient_threads = BLOCK_THREADS <= 1024; -#endif - - static const bool sufficient_resources = sufficient_load_smem && sufficient_store_smem && sufficient_threads; - - TestNative(grid_size, fraction_valid, Int2Type()); - TestIterator(grid_size, fraction_valid, Int2Type()); -} - - -/** - * Evaluate different time-slicing strategies - */ -template < - typename T, - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - BlockLoadAlgorithm LOAD_ALGORITHM, - BlockStoreAlgorithm STORE_ALGORITHM> -void TestSlicedStrategy( - int grid_size, - float fraction_valid) -{ - TestPointerType(grid_size, fraction_valid); - TestPointerType(grid_size, fraction_valid); -} - - - -/** - * Evaluate different load/store strategies (specialized for block sizes that are not a multiple of 32) - */ -template < - typename T, - int BLOCK_THREADS, - int ITEMS_PER_THREAD> -void TestStrategy( - int grid_size, - float fraction_valid, - Int2Type is_warp_multiple) -{ - TestPointerType(grid_size, fraction_valid); - TestPointerType(grid_size, fraction_valid); - TestPointerType(grid_size, fraction_valid); -} - - -/** - * Evaluate different load/store strategies (specialized for block sizes that are a multiple of 32) - */ -template < - typename T, - int BLOCK_THREADS, - int ITEMS_PER_THREAD> -void TestStrategy( - int grid_size, - float fraction_valid, - Int2Type is_warp_multiple) -{ - TestStrategy(grid_size, fraction_valid, Int2Type()); - TestPointerType(grid_size, fraction_valid); - TestPointerType(grid_size, fraction_valid); -} - - -/** - * Evaluate different register blocking - */ -template < - typename T, - int BLOCK_THREADS> -void TestItemsPerThread( - int grid_size, - float fraction_valid) -{ - Int2Type is_warp_multiple; - - TestStrategy(grid_size, fraction_valid, is_warp_multiple); - TestStrategy(grid_size, fraction_valid, is_warp_multiple); - TestStrategy(grid_size, fraction_valid, is_warp_multiple); - TestStrategy(grid_size, fraction_valid, is_warp_multiple); -} - - -/** - * Evaluate different threadblock sizes - */ -template -void TestThreads( - int grid_size, - float fraction_valid) -{ - TestItemsPerThread(grid_size, fraction_valid); - TestItemsPerThread(grid_size, fraction_valid); - TestItemsPerThread(grid_size, fraction_valid); - TestItemsPerThread(grid_size, fraction_valid); - 
TestItemsPerThread(grid_size, fraction_valid); -} - - -/** - * Main - */ -int main(int argc, char** argv) -{ - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--device=] " - "[--v] " - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - - // Get ptx version - int ptx_version; - CubDebugExit(PtxVersion(ptx_version)); - -#ifdef QUICK_TEST - - // Compile/run quick tests - TestNative< int, 64, 2, BLOCK_LOAD_WARP_TRANSPOSE, BLOCK_STORE_WARP_TRANSPOSE>(1, 0.8f, Int2Type()); - TestIterator< int, 64, 2, BLOCK_LOAD_WARP_TRANSPOSE, BLOCK_STORE_WARP_TRANSPOSE, LOAD_DEFAULT, STORE_DEFAULT>(1, 0.8f, Int2Type()); - -#else - - // Compile/run thorough tests - TestThreads(2, 0.8f); - TestThreads(2, 0.8f); - TestThreads(2, 0.8f); - TestThreads(2, 0.8f); - - if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted - TestThreads(2, 0.8f); - TestThreads(2, 0.8f); - TestThreads(2, 0.8f); - -#endif - - return 0; -} - - - diff --git a/ml-xgboost/cub/test/test_block_radix_sort.cu b/ml-xgboost/cub/test/test_block_radix_sort.cu deleted file mode 100644 index 36f8760..0000000 --- a/ml-xgboost/cub/test/test_block_radix_sort.cu +++ /dev/null @@ -1,717 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
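
The test_block_load_store.cu deletion above drives guarded and unguarded BlockLoad/BlockStore round trips over many block shapes and algorithms. The guarded core of that pattern reduces to a sketch like this (algorithm choice and block shape are assumptions; the warp-transpose variants expect the block size to be a multiple of the warp size):

    #include <cub/block/block_load.cuh>
    #include <cub/block/block_store.cuh>

    template <int BLOCK_THREADS, int ITEMS_PER_THREAD>
    __global__ void CopyTileSketch(int *d_in, int *d_out, int num_valid)
    {
        typedef cub::BlockLoad<int, BLOCK_THREADS, ITEMS_PER_THREAD, cub::BLOCK_LOAD_WARP_TRANSPOSE> BlockLoadT;
        typedef cub::BlockStore<int, BLOCK_THREADS, ITEMS_PER_THREAD, cub::BLOCK_STORE_WARP_TRANSPOSE> BlockStoreT;

        // Load and Store both stage through shared memory, so the two
        // TempStorage instances can share one allocation.
        __shared__ union {
            typename BlockLoadT::TempStorage load;
            typename BlockStoreT::TempStorage store;
        } temp_storage;

        // Guarded variant: out-of-bounds items are neither read nor written.
        int items[ITEMS_PER_THREAD];
        BlockLoadT(temp_storage.load).Load(d_in, items, num_valid);
        __syncthreads();    // temp_storage is reused between Load and Store
        BlockStoreT(temp_storage.store).Store(d_out, items, num_valid);
    }

    // e.g. CopyTileSketch<128, 4><<<1, 128>>>(d_in, d_out, n);
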
- * - ******************************************************************************/ - -/****************************************************************************** - * Test of BlockRadixSort utilities - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include -#include -#include - -#include -#include -#include -#include - -#include "test_util.h" - -using namespace cub; - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -bool g_verbose = false; -CachingDeviceAllocator g_allocator(true); - - -//--------------------------------------------------------------------- -// Test kernels -//--------------------------------------------------------------------- - - -/// Specialized descending, blocked -> blocked -template -__device__ __forceinline__ void TestBlockSort( - typename BlockRadixSort::TempStorage &temp_storage, - Key (&keys)[ITEMS_PER_THREAD], - Value (&values)[ITEMS_PER_THREAD], - Key *d_keys, - Value *d_values, - int begin_bit, - int end_bit, - clock_t &stop, - Int2Type is_descending, - Int2Type is_blocked_output) -{ - BlockRadixSort(temp_storage).SortDescending(keys, values, begin_bit, end_bit); - stop = clock(); - StoreDirectBlocked(threadIdx.x, d_keys, keys); - StoreDirectBlocked(threadIdx.x, d_values, values); -} - -/// Specialized descending, blocked -> striped -template -__device__ __forceinline__ void TestBlockSort( - typename BlockRadixSort::TempStorage &temp_storage, - Key (&keys)[ITEMS_PER_THREAD], - Value (&values)[ITEMS_PER_THREAD], - Key *d_keys, - Value *d_values, - int begin_bit, - int end_bit, - clock_t &stop, - Int2Type is_descending, - Int2Type is_blocked_output) -{ - BlockRadixSort(temp_storage).SortDescendingBlockedToStriped(keys, values, begin_bit, end_bit); - stop = clock(); - StoreDirectStriped(threadIdx.x, d_keys, keys); - StoreDirectStriped(threadIdx.x, d_values, values); -} - -/// Specialized ascending, blocked -> blocked -template -__device__ __forceinline__ void TestBlockSort( - typename BlockRadixSort::TempStorage &temp_storage, - Key (&keys)[ITEMS_PER_THREAD], - Value (&values)[ITEMS_PER_THREAD], - Key *d_keys, - Value *d_values, - int begin_bit, - int end_bit, - clock_t &stop, - Int2Type is_descending, - Int2Type is_blocked_output) -{ - BlockRadixSort(temp_storage).Sort(keys, values, begin_bit, end_bit); - stop = clock(); - StoreDirectBlocked(threadIdx.x, d_keys, keys); - StoreDirectBlocked(threadIdx.x, d_values, values); -} - -/// Specialized ascending, blocked -> striped -template -__device__ __forceinline__ void TestBlockSort( - typename BlockRadixSort::TempStorage &temp_storage, - Key (&keys)[ITEMS_PER_THREAD], - Value (&values)[ITEMS_PER_THREAD], - Key *d_keys, - Value *d_values, - int begin_bit, - int end_bit, - clock_t &stop, - Int2Type is_descending, - Int2Type is_blocked_output) -{ - BlockRadixSort(temp_storage).SortBlockedToStriped(keys, values, begin_bit, end_bit); - stop = clock(); - StoreDirectStriped(threadIdx.x, d_keys, keys); - StoreDirectStriped(threadIdx.x, d_values, values); -} - - - -/** - * BlockRadixSort kernel - */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - int RADIX_BITS, - bool MEMOIZE_OUTER_SCAN, - BlockScanAlgorithm INNER_SCAN_ALGORITHM, - cudaSharedMemConfig SMEM_CONFIG, - int DESCENDING, - int BLOCKED_OUTPUT, - typename Key, - typename Value> -__launch_bounds__ 
(BLOCK_THREADS, 1) -__global__ void Kernel( - Key *d_keys, - Value *d_values, - int begin_bit, - int end_bit, - clock_t *d_elapsed) -{ - // Threadblock load/store abstraction types - typedef BlockRadixSort< - Key, - BLOCK_THREADS, - ITEMS_PER_THREAD, - Value, - RADIX_BITS, - MEMOIZE_OUTER_SCAN, - INNER_SCAN_ALGORITHM, - SMEM_CONFIG> - BlockRadixSortT; - - // Allocate temp storage in shared memory - __shared__ typename BlockRadixSortT::TempStorage temp_storage; - - // Items per thread - Key keys[ITEMS_PER_THREAD]; - Value values[ITEMS_PER_THREAD]; - - LoadDirectBlocked(threadIdx.x, d_keys, keys); - LoadDirectBlocked(threadIdx.x, d_values, values); - - // Start cycle timer - clock_t stop; - clock_t start = clock(); - - TestBlockSort( - temp_storage, keys, values, d_keys, d_values, begin_bit, end_bit, stop, Int2Type(), Int2Type()); - - // Store time - if (threadIdx.x == 0) - *d_elapsed = (start > stop) ? start - stop : stop - start; -} - - - -//--------------------------------------------------------------------- -// Host testing subroutines -//--------------------------------------------------------------------- - - -/** - * Simple key-value pairing - */ -template < - typename Key, - typename Value, - bool IS_FLOAT = (Traits::CATEGORY == FLOATING_POINT)> -struct Pair -{ - Key key; - Value value; - - bool operator<(const Pair &b) const - { - return (key < b.key); - } -}; - -/** - * Simple key-value pairing (specialized for floating point types) - */ -template -struct Pair -{ - Key key; - Value value; - - bool operator<(const Pair &b) const - { - if (key < b.key) - return true; - - if (key > b.key) - return false; - - // Key in unsigned bits - typedef typename Traits::UnsignedBits UnsignedBits; - - // Return true if key is negative zero and b.key is positive zero - UnsignedBits key_bits = *reinterpret_cast(const_cast(&key)); - UnsignedBits b_key_bits = *reinterpret_cast(const_cast(&b.key)); - UnsignedBits HIGH_BIT = Traits::HIGH_BIT; - - return ((key_bits & HIGH_BIT) != 0) && ((b_key_bits & HIGH_BIT) == 0); - } -}; - - -/** - * Initialize key-value sorting problem. 
- */ -template -void Initialize( - GenMode gen_mode, - Key *h_keys, - Value *h_values, - Key *h_reference_keys, - Value *h_reference_values, - int num_items, - int entropy_reduction, - int begin_bit, - int end_bit) -{ - Pair *h_pairs = new Pair[num_items]; - - for (int i = 0; i < num_items; ++i) - { - InitValue(gen_mode, h_keys[i], i); - - RandomBits(h_values[i]); - - // Mask off unwanted portions - int num_bits = end_bit - begin_bit; - if ((begin_bit > 0) || (end_bit < sizeof(Key) * 8)) - { - unsigned long long base = 0; - memcpy(&base, &h_keys[i], sizeof(Key)); - base &= ((1ull << num_bits) - 1) << begin_bit; - memcpy(&h_keys[i], &base, sizeof(Key)); - } - - h_pairs[i].key = h_keys[i]; - h_pairs[i].value = h_values[i]; - } - - if (DESCENDING) std::reverse(h_pairs, h_pairs + num_items); - std::stable_sort(h_pairs, h_pairs + num_items); - if (DESCENDING) std::reverse(h_pairs, h_pairs + num_items); - - for (int i = 0; i < num_items; ++i) - { - h_reference_keys[i] = h_pairs[i].key; - h_reference_values[i] = h_pairs[i].value; - } - - delete[] h_pairs; -} - - - - -/** - * Test BlockRadixSort kernel - */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - int RADIX_BITS, - bool MEMOIZE_OUTER_SCAN, - BlockScanAlgorithm INNER_SCAN_ALGORITHM, - cudaSharedMemConfig SMEM_CONFIG, - bool DESCENDING, - bool BLOCKED_OUTPUT, - typename Key, - typename Value> -void TestDriver( - GenMode gen_mode, - int entropy_reduction, - int begin_bit, - int end_bit) -{ - enum - { - TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD, - KEYS_ONLY = Equals::VALUE, - }; - - // Allocate host arrays - Key *h_keys = new Key[TILE_SIZE]; - Key *h_reference_keys = new Key[TILE_SIZE]; - Value *h_values = new Value[TILE_SIZE]; - Value *h_reference_values = new Value[TILE_SIZE]; - - // Allocate device arrays - Key *d_keys = NULL; - Value *d_values = NULL; - clock_t *d_elapsed = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_keys, sizeof(Key) * TILE_SIZE)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_values, sizeof(Value) * TILE_SIZE)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(clock_t))); - - // Initialize problem and solution on host - Initialize(gen_mode, h_keys, h_values, h_reference_keys, h_reference_values, - TILE_SIZE, entropy_reduction, begin_bit, end_bit); - - // Copy problem to device - CubDebugExit(cudaMemcpy(d_keys, h_keys, sizeof(Key) * TILE_SIZE, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemcpy(d_values, h_values, sizeof(Value) * TILE_SIZE, cudaMemcpyHostToDevice)); - - printf("%s " - "BLOCK_THREADS(%d) " - "ITEMS_PER_THREAD(%d) " - "RADIX_BITS(%d) " - "MEMOIZE_OUTER_SCAN(%d) " - "INNER_SCAN_ALGORITHM(%d) " - "SMEM_CONFIG(%d) " - "DESCENDING(%d) " - "BLOCKED_OUTPUT(%d) " - "sizeof(Key)(%d) " - "sizeof(Value)(%d) " - "gen_mode(%d), " - "entropy_reduction(%d) " - "begin_bit(%d) " - "end_bit(%d), " - "samples(%d)\n", - ((KEYS_ONLY) ? 
"Keys-only" : "Key-value"), - BLOCK_THREADS, - ITEMS_PER_THREAD, - RADIX_BITS, - MEMOIZE_OUTER_SCAN, - INNER_SCAN_ALGORITHM, - SMEM_CONFIG, - DESCENDING, - BLOCKED_OUTPUT, - (int) sizeof(Key), - (int) sizeof(Value), - gen_mode, - entropy_reduction, - begin_bit, - end_bit, - g_num_rand_samples); - - // Set shared memory config - cudaDeviceSetSharedMemConfig(SMEM_CONFIG); - - // Run kernel - Kernel<<<1, BLOCK_THREADS>>>( - d_keys, d_values, begin_bit, end_bit, d_elapsed); - - // Flush kernel output / errors - CubDebugExit(cudaPeekAtLastError()); - CubDebugExit(cudaDeviceSynchronize()); - - // Check keys results - printf("\tKeys: "); - int compare = CompareDeviceResults(h_reference_keys, d_keys, TILE_SIZE, g_verbose, g_verbose); - printf("%s\n", compare ? "FAIL" : "PASS"); - AssertEquals(0, compare); - - // Check value results - if (!KEYS_ONLY) - { - printf("\tValues: "); - int compare = CompareDeviceResults(h_reference_values, d_values, TILE_SIZE, g_verbose, g_verbose); - printf("%s\n", compare ? "FAIL" : "PASS"); - AssertEquals(0, compare); - } - printf("\n"); - - printf("\tElapsed clocks: "); - DisplayDeviceResults(d_elapsed, 1); - printf("\n"); - - // Cleanup - if (h_keys) delete[] h_keys; - if (h_reference_keys) delete[] h_reference_keys; - if (h_values) delete[] h_values; - if (h_reference_values) delete[] h_reference_values; - if (d_keys) CubDebugExit(g_allocator.DeviceFree(d_keys)); - if (d_values) CubDebugExit(g_allocator.DeviceFree(d_values)); - if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed)); -} - - -/** - * Test driver (valid tile size <= MAX_SMEM_BYTES) - */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - int RADIX_BITS, - bool MEMOIZE_OUTER_SCAN, - BlockScanAlgorithm INNER_SCAN_ALGORITHM, - cudaSharedMemConfig SMEM_CONFIG, - bool DESCENDING, - bool BLOCKED_OUTPUT, - typename Key, - typename Value> -void TestValid(Int2Type fits_smem_capacity) -{ - // Iterate begin_bit - for (int begin_bit = 0; begin_bit <= 1; begin_bit++) - { - // Iterate end bit - for (int end_bit = begin_bit + 1; end_bit <= sizeof(Key) * 8; end_bit = end_bit * 2 + begin_bit) - { - // Uniform key distribution - TestDriver( - UNIFORM, 0, begin_bit, end_bit); - - // Sequential key distribution - TestDriver( - INTEGER_SEED, 0, begin_bit, end_bit); - - // Iterate random with entropy_reduction - for (int entropy_reduction = 0; entropy_reduction <= 9; entropy_reduction += 3) - { - TestDriver( - RANDOM, entropy_reduction, begin_bit, end_bit); - } - } - } -} - - -/** - * Test driver (invalid tile size) - */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - int RADIX_BITS, - bool MEMOIZE_OUTER_SCAN, - BlockScanAlgorithm INNER_SCAN_ALGORITHM, - cudaSharedMemConfig SMEM_CONFIG, - bool DESCENDING, - bool BLOCKED_OUTPUT, - typename Key, - typename Value> -void TestValid(Int2Type fits_smem_capacity) -{} - - -/** - * Test ascending/descending and to-blocked/to-striped - */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - int RADIX_BITS, - bool MEMOIZE_OUTER_SCAN, - BlockScanAlgorithm INNER_SCAN_ALGORITHM, - cudaSharedMemConfig SMEM_CONFIG, - typename Key, - typename Value> -void Test() -{ - // Check size of smem storage for the target arch to make sure it will fit - typedef BlockRadixSort BlockRadixSortT; - -#if defined(SM100) || defined(SM110) || defined(SM130) - Int2Type fits_smem_capacity; -#else - Int2Type<(sizeof(typename BlockRadixSortT::TempStorage) <= 48 * 1024)> fits_smem_capacity; -#endif - - // Sort-ascending, to-striped - TestValid(fits_smem_capacity); - - // 
Sort-descending, to-blocked - TestValid(fits_smem_capacity); - - // Not necessary -// TestValid(fits_smem_capacity); -// TestValid(fits_smem_capacity); -} - - -/** - * Test value type and smem config - */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - int RADIX_BITS, - bool MEMOIZE_OUTER_SCAN, - BlockScanAlgorithm INNER_SCAN_ALGORITHM, - typename Key> -void TestKeys() -{ - // Test keys-only sorting with both smem configs - Test(); // Keys-only (4-byte smem bank config) -#if !defined(SM100) && !defined(SM110) && !defined(SM130) && !defined(SM200) - Test(); // Keys-only (8-byte smem bank config) -#endif -} - - -/** - * Test value type and smem config - */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - int RADIX_BITS, - bool MEMOIZE_OUTER_SCAN, - BlockScanAlgorithm INNER_SCAN_ALGORITHM, - typename Key> -void TestKeysAndPairs() -{ - // Test pairs sorting with only 4-byte configs - Test(); // With small-values - Test(); // With same-values - Test(); // With large values -} - - -/** - * Test key type - */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - int RADIX_BITS, - bool MEMOIZE_OUTER_SCAN, - BlockScanAlgorithm INNER_SCAN_ALGORITHM> -void Test() -{ - // Get ptx version - int ptx_version; - CubDebugExit(PtxVersion(ptx_version)); - -#ifdef TEST_KEYS_ONLY - - // Test unsigned types with keys-only - TestKeys(); - TestKeys(); - TestKeys(); - TestKeys(); - TestKeys(); - -#else - - // Test signed and fp types with paired values - TestKeysAndPairs(); - TestKeysAndPairs(); - TestKeysAndPairs(); - TestKeysAndPairs(); - TestKeysAndPairs(); - TestKeysAndPairs(); - if (ptx_version > 120) - { - // Don't check doubles on PTX120 or below because they're down-converted - TestKeysAndPairs(); - } - -#endif -} - - -/** - * Test inner scan algorithm - */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - int RADIX_BITS, - bool MEMOIZE_OUTER_SCAN> -void Test() -{ - Test(); - Test(); -} - - -/** - * Test outer scan algorithm - */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - int RADIX_BITS> -void Test() -{ - Test(); - Test(); -} - - -/** - * Test radix bits - */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD> -void Test() -{ - Test(); - Test(); - Test(); -} - - -/** - * Test items per thread - */ -template -void Test() -{ - Test(); -#if defined(SM100) || defined(SM110) || defined(SM130) - // Open64 compiler can't handle the number of test cases -#else - Test(); -#endif - Test(); -} - - - -/** - * Main - */ -int main(int argc, char** argv) -{ - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--device=] " - "[--v] " - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - -#ifdef QUICK_TEST - - { - typedef float T; - TestDriver<32, 4, 4, true, BLOCK_SCAN_WARP_SCANS, cudaSharedMemBankSizeFourByte, false, false, T, NullType>(INTEGER_SEED, 0, 0, sizeof(T) * 8); - } -/* - // Compile/run quick tests - typedef unsigned int T; - TestDriver<64, 17, 4, true, BLOCK_SCAN_WARP_SCANS, cudaSharedMemBankSizeFourByte, false, false, T, NullType>(RANDOM, 0, 0, sizeof(T) * 8); - TestDriver<96, 8, 4, true, BLOCK_SCAN_WARP_SCANS, cudaSharedMemBankSizeFourByte, false, false, T, NullType>(RANDOM, 0, 0, sizeof(T) * 8); - TestDriver<128, 2, 4, true, BLOCK_SCAN_WARP_SCANS, cudaSharedMemBankSizeFourByte, false, false, T, NullType>(RANDOM, 0, 0, sizeof(T) * 8); -*/ - -#else - - // Compile/run thorough tests - 
Test<32>(); - Test<64>(); - Test<160>(); - - -#endif // QUICK_TEST - - return 0; -} - - - diff --git a/ml-xgboost/cub/test/test_block_reduce.cu b/ml-xgboost/cub/test/test_block_reduce.cu deleted file mode 100644 index 2ba9edd..0000000 --- a/ml-xgboost/cub/test/test_block_reduce.cu +++ /dev/null @@ -1,822 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
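
The test_block_radix_sort.cu deletion above sweeps cub::BlockRadixSort across key types, bit ranges, and blocked/striped output arrangements. Its keys-only, blocked-to-blocked core reduces to a sketch like this (parameters assumed for illustration; the full bit range is sorted by default):

    #include <cub/block/block_radix_sort.cuh>
    #include <cub/block/block_load.cuh>
    #include <cub/block/block_store.cuh>

    template <int BLOCK_THREADS, int ITEMS_PER_THREAD>
    __global__ void SortTileSketch(unsigned int *d_keys)
    {
        typedef cub::BlockRadixSort<unsigned int, BLOCK_THREADS, ITEMS_PER_THREAD> BlockRadixSortT;
        __shared__ typename BlockRadixSortT::TempStorage temp_storage;

        // Blocked arrangement in, ascending sort, blocked arrangement out.
        unsigned int keys[ITEMS_PER_THREAD];
        cub::LoadDirectBlocked(threadIdx.x, d_keys, keys);
        BlockRadixSortT(temp_storage).Sort(keys);
        cub::StoreDirectBlocked(threadIdx.x, d_keys, keys);
    }
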
- * - ******************************************************************************/ - -/****************************************************************************** - * Test of BlockReduce utilities - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include - -#include -#include - -#include -#include -#include -#include -#include - -#include "test_util.h" - -using namespace cub; - - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -bool g_verbose = false; -int g_repeat = 0; -CachingDeviceAllocator g_allocator(true); - - - -//--------------------------------------------------------------------- -// Test kernels -//--------------------------------------------------------------------- - - -/// Generic reduction (full, 1) -template -__device__ __forceinline__ T DeviceTest( - BlockReduceT &block_reduce, T (&data)[1], ReductionOp &reduction_op) -{ - return block_reduce.Reduce(data[0], reduction_op); -} - -/// Generic reduction (full, ITEMS_PER_THREAD) -template -__device__ __forceinline__ T DeviceTest( - BlockReduceT &block_reduce, T (&data)[ITEMS_PER_THREAD], ReductionOp &reduction_op) -{ - return block_reduce.Reduce(data, reduction_op); -} - -/// Generic reduction (partial, 1) -template -__device__ __forceinline__ T DeviceTest( - BlockReduceT &block_reduce, T &data, ReductionOp &reduction_op, int valid_threads) -{ - return block_reduce.Reduce(data, reduction_op, valid_threads); -} - -/// Sum reduction (full, 1) -template -__device__ __forceinline__ T DeviceTest( - BlockReduceT &block_reduce, T (&data)[1], Sum &reduction_op) -{ - return block_reduce.Sum(data[0]); -} - -/// Sum reduction (full, ITEMS_PER_THREAD) -template -__device__ __forceinline__ T DeviceTest( - BlockReduceT &block_reduce, T (&data)[ITEMS_PER_THREAD], Sum &reduction_op) -{ - return block_reduce.Sum(data); -} - -/// Sum reduction (partial, 1) -template -__device__ __forceinline__ T DeviceTest( - BlockReduceT &block_reduce, T &data, Sum &reduction_op, int valid_threads) -{ - return block_reduce.Sum(data, valid_threads); -} - - -/** - * Test full-tile reduction kernel (where num_items is an even - * multiple of BLOCK_THREADS) - */ -template < - BlockReduceAlgorithm ALGORITHM, - int BLOCK_DIM_X, - int BLOCK_DIM_Y, - int BLOCK_DIM_Z, - int ITEMS_PER_THREAD, - typename T, - typename ReductionOp> -__launch_bounds__ (BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) -__global__ void FullTileReduceKernel( - T *d_in, - T *d_out, - ReductionOp reduction_op, - int tiles, - clock_t *d_elapsed) -{ - const int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z; - const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD; - - // Cooperative threadblock reduction utility type (returns aggregate in thread 0) - typedef BlockReduce BlockReduceT; - - // Allocate temp storage in shared memory - __shared__ typename BlockReduceT::TempStorage temp_storage; - - int linear_tid = RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z); - - // Per-thread tile data - T data[ITEMS_PER_THREAD]; - - // Load first tile of data - int block_offset = 0; - - if (block_offset < TILE_SIZE * tiles) - { - LoadDirectBlocked(linear_tid, d_in + block_offset, data); - block_offset += TILE_SIZE; - - // Start cycle timer - clock_t start = clock(); - - // Cooperative reduce first tile - BlockReduceT block_reduce(temp_storage) ; - T 
block_aggregate = DeviceTest(block_reduce, data, reduction_op); - - // Stop cycle timer - #if CUB_PTX_ARCH == 100 - // Bug: recording stop clock causes mis-write of running prefix value - clock_t stop = 0; -#else - clock_t stop = clock(); -#endif // CUB_PTX_ARCH == 100 - clock_t elapsed = (start > stop) ? start - stop : stop - start; - - // Loop over input tiles - while (block_offset < TILE_SIZE * tiles) - { - // TestBarrier between threadblock reductions - __syncthreads(); - - // Load tile of data - LoadDirectBlocked(linear_tid, d_in + block_offset, data); - block_offset += TILE_SIZE; - - // Start cycle timer - clock_t start = clock(); - - // Cooperatively reduce the tile's aggregate - BlockReduceT block_reduce(temp_storage) ; - T tile_aggregate = DeviceTest(block_reduce, data, reduction_op); - - // Stop cycle timer -#if CUB_PTX_ARCH == 100 - // Bug: recording stop clock causes mis-write of running prefix value - clock_t stop = 0; -#else - clock_t stop = clock(); -#endif // CUB_PTX_ARCH == 100 - elapsed += (start > stop) ? start - stop : stop - start; - - // Reduce threadblock aggregate - block_aggregate = reduction_op(block_aggregate, tile_aggregate); - } - - // Store data - if (linear_tid == 0) - { - d_out[0] = block_aggregate; - *d_elapsed = elapsed; - } - } -} - - - -/** - * Test partial-tile reduction kernel (where num_items < BLOCK_THREADS) - */ -template < - BlockReduceAlgorithm ALGORITHM, - int BLOCK_DIM_X, - int BLOCK_DIM_Y, - int BLOCK_DIM_Z, - typename T, - typename ReductionOp> -__launch_bounds__ (BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) -__global__ void PartialTileReduceKernel( - T *d_in, - T *d_out, - int num_items, - ReductionOp reduction_op, - clock_t *d_elapsed) -{ - // Cooperative threadblock reduction utility type (returns aggregate only in thread-0) - typedef BlockReduce BlockReduceT; - - // Allocate temp storage in shared memory - __shared__ typename BlockReduceT::TempStorage temp_storage; - - int linear_tid = RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z); - - // Per-thread tile data - T partial; - - // Load partial tile data - if (linear_tid < num_items) - { - partial = d_in[linear_tid]; - } - - // Start cycle timer - clock_t start = clock(); - - // Cooperatively reduce the tile's aggregate - BlockReduceT block_reduce(temp_storage) ; - T tile_aggregate = DeviceTest(block_reduce, partial, reduction_op, num_items); - - // Stop cycle timer -#if CUB_PTX_ARCH == 100 - // Bug: recording stop clock causes mis-write of running prefix value - clock_t stop = 0; -#else - clock_t stop = clock(); -#endif // CUB_PTX_ARCH == 100 - - clock_t elapsed = (start > stop) ? 
start - stop : stop - start; - - // Store data - if (linear_tid == 0) - { - d_out[0] = tile_aggregate; - *d_elapsed = elapsed; - } -} - - -//--------------------------------------------------------------------- -// Host utility subroutines -//--------------------------------------------------------------------- - -/** - * Initialize problem (and solution) - */ -template < - typename T, - typename ReductionOp> -void Initialize( - GenMode gen_mode, - T *h_in, - T h_reference[1], - ReductionOp reduction_op, - int num_items) -{ - for (int i = 0; i < num_items; ++i) - { - InitValue(gen_mode, h_in[i], i); - if (i == 0) - h_reference[0] = h_in[0]; - else - h_reference[0] = reduction_op(h_reference[0], h_in[i]); - } - - if (g_verbose) - { - printf("Input:\n"); - DisplayResults(h_in, num_items); - printf("\n"); - } -} - - -//--------------------------------------------------------------------- -// Full tile test generation -//--------------------------------------------------------------------- - - -/** - * Test full-tile reduction. (Specialized for sufficient resources) - */ -template < - BlockReduceAlgorithm ALGORITHM, - int BLOCK_DIM_X, - int BLOCK_DIM_Y, - int BLOCK_DIM_Z, - int ITEMS_PER_THREAD, - typename T, - typename ReductionOp> -void TestFullTile( - GenMode gen_mode, - int tiles, - ReductionOp reduction_op, - Int2Type sufficient_resources) -{ - const int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z; - const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD; - - int num_items = TILE_SIZE * tiles; - - // Allocate host arrays - T *h_in = new T[num_items]; - T h_reference[1]; - - // Initialize problem - Initialize(gen_mode, h_in, h_reference, reduction_op, num_items); - - // Initialize/clear device arrays - T *d_in = NULL; - T *d_out = NULL; - clock_t *d_elapsed = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(unsigned long long))); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * 1)); - CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * num_items, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemset(d_out, 0, sizeof(T) * 1)); - - // Test multi-tile (unguarded) - printf("TestFullTile %s, %s, gen-mode %d, num_items(%d), BLOCK_THREADS(%d) (%d,%d,%d), ITEMS_PER_THREAD(%d), tiles(%d), %s (%d bytes) elements:\n", - Equals::VALUE ? "Sum" : "Max", - (ALGORITHM == BLOCK_REDUCE_RAKING) ? "BLOCK_REDUCE_RAKING" : (ALGORITHM == BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY) ? "BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY" : "BLOCK_REDUCE_WARP_REDUCTIONS", - gen_mode, - num_items, - BLOCK_THREADS, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, - ITEMS_PER_THREAD, - tiles, - typeid(T).name(), - (int) sizeof(T)); - fflush(stdout); - - dim3 block_dims(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z); - FullTileReduceKernel<<<1, block_dims>>>( - d_in, - d_out, - reduction_op, - tiles, - d_elapsed); - - CubDebugExit(cudaPeekAtLastError()); - CubDebugExit(cudaDeviceSynchronize()); - - // Copy out and display results - printf("\tReduction results: "); - int compare = CompareDeviceResults(h_reference, d_out, 1, g_verbose, g_verbose); - printf("%s\n", compare ? 
"FAIL" : "PASS"); - AssertEquals(0, compare); - - printf("\tElapsed clocks: "); - DisplayDeviceResults(d_elapsed, 1); - - // Cleanup - if (h_in) delete[] h_in; - if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); - if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); - if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed)); -} - - -/** - * Test full-tile reduction. (Specialized for insufficient resources) - */ -template < - BlockReduceAlgorithm ALGORITHM, - int BLOCK_DIM_X, - int BLOCK_DIM_Y, - int BLOCK_DIM_Z, - int ITEMS_PER_THREAD, - typename T, - typename ReductionOp> -void TestFullTile( - GenMode gen_mode, - int tiles, - ReductionOp reduction_op, - Int2Type sufficient_resources) -{} - - -/** - * Test full-tile reduction. - */ -template < - BlockReduceAlgorithm ALGORITHM, - int BLOCK_DIM_X, - int BLOCK_DIM_Y, - int BLOCK_DIM_Z, - int ITEMS_PER_THREAD, - typename T, - typename ReductionOp> -void TestFullTile( - GenMode gen_mode, - int tiles, - ReductionOp reduction_op) -{ - // Check size of smem storage for the target arch to make sure it will fit - typedef BlockReduce BlockReduceT; - - enum - { -#if defined(SM100) || defined(SM110) || defined(SM130) - sufficient_smem = (sizeof(typename BlockReduceT::TempStorage) <= 16 * 1024), - sufficient_threads = ((BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 512), -#else - sufficient_smem = (sizeof(typename BlockReduceT::TempStorage) <= 48 * 1024), - sufficient_threads = ((BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 1024), -#endif - }; - - TestFullTile(gen_mode, tiles, reduction_op, Int2Type()); -} - - -/** - * Run battery of tests for different threadblock dimensions - */ -template < - BlockReduceAlgorithm ALGORITHM, - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - typename T, - typename ReductionOp> -void TestFullTile( - GenMode gen_mode, - int tiles, - ReductionOp reduction_op) -{ - TestFullTile(gen_mode, tiles, reduction_op); - TestFullTile(gen_mode, tiles, reduction_op); -} - -/** - * Run battery of tests for different thread items - */ -template < - BlockReduceAlgorithm ALGORITHM, - int BLOCK_THREADS, - typename T, - typename ReductionOp> -void TestFullTile( - GenMode gen_mode, - int tiles, - ReductionOp reduction_op) -{ - TestFullTile(gen_mode, tiles, reduction_op); - TestFullTile(gen_mode, tiles, reduction_op); -} - - -/** - * Run battery of full-tile tests for different numbers of tiles - */ -template < - BlockReduceAlgorithm ALGORITHM, - int BLOCK_THREADS, - typename T, - typename ReductionOp> -void TestFullTile( - GenMode gen_mode, - ReductionOp reduction_op) -{ - for (int tiles = 1; tiles < 3; tiles++) - { - TestFullTile(gen_mode, tiles, reduction_op); - } -} - - -//--------------------------------------------------------------------- -// Partial-tile test generation -//--------------------------------------------------------------------- - -/** - * Test partial-tile reduction. 
(Specialized for sufficient resources) - */ -template < - BlockReduceAlgorithm ALGORITHM, - int BLOCK_DIM_X, - int BLOCK_DIM_Y, - int BLOCK_DIM_Z, - typename T, - typename ReductionOp> -void TestPartialTile( - GenMode gen_mode, - int num_items, - ReductionOp reduction_op, - Int2Type sufficient_resources) -{ - const int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z; - const int TILE_SIZE = BLOCK_THREADS; - - // Allocate host arrays - T *h_in = new T[num_items]; - T h_reference[1]; - - // Initialize problem - Initialize(gen_mode, h_in, h_reference, reduction_op, num_items); - - // Initialize/clear device arrays - T *d_in = NULL; - T *d_out = NULL; - clock_t *d_elapsed = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(unsigned long long))); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * TILE_SIZE)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * 1)); - CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * num_items, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemset(d_out, 0, sizeof(T) * 1)); - - printf("TestPartialTile %s, gen-mode %d, num_items(%d), BLOCK_THREADS(%d) (%d,%d,%d), %s (%d bytes) elements:\n", - (ALGORITHM == BLOCK_REDUCE_RAKING) ? "BLOCK_REDUCE_RAKING" : (ALGORITHM == BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY) ? "BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY" : "BLOCK_REDUCE_WARP_REDUCTIONS", - gen_mode, - num_items, - BLOCK_THREADS, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, - typeid(T).name(), - (int) sizeof(T)); - fflush(stdout); - - dim3 block_dims(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z); - PartialTileReduceKernel<<<1, block_dims>>>( - d_in, - d_out, - num_items, - reduction_op, - d_elapsed); - - CubDebugExit(cudaPeekAtLastError()); - CubDebugExit(cudaDeviceSynchronize()); - - // Copy out and display results - printf("\tReduction results: "); - int compare = CompareDeviceResults(h_reference, d_out, 1, g_verbose, g_verbose); - printf("%s\n", compare ? 
"FAIL" : "PASS"); - AssertEquals(0, compare); - - printf("\tElapsed clocks: "); - DisplayDeviceResults(d_elapsed, 1); - - // Cleanup - if (h_in) delete[] h_in; - if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); - if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); - if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed)); -} - - - -/** - * Test partial-tile reduction (specialized for insufficient resources) - */ -template < - BlockReduceAlgorithm ALGORITHM, - int BLOCK_DIM_X, - int BLOCK_DIM_Y, - int BLOCK_DIM_Z, - typename T, - typename ReductionOp> -void TestPartialTile( - GenMode gen_mode, - int num_items, - ReductionOp reduction_op, - Int2Type sufficient_resources) -{} - - -/** - * Run battery of partial-tile tests for different numbers of effective threads and thread dimensions - */ -template < - BlockReduceAlgorithm ALGORITHM, - int BLOCK_DIM_X, - int BLOCK_DIM_Y, - int BLOCK_DIM_Z, - typename T, - typename ReductionOp> -void TestPartialTile( - GenMode gen_mode, - int num_items, - ReductionOp reduction_op) -{ - // Check size of smem storage for the target arch to make sure it will fit - typedef BlockReduce BlockReduceT; - - enum - { -#if defined(SM100) || defined(SM110) || defined(SM130) - sufficient_smem = sizeof(typename BlockReduceT::TempStorage) <= 16 * 1024, - sufficient_threads = (BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 512, -#else - sufficient_smem = sizeof(typename BlockReduceT::TempStorage) <= 48 * 1024, - sufficient_threads = (BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 1024, -#endif - }; - - TestPartialTile(gen_mode, num_items, reduction_op, Int2Type()); -} - - - -/** - * Run battery of partial-tile tests for different numbers of effective threads and thread dimensions - */ -template < - BlockReduceAlgorithm ALGORITHM, - int BLOCK_THREADS, - typename T, - typename ReductionOp> -void TestPartialTile( - GenMode gen_mode, - ReductionOp reduction_op) -{ - for ( - int num_items = 1; - num_items < BLOCK_THREADS; - num_items += CUB_MAX(1, BLOCK_THREADS / 5)) - { - TestPartialTile(gen_mode, num_items, reduction_op); - TestPartialTile(gen_mode, num_items, reduction_op); - } -} - - - -//--------------------------------------------------------------------- -// Main -//--------------------------------------------------------------------- - -/** - * Run battery of full-tile tests for different gen modes - */ -template < - BlockReduceAlgorithm ALGORITHM, - int BLOCK_THREADS, - typename T, - typename ReductionOp> -void Test( - ReductionOp reduction_op) -{ - TestFullTile(UNIFORM, reduction_op); - TestPartialTile(UNIFORM, reduction_op); - - TestFullTile(INTEGER_SEED, reduction_op); - TestPartialTile(INTEGER_SEED, reduction_op); - - if (Traits::CATEGORY != FLOATING_POINT) - { - // Don't test randomly-generated floats b/c of stability - TestFullTile(RANDOM, reduction_op); - TestPartialTile(RANDOM, reduction_op); - } -} - - -/** - * Run battery of tests for different block-reduction algorithmic variants - */ -template < - int BLOCK_THREADS, - typename T, - typename ReductionOp> -void Test( - ReductionOp reduction_op) -{ -#ifdef TEST_RAKING - Test(reduction_op); - Test(reduction_op); -#endif -#ifdef TEST_WARP_REDUCTIONS - Test(reduction_op); -#endif -} - - -/** - * Run battery of tests for different block sizes - */ -template < - typename T, - typename ReductionOp> -void Test( - ReductionOp reduction_op) -{ - Test<7, T>(reduction_op); - Test<32, T>(reduction_op); - Test<63, T>(reduction_op); - Test<97, T>(reduction_op); - Test<128, T>(reduction_op); - Test<238, 
T>(reduction_op); -} - - -/** - * Run battery of tests for different block sizes - */ -template -void Test() -{ - Test(Sum()); - Test(Max()); -} - - -/** - * Main - */ -int main(int argc, char** argv) -{ - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - args.GetCmdLineArgument("repeat", g_repeat); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--device=] " - "[--repeat=]" - "[--v] " - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - - // Get ptx version - int ptx_version; - CubDebugExit(PtxVersion(ptx_version)); - -#ifdef QUICK_TEST - - // Compile/run quick tests - - - printf("\n full tile ------------------------\n\n"); - - TestFullTile(RANDOM, 1, Sum()); - TestFullTile(RANDOM, 1, Sum()); - TestFullTile(RANDOM, 1, Sum()); - - TestFullTile(RANDOM, 1, Sum()); - TestFullTile(RANDOM, 1, Sum()); - TestFullTile(RANDOM, 1, Sum()); - - printf("\n partial tile ------------------------\n\n"); - - TestPartialTile(RANDOM, 7, Sum()); - TestPartialTile(RANDOM, 7, Sum()); - TestPartialTile(RANDOM, 7, Sum()); - -#else - - // Compile/run thorough tests - for (int i = 0; i <= g_repeat; ++i) - { - // primitives - Test(); - Test(); - Test(); - Test(); - if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted - Test(); - - Test(); - - // vector types - Test(); - Test(); - Test(); - Test(); - - Test(); - Test(); - Test(); - Test(); - - // Complex types - Test(); - Test(); - } - -#endif - - return 0; -} - - diff --git a/ml-xgboost/cub/test/test_block_scan.cu b/ml-xgboost/cub/test/test_block_scan.cu deleted file mode 100644 index ffefb0c..0000000 --- a/ml-xgboost/cub/test/test_block_scan.cu +++ /dev/null @@ -1,929 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
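
[Editor's sketch] The full-tile batteries in the test_block_reduce.cu hunks above all funnel into one cub::BlockReduce collective call. A minimal sketch of that pattern, assuming an int sum; the kernel name, block size parameterization, and algorithm choice below are illustrative, not taken from the deleted file:

#include <cub/block/block_reduce.cuh>

// Each thread owns ITEMS_PER_THREAD items of a full tile; the collective
// combines them block-wide. Only thread 0 receives the valid aggregate.
template <int BLOCK_THREADS, int ITEMS_PER_THREAD>
__global__ void FullTileSumSketch(const int *d_in, int *d_out)
{
    typedef cub::BlockReduce<int, BLOCK_THREADS,
                             cub::BLOCK_REDUCE_WARP_REDUCTIONS> BlockReduceT;

    // Shared memory for the collective's scratch space
    __shared__ typename BlockReduceT::TempStorage temp_storage;

    // Thread-local segment of the tile (blocked arrangement)
    int items[ITEMS_PER_THREAD];
    for (int i = 0; i < ITEMS_PER_THREAD; ++i)
        items[i] = d_in[threadIdx.x * ITEMS_PER_THREAD + i];

    int aggregate = BlockReduceT(temp_storage).Sum(items);
    if (threadIdx.x == 0)
        *d_out = aggregate;
}

The partial-tile variants tested above differ only in passing a single item per thread together with a num_valid count, i.e. BlockReduceT(temp_storage).Sum(item, num_valid).
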
- * - ******************************************************************************/ - -/****************************************************************************** - * Test of BlockScan utilities - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "test_util.h" - - -using namespace cub; - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -bool g_verbose = false; -int g_repeat = 0; -CachingDeviceAllocator g_allocator(true); - - -/** - * Primitive variant to test - */ -enum TestMode -{ - BASIC, - AGGREGATE, - PREFIX, -}; - - -/** - * Scan mode to test - */ -enum ScanMode -{ - EXCLUSIVE, - INCLUSIVE -}; - - -/** - * \brief WrapperFunctor (for precluding test-specialized dispatch to *Sum variants) - */ -template -struct WrapperFunctor -{ - OpT op; - - WrapperFunctor(OpT op) : op(op) {} - - template - __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const - { - return op(a, b); - } -}; - - -/** - * Stateful prefix functor - */ -template < - typename T, - typename ScanOpT> -struct BlockPrefixCallbackOp -{ - int linear_tid; - T prefix; - ScanOpT scan_op; - - __device__ __forceinline__ - BlockPrefixCallbackOp(int linear_tid, T prefix, ScanOpT scan_op) : - linear_tid(linear_tid), - prefix(prefix), - scan_op(scan_op) - {} - - __device__ __forceinline__ - T operator()(T block_aggregate) - { - // For testing purposes - T retval = (linear_tid == 0) ? prefix : T(); - prefix = scan_op(prefix, block_aggregate); - return retval; - } -}; - - -//--------------------------------------------------------------------- -// Exclusive scan -//--------------------------------------------------------------------- - -/// Exclusive scan (BASIC, 1) -template -__device__ __forceinline__ void DeviceTest( - BlockScanT &block_scan, T (&data)[1], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, IsPrimitiveT is_primitive) -{ - block_scan.ExclusiveScan(data[0], data[0], initial_value, scan_op); -} - -/// Exclusive scan (BASIC, ITEMS_PER_THREAD) -template -__device__ __forceinline__ void DeviceTest( - BlockScanT &block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, IsPrimitiveT is_primitive) -{ - block_scan.ExclusiveScan(data, data, initial_value, scan_op); -} - -/// Exclusive scan (AGGREGATE, 1) -template -__device__ __forceinline__ void DeviceTest( - BlockScanT &block_scan, T (&data)[1], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, IsPrimitiveT is_primitive) -{ - block_scan.ExclusiveScan(data[0], data[0], initial_value, scan_op, block_aggregate); -} - -/// Exclusive scan (AGGREGATE, ITEMS_PER_THREAD) -template -__device__ __forceinline__ void DeviceTest( - BlockScanT &block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, IsPrimitiveT is_primitive) -{ - block_scan.ExclusiveScan(data, data, initial_value, scan_op, block_aggregate); -} - -/// Exclusive scan (PREFIX, 1) -template 
-__device__ __forceinline__ void DeviceTest( - BlockScanT &block_scan, T (&data)[1], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, IsPrimitiveT is_primitive) -{ - block_scan.ExclusiveScan(data[0], data[0], scan_op, prefix_op); -} - -/// Exclusive scan (PREFIX, ITEMS_PER_THREAD) -template -__device__ __forceinline__ void DeviceTest( - BlockScanT &block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, IsPrimitiveT is_primitive) -{ - block_scan.ExclusiveScan(data, data, scan_op, prefix_op); -} - - -//--------------------------------------------------------------------- -// Exclusive sum -//--------------------------------------------------------------------- - -/// Exclusive sum (BASIC, 1) -template -__device__ __forceinline__ void DeviceTest( - BlockScanT &block_scan, T (&data)[1], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, Int2Type is_primitive) -{ - block_scan.ExclusiveSum(data[0], data[0]); -} - -/// Exclusive sum (BASIC, ITEMS_PER_THREAD) -template -__device__ __forceinline__ void DeviceTest( - BlockScanT &block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, Int2Type is_primitive) -{ - block_scan.ExclusiveSum(data, data); -} - -/// Exclusive sum (AGGREGATE, 1) -template -__device__ __forceinline__ void DeviceTest( - BlockScanT &block_scan, T (&data)[1], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, Int2Type is_primitive) -{ - block_scan.ExclusiveSum(data[0], data[0], block_aggregate); -} - -/// Exclusive sum (AGGREGATE, ITEMS_PER_THREAD) -template -__device__ __forceinline__ void DeviceTest( - BlockScanT &block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, Int2Type is_primitive) -{ - block_scan.ExclusiveSum(data, data, block_aggregate); -} - -/// Exclusive sum (PREFIX, 1) -template -__device__ __forceinline__ void DeviceTest( - BlockScanT &block_scan, T (&data)[1], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, Int2Type is_primitive) -{ - block_scan.ExclusiveSum(data[0], data[0], prefix_op); -} - -/// Exclusive sum (PREFIX, ITEMS_PER_THREAD) -template -__device__ __forceinline__ void DeviceTest( - BlockScanT &block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, Int2Type is_primitive) -{ - block_scan.ExclusiveSum(data, data, prefix_op); -} - - -//--------------------------------------------------------------------- -// Inclusive scan -//--------------------------------------------------------------------- - -/// Inclusive scan (BASIC, 1) -template -__device__ __forceinline__ void DeviceTest( - BlockScanT &block_scan, T (&data)[1], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, IsPrimitiveT is_primitive) -{ - block_scan.InclusiveScan(data[0], data[0], scan_op); -} - -/// Inclusive scan (BASIC, ITEMS_PER_THREAD) -template -__device__ __forceinline__ void DeviceTest( 
- BlockScanT &block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, IsPrimitiveT is_primitive) -{ - block_scan.InclusiveScan(data, data, scan_op); -} - -/// Inclusive scan (AGGREGATE, 1) -template -__device__ __forceinline__ void DeviceTest( - BlockScanT &block_scan, T (&data)[1], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, IsPrimitiveT is_primitive) -{ - block_scan.InclusiveScan(data[0], data[0], scan_op, block_aggregate); -} - -/// Inclusive scan (AGGREGATE, ITEMS_PER_THREAD) -template -__device__ __forceinline__ void DeviceTest( - BlockScanT &block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, IsPrimitiveT is_primitive) -{ - block_scan.InclusiveScan(data, data, scan_op, block_aggregate); -} - -/// Inclusive scan (PREFIX, 1) -template -__device__ __forceinline__ void DeviceTest( - BlockScanT &block_scan, T (&data)[1], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, IsPrimitiveT is_primitive) -{ - block_scan.InclusiveScan(data[0], data[0], scan_op, prefix_op); -} - -/// Inclusive scan (PREFIX, ITEMS_PER_THREAD) -template -__device__ __forceinline__ void DeviceTest( - BlockScanT &block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, ScanOpT &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, IsPrimitiveT is_primitive) -{ - block_scan.InclusiveScan(data, data, scan_op, prefix_op); -} - - -//--------------------------------------------------------------------- -// Inclusive sum -//--------------------------------------------------------------------- - -/// Inclusive sum (BASIC, 1) -template -__device__ __forceinline__ void DeviceTest( - BlockScanT &block_scan, T (&data)[1], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, Int2Type is_primitive) -{ - block_scan.InclusiveSum(data[0], data[0]); -} - -/// Inclusive sum (BASIC, ITEMS_PER_THREAD) -template -__device__ __forceinline__ void DeviceTest( - BlockScanT &block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, Int2Type is_primitive) -{ - block_scan.InclusiveSum(data, data); -} - -/// Inclusive sum (AGGREGATE, 1) -template -__device__ __forceinline__ void DeviceTest( - BlockScanT &block_scan, T (&data)[1], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, Int2Type is_primitive) -{ - block_scan.InclusiveSum(data[0], data[0], block_aggregate); -} - -/// Inclusive sum (AGGREGATE, ITEMS_PER_THREAD) -template -__device__ __forceinline__ void DeviceTest( - BlockScanT &block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, Int2Type is_primitive) -{ - block_scan.InclusiveSum(data, data, block_aggregate); -} - -/// Inclusive sum (PREFIX, 1) -template -__device__ __forceinline__ void DeviceTest( - BlockScanT &block_scan, T (&data)[1], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, 
Int2Type is_primitive) -{ - block_scan.InclusiveSum(data[0], data[0], prefix_op); -} - -/// Inclusive sum (PREFIX, ITEMS_PER_THREAD) -template -__device__ __forceinline__ void DeviceTest( - BlockScanT &block_scan, T (&data)[ITEMS_PER_THREAD], T &initial_value, Sum &scan_op, T &block_aggregate, PrefixCallbackOp &prefix_op, - Int2Type scan_mode, Int2Type test_mode, Int2Type is_primitive) -{ - block_scan.InclusiveSum(data, data, prefix_op); -} - - - -//--------------------------------------------------------------------- -// Test kernels -//--------------------------------------------------------------------- - -/** - * BlockScan test kernel. - */ -template < - int BLOCK_DIM_X, - int BLOCK_DIM_Y, - int BLOCK_DIM_Z, - int ITEMS_PER_THREAD, - ScanMode SCAN_MODE, - TestMode TEST_MODE, - BlockScanAlgorithm ALGORITHM, - typename T, - typename ScanOpT> -__launch_bounds__ (BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) -__global__ void BlockScanKernel( - T *d_in, - T *d_out, - T *d_aggregate, - ScanOpT scan_op, - T initial_value, - clock_t *d_elapsed) -{ - const int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z; - const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD; - - // Parameterize BlockScan type for our thread block - typedef BlockScan BlockScanT; - - // Allocate temp storage in shared memory - __shared__ typename BlockScanT::TempStorage temp_storage; - - int linear_tid = RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z); - - // Per-thread tile data - T data[ITEMS_PER_THREAD]; - LoadDirectBlocked(linear_tid, d_in, data); - - __threadfence_block(); // workaround to prevent clock hoisting - clock_t start = clock(); - __threadfence_block(); // workaround to prevent clock hoisting - - // Test scan - T block_aggregate; - BlockScanT block_scan(temp_storage); - BlockPrefixCallbackOp prefix_op(linear_tid, initial_value, scan_op); - - DeviceTest(block_scan, data, initial_value, scan_op, block_aggregate, prefix_op, - Int2Type(), Int2Type(), Int2Type::PRIMITIVE>()); - - // Stop cycle timer - __threadfence_block(); // workaround to prevent clock hoisting - clock_t stop = clock(); - __threadfence_block(); // workaround to prevent clock hoisting - - // Store output - StoreDirectBlocked(linear_tid, d_out, data); - - // Store block_aggregate - if (TEST_MODE != BASIC) - d_aggregate[linear_tid] = block_aggregate; - - // Store prefix - if (TEST_MODE == PREFIX) - { - if (linear_tid == 0) - d_out[TILE_SIZE] = prefix_op.prefix; - } - - // Store time - if (linear_tid == 0) - *d_elapsed = (start > stop) ? 
start - stop : stop - start; -} - - - -//--------------------------------------------------------------------- -// Host utility subroutines -//--------------------------------------------------------------------- - -/** - * Initialize exclusive-scan problem (and solution) - */ -template -T Initialize( - GenMode gen_mode, - T *h_in, - T *h_reference, - int num_items, - ScanOpT scan_op, - T initial_value, - Int2Type) -{ - InitValue(gen_mode, h_in[0], 0); - - T block_aggregate = h_in[0]; - h_reference[0] = initial_value; - T inclusive = scan_op(initial_value, h_in[0]); - - for (int i = 1; i < num_items; ++i) - { - InitValue(gen_mode, h_in[i], i); - h_reference[i] = inclusive; - inclusive = scan_op(inclusive, h_in[i]); - block_aggregate = scan_op(block_aggregate, h_in[i]); - } - - return block_aggregate; -} - - -/** - * Initialize inclusive-scan problem (and solution) - */ -template -T Initialize( - GenMode gen_mode, - T *h_in, - T *h_reference, - int num_items, - ScanOpT scan_op, - T initial_value, - Int2Type) -{ - InitValue(gen_mode, h_in[0], 0); - - T block_aggregate = h_in[0]; - T inclusive = scan_op(initial_value, h_in[0]); - h_reference[0] = inclusive; - - for (int i = 1; i < num_items; ++i) - { - InitValue(gen_mode, h_in[i], i); - inclusive = scan_op(inclusive, h_in[i]); - block_aggregate = scan_op(block_aggregate, h_in[i]); - h_reference[i] = inclusive; - } - - return block_aggregate; -} - - -/** - * Test threadblock scan. (Specialized for sufficient resources) - */ -template < - int BLOCK_DIM_X, - int BLOCK_DIM_Y, - int BLOCK_DIM_Z, - int ITEMS_PER_THREAD, - ScanMode SCAN_MODE, - TestMode TEST_MODE, - BlockScanAlgorithm ALGORITHM, - typename ScanOpT, - typename T> -void Test( - GenMode gen_mode, - ScanOpT scan_op, - T initial_value, - Int2Type sufficient_resources) -{ - const int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z; - const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD; - - // Allocate host arrays - T *h_in = new T[TILE_SIZE]; - T *h_reference = new T[TILE_SIZE]; - T *h_aggregate = new T[BLOCK_THREADS]; - - // Initialize problem - T block_aggregate = Initialize( - gen_mode, - h_in, - h_reference, - TILE_SIZE, - scan_op, - initial_value, - Int2Type()); - - // Test reference block_aggregate is returned in all threads - for (int i = 0; i < BLOCK_THREADS; ++i) - { - h_aggregate[i] = block_aggregate; - } - - // Run kernel - printf("Test-mode %d, gen-mode %d, policy %d, %s %s BlockScan, %d (%d,%d,%d) threadblock threads, %d items per thread, %d tile size, %s (%d bytes) elements:\n", - TEST_MODE, gen_mode, ALGORITHM, - (SCAN_MODE == INCLUSIVE) ? 
"Inclusive" : "Exclusive", typeid(ScanOpT).name(), - BLOCK_THREADS, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, - ITEMS_PER_THREAD, TILE_SIZE, - typeid(T).name(), (int) sizeof(T)); - fflush(stdout); - - // Initialize/clear device arrays - T *d_in = NULL; - T *d_out = NULL; - T *d_aggregate = NULL; - clock_t *d_elapsed = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(unsigned long long))); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * TILE_SIZE)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * (TILE_SIZE + 2))); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_aggregate, sizeof(T) * BLOCK_THREADS)); - CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * TILE_SIZE, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemset(d_out, 0, sizeof(T) * (TILE_SIZE + 1))); - CubDebugExit(cudaMemset(d_aggregate, 0, sizeof(T) * BLOCK_THREADS)); - - // Display input problem data - if (g_verbose) - { - printf("Input data: "); - for (int i = 0; i < TILE_SIZE; i++) - { - std::cout << CoutCast(h_in[i]) << ", "; - } - printf("\n\n"); - } - - // Run block_aggregate/prefix kernel - dim3 block_dims(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z); - BlockScanKernel<<<1, block_dims>>>( - d_in, - d_out, - d_aggregate, - scan_op, - initial_value, - d_elapsed); - - CubDebugExit(cudaPeekAtLastError()); - CubDebugExit(cudaDeviceSynchronize()); - - // Copy out and display results - printf("\tScan results: "); - int compare = CompareDeviceResults(h_reference, d_out, TILE_SIZE, g_verbose, g_verbose); - printf("%s\n", compare ? "FAIL" : "PASS"); - AssertEquals(0, compare); - - if (TEST_MODE == AGGREGATE) - { - // Copy out and display block_aggregate - printf("\tScan block aggregate: "); - compare = CompareDeviceResults(h_aggregate, d_aggregate, BLOCK_THREADS, g_verbose, g_verbose); - printf("%s\n", compare ? "FAIL" : "PASS"); - AssertEquals(0, compare); - } - - if (TEST_MODE == PREFIX) - { - // Copy out and display updated prefix - printf("\tScan running total: "); - T running_total = scan_op(initial_value, block_aggregate); - compare = CompareDeviceResults(&running_total, d_out + TILE_SIZE, 1, g_verbose, g_verbose); - printf("%s\n", compare ? "FAIL" : "PASS"); - AssertEquals(0, compare); - } - - printf("\tElapsed clocks: "); - DisplayDeviceResults(d_elapsed, 1); - - // Cleanup - if (h_in) delete[] h_in; - if (h_reference) delete[] h_reference; - if (h_aggregate) delete[] h_aggregate; - if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); - if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); - if (d_aggregate) CubDebugExit(g_allocator.DeviceFree(d_aggregate)); - if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed)); -} - - -/** - * Test threadblock scan. (Specialized for insufficient resources) - */ -template < - int BLOCK_DIM_X, - int BLOCK_DIM_Y, - int BLOCK_DIM_Z, - int ITEMS_PER_THREAD, - ScanMode SCAN_MODE, - TestMode TEST_MODE, - BlockScanAlgorithm ALGORITHM, - typename ScanOpT, - typename T> -void Test( - GenMode gen_mode, - ScanOpT scan_op, - T initial_value, - Int2Type sufficient_resources) -{} - - -/** - * Test threadblock scan. 
- */ -template < - int BLOCK_DIM_X, - int BLOCK_DIM_Y, - int BLOCK_DIM_Z, - int ITEMS_PER_THREAD, - ScanMode SCAN_MODE, - TestMode TEST_MODE, - BlockScanAlgorithm ALGORITHM, - typename ScanOpT, - typename T> -void Test( - GenMode gen_mode, - ScanOpT scan_op, - T initial_value) -{ - // Check size of smem storage for the target arch to make sure it will fit - typedef BlockScan BlockScanT; - - enum - { -#if defined(SM100) || defined(SM110) || defined(SM130) - sufficient_smem = (sizeof(typename BlockScanT::TempStorage) <= 16 * 1024), - sufficient_threads = ((BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 512), -#else - sufficient_smem = (sizeof(typename BlockScanT::TempStorage) <= 16 * 1024), - sufficient_threads = ((BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 1024), -#endif - -#if defined(_WIN32) || defined(_WIN64) - // Accommodate ptxas crash bug (access violation) on Windows - special_skip = ((TEST_ARCH <= 130) && (Equals::VALUE) && (BLOCK_DIM_Z > 1)), -#else - special_skip = false, -#endif - sufficient_resources = (sufficient_smem && sufficient_threads && !special_skip), - }; - - Test( - gen_mode, scan_op, initial_value, Int2Type()); -} - - - -/** - * Run test for different threadblock dimensions - */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - ScanMode SCAN_MODE, - TestMode TEST_MODE, - BlockScanAlgorithm ALGORITHM, - typename ScanOpT, - typename T> -void Test( - GenMode gen_mode, - ScanOpT scan_op, - T initial_value) -{ - Test(gen_mode, scan_op, initial_value); - Test(gen_mode, scan_op, initial_value); -} - - -/** - * Run test for different policy types - */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - ScanMode SCAN_MODE, - TestMode TEST_MODE, - typename ScanOpT, - typename T> -void Test( - GenMode gen_mode, - ScanOpT scan_op, - T initial_value) -{ -#ifdef TEST_RAKING - Test(gen_mode, scan_op, initial_value); -#endif -#ifdef TEST_RAKING_MEMOIZE - Test(gen_mode, scan_op, initial_value); -#endif -#ifdef TEST_WARP_SCANS - Test(gen_mode, scan_op, initial_value); -#endif -} - - -/** - * Run tests for different primitive variants - */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - typename ScanOpT, - typename T> -void Test( - GenMode gen_mode, - ScanOpT scan_op, - T identity, - T initial_value) -{ - // Exclusive (use identity as initial value because it will dispatch to *Sum variants that don't take initial values) - Test(gen_mode, scan_op, identity); - Test(gen_mode, scan_op, identity); - Test(gen_mode, scan_op, identity); - - // Exclusive (non-specialized, so we can use initial-value) - Test(gen_mode, WrapperFunctor(scan_op), initial_value); - Test(gen_mode, WrapperFunctor(scan_op), initial_value); - Test(gen_mode, WrapperFunctor(scan_op), initial_value); - - // Inclusive - Test(gen_mode, scan_op, identity); // This scan doesn't take an initial value - Test(gen_mode, scan_op, identity); // This scan doesn't take an initial value - Test(gen_mode, scan_op, initial_value); -} - - -/** - * Run tests for different problem-generation options - */ -template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - typename ScanOpT, - typename T> -void Test( - ScanOpT scan_op, - T identity, - T initial_value) -{ - Test(UNIFORM, scan_op, identity, initial_value); - Test(INTEGER_SEED, scan_op, identity, initial_value); - - // Don't test randomly-generated floats b/c of stability - if (Traits::CATEGORY != FLOATING_POINT) - Test(RANDOM, scan_op, identity, initial_value); -} - - -/** - * Run tests for different data types and scan ops - */ -template < - int BLOCK_THREADS, - 
int ITEMS_PER_THREAD> -void Test() -{ - // Get ptx version - int ptx_version; - CubDebugExit(PtxVersion(ptx_version)); - - // primitive - Test(Sum(), (unsigned char) 0, (unsigned char) 99); - Test(Sum(), (unsigned short) 0, (unsigned short) 99); - Test(Sum(), (unsigned int) 0, (unsigned int) 99); - Test(Sum(), (unsigned long long) 0, (unsigned long long) 99); - Test(Sum(), (float) 0, (float) 99); - - // primitive (alternative scan op) - Test(Max(), std::numeric_limits::min(), (char) 99); - Test(Max(), std::numeric_limits::min(), (short) 99); - Test(Max(), std::numeric_limits::min(), (int) 99); - Test(Max(), std::numeric_limits::min(), (long long) 99); - - if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted - Test(Max(), std::numeric_limits::max() * -1, (double) 99); - - // vec-1 - Test(Sum(), make_uchar1(0), make_uchar1(17)); - - // vec-2 - Test(Sum(), make_uchar2(0, 0), make_uchar2(17, 21)); - Test(Sum(), make_ushort2(0, 0), make_ushort2(17, 21)); - Test(Sum(), make_uint2(0, 0), make_uint2(17, 21)); - Test(Sum(), make_ulonglong2(0, 0), make_ulonglong2(17, 21)); - - // vec-4 - Test(Sum(), make_char4(0, 0, 0, 0), make_char4(17, 21, 32, 85)); - Test(Sum(), make_short4(0, 0, 0, 0), make_short4(17, 21, 32, 85)); - Test(Sum(), make_int4(0, 0, 0, 0), make_int4(17, 21, 32, 85)); - Test(Sum(), make_longlong4(0, 0, 0, 0), make_longlong4(17, 21, 32, 85)); - - // complex - Test(Sum(), TestFoo::MakeTestFoo(0, 0, 0, 0), TestFoo::MakeTestFoo(17, 21, 32, 85)); - Test(Sum(), TestBar(0, 0), TestBar(17, 21)); - -} - - -/** - * Run tests for different items per thread - */ -template -void Test() -{ - Test(); - Test(); - Test(); -} - - - -/** - * Main - */ -int main(int argc, char** argv) -{ - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - args.GetCmdLineArgument("repeat", g_repeat); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--device=] " - "[--repeat=]" - "[--v] " - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - -#ifdef QUICK_TEST - - Test<128, 1, 1, 1, EXCLUSIVE, AGGREGATE, BLOCK_SCAN_WARP_SCANS>(UNIFORM, Sum(), int(0)); - - // Compile/run quick tests - Test<128, 1, 1, 4, EXCLUSIVE, AGGREGATE, BLOCK_SCAN_WARP_SCANS>(UNIFORM, Sum(), int(0)); - Test<128, 1, 1, 4, EXCLUSIVE, AGGREGATE, BLOCK_SCAN_RAKING>(UNIFORM, Sum(), int(0)); - Test<128, 1, 1, 4, EXCLUSIVE, AGGREGATE, BLOCK_SCAN_RAKING_MEMOIZE>(UNIFORM, Sum(), int(0)); - - Test<128, 1, 1, 2, INCLUSIVE, PREFIX, BLOCK_SCAN_RAKING>(INTEGER_SEED, Sum(), TestFoo::MakeTestFoo(17, 21, 32, 85)); - Test<128, 1, 1, 1, EXCLUSIVE, AGGREGATE, BLOCK_SCAN_WARP_SCANS>(UNIFORM, Sum(), make_longlong4(17, 21, 32, 85)); - - -#else - - // Compile/run thorough tests - for (int i = 0; i <= g_repeat; ++i) - { - // Run tests for different threadblock sizes - Test<17>(); - Test<32>(); - Test<62>(); - Test<65>(); -// Test<96>(); // TODO: file bug for UNREACHABLE error for Test<96, 9, BASIC, BLOCK_SCAN_RAKING>(UNIFORM, Sum(), NullType(), make_ulonglong2(17, 21)); - Test<128>(); - } - -#endif - - return 0; -} - - - - diff --git a/ml-xgboost/cub/test/test_device_histogram.cu b/ml-xgboost/cub/test/test_device_histogram.cu deleted file mode 100644 index d213db8..0000000 --- a/ml-xgboost/cub/test/test_device_histogram.cu +++ /dev/null @@ -1,1564 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. 
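
[Editor's sketch] Likewise, the scan batteries of the test_block_scan.cu hunks above all reduce to a single cub::BlockScan call site. A minimal sketch of the exclusive-sum-with-aggregate case (the AGGREGATE test mode), assuming int elements; names are illustrative:

#include <cub/block/block_load.cuh>
#include <cub/block/block_scan.cuh>
#include <cub/block/block_store.cuh>

// Exclusive prefix sum across one thread block; every thread also
// receives the block-wide aggregate of all inputs.
template <int BLOCK_THREADS, int ITEMS_PER_THREAD>
__global__ void ExclusiveSumSketch(const int *d_in, int *d_out, int *d_aggregate)
{
    typedef cub::BlockScan<int, BLOCK_THREADS,
                           cub::BLOCK_SCAN_WARP_SCANS> BlockScanT;
    __shared__ typename BlockScanT::TempStorage temp_storage;

    // Blocked load of this thread's segment of the tile
    int items[ITEMS_PER_THREAD];
    cub::LoadDirectBlocked(threadIdx.x, d_in, items);

    int block_aggregate;
    BlockScanT(temp_storage).ExclusiveSum(items, items, block_aggregate);

    cub::StoreDirectBlocked(threadIdx.x, d_out, items);
    d_aggregate[threadIdx.x] = block_aggregate;
}

The PREFIX mode exercised above swaps block_aggregate for a stateful callback functor (like BlockPrefixCallbackOp in the deleted kernel), whose operator() is handed the block aggregate and returns the running prefix for the next tile.
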
- * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/****************************************************************************** - * Test of DeviceHistogram utilities - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include -#include -#include -#include - -#if defined(QUICK_TEST) || defined(QUICKER_TEST) - #include -#endif - -#include -#include - -#include "test_util.h" - -using namespace cub; - - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - - -// Dispatch types -enum Backend -{ - CUB, // CUB method - NPP, // NPP method - CDP, // GPU-based (dynamic parallelism) dispatch to CUB method -}; - - -bool g_verbose_input = false; -bool g_verbose = false; -int g_timing_iterations = 0; -int g_repeat = 0; -CachingDeviceAllocator g_allocator(true); - - - - -//--------------------------------------------------------------------- -// Dispatch to NPP histogram -//--------------------------------------------------------------------- - -#if defined(QUICK_TEST) || defined(QUICKER_TEST) - -/** - * Dispatch to single-channel 8b NPP histo-even - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t DispatchEven( - Int2Type<1> num_channels, - Int2Type<1> num_active_channels, - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - unsigned char *d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples). 
- CounterT *d_histogram[1], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channeli, the allocation length of d_histograms[i] should be num_levels[i] - 1. - int num_levels[1], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channeli is num_levels[i] - 1. - LevelT lower_level[1], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel. - LevelT upper_level[1], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel. - OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest - OffsetT num_rows, ///< [in] The number of rows in the region of interest - OffsetT row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest - cudaStream_t stream, - bool debug_synchronous) -{ - typedef unsigned char SampleT; - - cudaError_t error = cudaSuccess; - NppiSize oSizeROI = { - num_row_pixels, - num_rows - }; - - if (d_temp_storage_bytes == NULL) - { - int nDeviceBufferSize; - nppiHistogramEvenGetBufferSize_8u_C1R(oSizeROI, num_levels[0] ,&nDeviceBufferSize); - temp_storage_bytes = nDeviceBufferSize; - } - else - { - for (int i = 0; i < timing_timing_iterations; ++i) - { - // compute the histogram - nppiHistogramEven_8u_C1R( - d_samples, - row_stride_bytes, - oSizeROI, - d_histogram[0], - num_levels[0], - lower_level[0], - upper_level[0], - (Npp8u*) d_temp_storage); - } - } - - return error; -} - - -/** - * Dispatch to 3/4 8b NPP histo-even - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t DispatchEven( - Int2Type<4> num_channels, - Int2Type<3> num_active_channels, - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - unsigned char *d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples). - CounterT *d_histogram[3], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channeli, the allocation length of d_histograms[i] should be num_levels[i] - 1. - int num_levels[3], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channeli is num_levels[i] - 1. - LevelT lower_level[3], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel. - LevelT upper_level[3], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel. 
- OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest - OffsetT num_rows, ///< [in] The number of rows in the region of interest - OffsetT row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest - cudaStream_t stream, - bool debug_synchronous) -{ - typedef unsigned char SampleT; - - cudaError_t error = cudaSuccess; - NppiSize oSizeROI = { - num_row_pixels, - num_rows - }; - - if (d_temp_storage_bytes == NULL) - { - int nDeviceBufferSize; - nppiHistogramEvenGetBufferSize_8u_AC4R(oSizeROI, num_levels ,&nDeviceBufferSize); - temp_storage_bytes = nDeviceBufferSize; - } - else - { - for (int i = 0; i < timing_timing_iterations; ++i) - { - // compute the histogram - nppiHistogramEven_8u_AC4R( - d_samples, - row_stride_bytes, - oSizeROI, - d_histogram, - num_levels, - lower_level, - upper_level, - (Npp8u*) d_temp_storage); - } - } - - return error; -} - - -#endif // #if defined(QUICK_TEST) || defined(QUICKER_TEST) - - -//--------------------------------------------------------------------- -// Dispatch to different DeviceHistogram entrypoints -//--------------------------------------------------------------------- - -/** - * Dispatch to CUB single histogram-even entrypoint - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t DispatchEven( - Int2Type<1> num_channels, - Int2Type<1> num_active_channels, - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples). - CounterT *d_histogram[1], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channeli, the allocation length of d_histograms[i] should be num_levels[i] - 1. - int num_levels[1], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channeli is num_levels[i] - 1. - LevelT lower_level[1], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel. - LevelT upper_level[1], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel. 
- OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest - OffsetT num_rows, ///< [in] The number of rows in the region of interest - OffsetT row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest - cudaStream_t stream, - bool debug_synchronous) -{ - typedef typename std::iterator_traits::value_type SampleT; - - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceHistogram::HistogramEven( - d_temp_storage, - temp_storage_bytes, - (const SampleT *) d_samples, - d_histogram[0], - num_levels[0], - lower_level[0], - upper_level[0], - num_row_pixels, - num_rows, - row_stride_bytes, - stream, - debug_synchronous); - } - return error; -} - -/** - * Dispatch to CUB multi histogram-even entrypoint - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t DispatchEven( - Int2Type num_channels, - Int2Type num_active_channels, - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples). - CounterT *d_histogram[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channeli, the allocation length of d_histograms[i] should be num_levels[i] - 1. - int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channeli is num_levels[i] - 1. - LevelT lower_level[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel. - LevelT upper_level[NUM_ACTIVE_CHANNELS], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel. - OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest - OffsetT num_rows, ///< [in] The number of rows in the region of interest - OffsetT row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest - cudaStream_t stream, - bool debug_synchronous) -{ - typedef typename std::iterator_traits::value_type SampleT; - - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceHistogram::MultiHistogramEven( - d_temp_storage, - temp_storage_bytes, - (const SampleT *) d_samples, - d_histogram, - num_levels, - lower_level, - upper_level, - num_row_pixels, - num_rows, - row_stride_bytes, - stream, - debug_synchronous); - } - return error; -} - - -/** - * Dispatch to CUB single histogram-range entrypoint - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t DispatchRange( - Int2Type<1> num_channels, - Int2Type<1> num_active_channels, - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. 
The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples). - CounterT *d_histogram[1], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channeli, the allocation length of d_histograms[i] should be num_levels[i] - 1. - int num_levels[1], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channeli is num_levels[i] - 1. - LevelT *d_levels[1], ///< [in] The pointers to the arrays of boundaries (levels), one for each active channel. Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive. - OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest - OffsetT num_rows, ///< [in] The number of rows in the region of interest - OffsetT row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest - cudaStream_t stream, - bool debug_synchronous) -{ - typedef typename std::iterator_traits::value_type SampleT; - - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceHistogram::HistogramRange( - d_temp_storage, - temp_storage_bytes, - (const SampleT *) d_samples, - d_histogram[0], - num_levels[0], - d_levels[0], - num_row_pixels, - num_rows, - row_stride_bytes, - stream, - debug_synchronous); - } - return error; -} - - -/** - * Dispatch to CUB multi histogram-range entrypoint - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t DispatchRange( - Int2Type num_channels, - Int2Type num_active_channels, - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples). - CounterT *d_histogram[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channeli, the allocation length of d_histograms[i] should be num_levels[i] - 1. - int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channeli is num_levels[i] - 1. - LevelT *d_levels[NUM_ACTIVE_CHANNELS], ///< [in] The pointers to the arrays of boundaries (levels), one for each active channel. Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive. 
- OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest - OffsetT num_rows, ///< [in] The number of rows in the region of interest - OffsetT row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest - cudaStream_t stream, - bool debug_synchronous) -{ - typedef typename std::iterator_traits::value_type SampleT; - - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceHistogram::MultiHistogramRange( - d_temp_storage, - temp_storage_bytes, - (const SampleT *) d_samples, - d_histogram, - num_levels, - d_levels, - num_row_pixels, - num_rows, - row_stride_bytes, - stream, - debug_synchronous); - } - return error; -} - - - -//--------------------------------------------------------------------- -// CUDA nested-parallelism test kernel -//--------------------------------------------------------------------- - -/** - * Simple wrapper kernel to invoke DeviceHistogram - * / -template -__global__ void CnpDispatchKernel( - Int2Type algorithm, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t temp_storage_bytes, - SampleT *d_samples, - SampleIteratorT d_sample_itr, - ArrayWrapper d_out_histograms, - int num_samples, - bool debug_synchronous) -{ -#ifndef CUB_CDP - *d_cdp_error = cudaErrorNotSupported; -#else - *d_cdp_error = Dispatch(algorithm, Int2Type(), timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_samples, d_sample_itr, d_out_histograms.array, num_samples, 0, debug_synchronous); - *d_temp_storage_bytes = temp_storage_bytes; -#endif -} - - -/ ** - * Dispatch to CDP kernel - * / -template -cudaError_t Dispatch( - Int2Type algorithm, - Int2Type use_cdp, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - SampleT *d_samples, - SampleIteratorT d_sample_itr, - CounterT *d_histograms[NUM_ACTIVE_CHANNELS], - int num_samples, - cudaStream_t stream, - bool debug_synchronous) -{ - // Setup array wrapper for histogram channel output (because we can't pass static arrays as kernel parameters) - ArrayWrapper d_histo_wrapper; - for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) - d_histo_wrapper.array[CHANNEL] = d_histograms[CHANNEL]; - - // Invoke kernel to invoke device-side dispatch - CnpDispatchKernel<<<1,1>>>(algorithm, timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_samples, d_sample_itr, d_histo_wrapper, num_samples, debug_synchronous); - - // Copy out temp_storage_bytes - CubDebugExit(cudaMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) * 1, cudaMemcpyDeviceToHost)); - - // Copy out error - cudaError_t retval; - CubDebugExit(cudaMemcpy(&retval, d_cdp_error, sizeof(cudaError_t) * 1, cudaMemcpyDeviceToHost)); - return retval; -} -*/ - - -//--------------------------------------------------------------------- -// Test generation -//--------------------------------------------------------------------- - -// Searches for bin given a list of bin-boundary levels -template -struct SearchTransform -{ - LevelT *levels; // Pointer to levels array - int num_levels; // Number of levels in array - - // Functor for converting samples to bin-ids (num_levels is returned if sample is out of range) - template - int operator()(SampleT sample) - { - int bin = 
int(std::upper_bound(levels, levels + num_levels, (LevelT) sample) - levels - 1); - if (bin < 0) - { - // Sample out of range - return num_levels; - } - return bin; - } -}; - - -// Scales samples to evenly-spaced bins -template -struct ScaleTransform -{ - int num_levels; // Number of levels in array - LevelT max; // Max sample level (exclusive) - LevelT min; // Min sample level (inclusive) - LevelT scale; // Bin scaling factor - - void Init( - int num_levels, // Number of levels in array - LevelT max, // Max sample level (exclusive) - LevelT min, // Min sample level (inclusive) - LevelT scale) // Bin scaling factor - { - this->num_levels = num_levels; - this->max = max; - this->min = min; - this->scale = scale; - } - - // Functor for converting samples to bin-ids (num_levels is returned if sample is out of range) - template - int operator()(SampleT sample) - { - if ((sample < min) || (sample >= max)) - { - // Sample out of range - return num_levels; - } - - return (int) ((((LevelT) sample) - min) / scale); - } -}; - -// Scales samples to evenly-spaced bins -template <> -struct ScaleTransform -{ - int num_levels; // Number of levels in array - float max; // Max sample level (exclusive) - float min; // Min sample level (inclusive) - float scale; // Bin scaling factor - - void Init( - int num_levels, // Number of levels in array - float max, // Max sample level (exclusive) - float min, // Min sample level (inclusive) - float scale) // Bin scaling factor - { - this->num_levels = num_levels; - this->max = max; - this->min = min; - this->scale = 1.0f / scale; - } - - // Functor for converting samples to bin-ids (num_levels is returned if sample is out of range) - template - int operator()(SampleT sample) - { - if ((sample < min) || (sample >= max)) - { - // Sample out of range - return num_levels; - } - - return (int) ((((float) sample) - min) * scale); - } -}; - - -/** - * Generate sample - */ -template -void Sample(T &datum, LevelT max_level, int entropy_reduction) -{ - unsigned int max = (unsigned int) -1; - unsigned int bits; - RandomBits(bits, entropy_reduction); - float fraction = (float(bits) / max); - - datum = (T) (fraction * max_level); -} - - -/** - * Initialize histogram problem (and solution) - */ -template < - int NUM_CHANNELS, - int NUM_ACTIVE_CHANNELS, - typename LevelT, - typename SampleT, - typename CounterT, - typename TransformOp, - typename OffsetT> -void Initialize( - LevelT max_level, - int entropy_reduction, - SampleT *h_samples, - int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channeli is num_levels[i] - 1. - TransformOp transform_op[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel. - CounterT *h_histogram[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channeli, the allocation length of d_histograms[i] should be num_levels[i] - 1. - OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest - OffsetT num_rows, ///< [in] The number of rows in the region of interest - OffsetT row_stride_bytes) ///< [in] The number of bytes between starts of consecutive rows in the region of interest -{ - printf("Initializing... 
"); fflush(stdout); - - // Init bins - for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) - { - for (int bin = 0; bin < num_levels[CHANNEL] - 1; ++bin) - { - h_histogram[CHANNEL][bin] = 0; - } - } - - // Initialize samples - if (g_verbose_input) printf("Samples: \n"); - for (OffsetT row = 0; row < num_rows; ++row) - { - for (OffsetT pixel = 0; pixel < num_row_pixels; ++pixel) - { - if (g_verbose_input) printf("["); - for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel) - { - // Sample offset - OffsetT offset = (row * (row_stride_bytes / sizeof(SampleT))) + (pixel * NUM_CHANNELS) + channel; - - // Init sample value - Sample(h_samples[offset], max_level, entropy_reduction); - if (g_verbose_input) - { - if (channel > 0) printf(", "); - std::cout << CoutCast(h_samples[offset]); - } - - // Update sample bin - int bin = transform_op[channel](h_samples[offset]); - if (g_verbose_input) printf(" (%d)", bin); fflush(stdout); - if ((bin >= 0) && (bin < num_levels[channel] - 1)) - { - // valid bin - h_histogram[channel][bin]++; - } - } - if (g_verbose_input) printf("]"); - } - if (g_verbose_input) printf("\n\n"); - } - - printf("Done\n"); fflush(stdout); -} - - -/** - * Test histogram-even - */ -template < - Backend BACKEND, - int NUM_CHANNELS, - int NUM_ACTIVE_CHANNELS, - typename SampleT, - typename CounterT, - typename LevelT, - typename OffsetT> -void TestEven( - LevelT max_level, - int entropy_reduction, - int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channeli is num_levels[i] - 1. - LevelT lower_level[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel. - LevelT upper_level[NUM_ACTIVE_CHANNELS], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel. - OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest - OffsetT num_rows, ///< [in] The number of rows in the region of interest - OffsetT row_stride_bytes) ///< [in] The number of bytes between starts of consecutive rows in the region of interest -{ - OffsetT total_samples = num_rows * (row_stride_bytes / sizeof(SampleT)); - - printf("\n----------------------------\n"); - printf("%s cub::DeviceHistogramEven %d pixels (%d height, %d width, %d-byte row stride), %d %d-byte %s samples (entropy reduction %d), %s counters, %d/%d channels, max sample ", - (BACKEND == CDP) ? "CDP CUB" : (BACKEND == NPP) ? "NPP" : "CUB", - (int) (num_row_pixels * num_rows), - (int) num_rows, - (int) num_row_pixels, - (int) row_stride_bytes, - (int) total_samples, - (int) sizeof(SampleT), - typeid(SampleT).name(), - entropy_reduction, - typeid(CounterT).name(), - NUM_ACTIVE_CHANNELS, - NUM_CHANNELS); - std::cout << CoutCast(max_level) << "\n"; - for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel) - std::cout << "\n\tChannel " << channel << ": " << num_levels[channel] - 1 << " bins [" << lower_level[channel] << ", " << upper_level[channel] << ")\n"; - fflush(stdout); - - // Allocate and initialize host and device data - - typedef SampleT Foo; // rename type to quelch gcc warnings (bug?) 
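
[Editor's sketch] TestEven then drives the standard two-phase CUB dispatch: a first call with a NULL d_temp_storage that only computes the temporary-storage size, followed by the real call. A minimal host-side sketch of that pattern for the simplest single-channel overload; the function and variable names here are illustrative:

#include <cub/device/device_histogram.cuh>
#include <cuda_runtime.h>

// Bin num_samples 8-bit values from d_samples into num_levels - 1
// evenly spaced bins covering [0, 256), accumulating into d_histogram.
void HistogramEvenSketch(const unsigned char *d_samples, int *d_histogram,
                         int num_samples, int num_levels)
{
    void  *d_temp_storage     = NULL;
    size_t temp_storage_bytes = 0;

    // Phase 1: NULL temp-storage pointer -> only the size is computed
    cub::DeviceHistogram::HistogramEven(d_temp_storage, temp_storage_bytes,
        d_samples, d_histogram, num_levels,
        0.0f, 256.0f,            // [lower_level, upper_level)
        num_samples);

    // Phase 2: allocate, then run for real
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    cub::DeviceHistogram::HistogramEven(d_temp_storage, temp_storage_bytes,
        d_samples, d_histogram, num_levels,
        0.0f, 256.0f, num_samples);
    cudaFree(d_temp_storage);
}

The canary-zone allocation in the deleted code below wraps exactly this pattern, padding the temporary storage on both sides with a sentinel byte pattern to detect out-of-bounds writes by the dispatch.
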
- SampleT* h_samples = new Foo[total_samples]; - CounterT* h_histogram[NUM_ACTIVE_CHANNELS]; - ScaleTransform transform_op[NUM_ACTIVE_CHANNELS]; - - for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel) - { - int bins = num_levels[channel] - 1; - h_histogram[channel] = new CounterT[bins]; - - transform_op[channel].Init( - num_levels[channel], - upper_level[channel], - lower_level[channel], - ((upper_level[channel] - lower_level[channel]) / bins)); - } - - Initialize( - max_level, entropy_reduction, h_samples, num_levels, transform_op, h_histogram, num_row_pixels, num_rows, row_stride_bytes); - - // Allocate and initialize device data - - SampleT* d_samples = NULL; - CounterT* d_histogram[NUM_ACTIVE_CHANNELS]; - - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_samples, sizeof(SampleT) * total_samples)); - CubDebugExit(cudaMemcpy(d_samples, h_samples, sizeof(SampleT) * total_samples, cudaMemcpyHostToDevice)); - for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel) - { - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_histogram[channel], sizeof(CounterT) * (num_levels[channel] - 1))); - CubDebugExit(cudaMemset(d_histogram[channel], 0, sizeof(CounterT) * (num_levels[channel] - 1))); - } - - // Allocate CDP device arrays - size_t *d_temp_storage_bytes = NULL; - cudaError_t *d_cdp_error = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(cudaError_t) * 1)); - - // Allocate temporary storage - void *d_temp_storage = NULL; - size_t temp_storage_bytes = 0; - - DispatchEven( - Int2Type(), Int2Type(), Int2Type(), 1, d_temp_storage_bytes, d_cdp_error, - d_temp_storage, temp_storage_bytes, - d_samples, d_histogram, num_levels, lower_level, upper_level, - num_row_pixels, num_rows, row_stride_bytes, - 0, true); - - // Allocate temporary storage with "canary" zones - int canary_bytes = 256; - char canary_token = 8; - char* canary_zone = new char[canary_bytes]; - - memset(canary_zone, canary_token, canary_bytes); - CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes + (canary_bytes * 2))); - CubDebugExit(cudaMemset(d_temp_storage, canary_token, temp_storage_bytes + (canary_bytes * 2))); - - // Run warmup/correctness iteration - DispatchEven( - Int2Type(), Int2Type(), Int2Type(), 1, d_temp_storage_bytes, d_cdp_error, - ((char *) d_temp_storage) + canary_bytes, temp_storage_bytes, - d_samples, d_histogram, num_levels, lower_level, upper_level, - num_row_pixels, num_rows, row_stride_bytes, - 0, true); - - // Check canary zones - int error = CompareDeviceResults(canary_zone, (char *) d_temp_storage, canary_bytes, true, g_verbose); - AssertEquals(0, error); - error = CompareDeviceResults(canary_zone, ((char *) d_temp_storage) + canary_bytes + temp_storage_bytes, canary_bytes, true, g_verbose); - AssertEquals(0, error); - - // Flush any stdout/stderr - CubDebugExit(cudaPeekAtLastError()); - CubDebugExit(cudaDeviceSynchronize()); - fflush(stdout); - fflush(stderr); - - // Check for correctness (and display results, if specified) - for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel) - { - int channel_error = CompareDeviceResults(h_histogram[channel], d_histogram[channel], num_levels[channel] - 1, true, g_verbose); - printf("\tChannel %d %s", channel, channel_error ? 
"FAIL" : "PASS\n"); - error |= channel_error; - } - - // Performance - GpuTimer gpu_timer; - gpu_timer.Start(); - - DispatchEven( - Int2Type(), Int2Type(), Int2Type(), g_timing_iterations, d_temp_storage_bytes, d_cdp_error, - d_temp_storage, temp_storage_bytes, - d_samples, d_histogram, num_levels, lower_level, upper_level, - num_row_pixels, num_rows, row_stride_bytes, - 0, false); - - gpu_timer.Stop(); - float elapsed_millis = gpu_timer.ElapsedMillis(); - - // Display performance - if (g_timing_iterations > 0) - { - float avg_millis = elapsed_millis / g_timing_iterations; - float giga_rate = float(total_samples) / avg_millis / 1000.0f / 1000.0f; - float giga_bandwidth = giga_rate * sizeof(SampleT); - printf("\t%.3f avg ms, %.3f billion samples/s, %.3f billion bins/s, %.3f billion pixels/s, %.3f logical GB/s", - avg_millis, - giga_rate, - giga_rate * NUM_ACTIVE_CHANNELS / NUM_CHANNELS, - giga_rate / NUM_CHANNELS, - giga_bandwidth); - } - - printf("\n\n"); - - // Cleanup - if (h_samples) delete[] h_samples; - - for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel) - { - if (h_histogram[channel]) - delete[] h_histogram[channel]; - - if (d_histogram[channel]) - CubDebugExit(g_allocator.DeviceFree(d_histogram[channel])); - } - - if (d_samples) CubDebugExit(g_allocator.DeviceFree(d_samples)); - if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes)); - if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error)); - if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); - - // Correctness asserts - AssertEquals(0, error); -} - - - - - -/** - * Test histogram-range - */ -template < - Backend BACKEND, - int NUM_CHANNELS, - int NUM_ACTIVE_CHANNELS, - typename SampleT, - typename CounterT, - typename LevelT, - typename OffsetT> -void TestRange( - LevelT max_level, - int entropy_reduction, - int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channeli is num_levels[i] - 1. - LevelT* levels[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel. - OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest - OffsetT num_rows, ///< [in] The number of rows in the region of interest - OffsetT row_stride_bytes) ///< [in] The number of bytes between starts of consecutive rows in the region of interest -{ - OffsetT total_samples = num_rows * (row_stride_bytes / sizeof(SampleT)); - - printf("\n----------------------------\n"); - printf("%s cub::DeviceHistogramRange %d pixels (%d height, %d width, %d-byte row stride), %d %d-byte %s samples (entropy reduction %d), %s counters, %d/%d channels, max sample ", - (BACKEND == CDP) ? "CDP CUB" : (BACKEND == NPP) ? 
"NPP" : "CUB", - (int) (num_row_pixels * num_rows), - (int) num_rows, - (int) num_row_pixels, - (int) row_stride_bytes, - (int) total_samples, - (int) sizeof(SampleT), - typeid(SampleT).name(), - entropy_reduction, - typeid(CounterT).name(), - NUM_ACTIVE_CHANNELS, - NUM_CHANNELS); - std::cout << CoutCast(max_level) << "\n"; - for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel) - { - printf("Channel %d: %d bins [", channel, num_levels[channel] - 1); - std::cout << levels[channel][0]; - for (int level = 1; level < num_levels[channel]; ++level) - std::cout << ", " << levels[channel][level]; - printf("]\n"); - } - fflush(stdout); - - // Allocate and initialize host and device data - typedef SampleT Foo; // rename type to quelch gcc warnings (bug?) - SampleT* h_samples = new Foo[total_samples]; - CounterT* h_histogram[NUM_ACTIVE_CHANNELS]; - SearchTransform transform_op[NUM_ACTIVE_CHANNELS]; - - for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel) - { - transform_op[channel].levels = levels[channel]; - transform_op[channel].num_levels = num_levels[channel]; - - int bins = num_levels[channel] - 1; - h_histogram[channel] = new CounterT[bins]; - } - - Initialize( - max_level, entropy_reduction, h_samples, num_levels, transform_op, h_histogram, num_row_pixels, num_rows, row_stride_bytes); - - // Allocate and initialize device data - SampleT* d_samples = NULL; - LevelT* d_levels[NUM_ACTIVE_CHANNELS]; - CounterT* d_histogram[NUM_ACTIVE_CHANNELS]; - - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_samples, sizeof(SampleT) * total_samples)); - CubDebugExit(cudaMemcpy(d_samples, h_samples, sizeof(SampleT) * total_samples, cudaMemcpyHostToDevice)); - - for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel) - { - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_levels[channel], sizeof(LevelT) * num_levels[channel])); - CubDebugExit(cudaMemcpy(d_levels[channel], levels[channel], sizeof(LevelT) * num_levels[channel], cudaMemcpyHostToDevice)); - - int bins = num_levels[channel] - 1; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_histogram[channel], sizeof(CounterT) * bins)); - CubDebugExit(cudaMemset(d_histogram[channel], 0, sizeof(CounterT) * bins)); - } - - // Allocate CDP device arrays - size_t *d_temp_storage_bytes = NULL; - cudaError_t *d_cdp_error = NULL; - - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(cudaError_t) * 1)); - - // Allocate temporary storage - void *d_temp_storage = NULL; - size_t temp_storage_bytes = 0; - - DispatchRange( - Int2Type(), Int2Type(), Int2Type(), 1, d_temp_storage_bytes, d_cdp_error, - d_temp_storage, temp_storage_bytes, - d_samples, d_histogram, num_levels, d_levels, - num_row_pixels, num_rows, row_stride_bytes, - 0, true); - - // Allocate temporary storage with "canary" zones - int canary_bytes = 256; - char canary_token = 9; - char* canary_zone = new char[canary_bytes]; - - memset(canary_zone, canary_token, canary_bytes); - CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes + (canary_bytes * 2))); - CubDebugExit(cudaMemset(d_temp_storage, canary_token, temp_storage_bytes + (canary_bytes * 2))); - - // Run warmup/correctness iteration - DispatchRange( - Int2Type(), Int2Type(), Int2Type(), 1, d_temp_storage_bytes, d_cdp_error, - ((char *) d_temp_storage) + canary_bytes, temp_storage_bytes, - d_samples, d_histogram, num_levels, d_levels, - num_row_pixels, num_rows, row_stride_bytes, - 0, 
true); - - // Check canary zones - int error = CompareDeviceResults(canary_zone, (char *) d_temp_storage, canary_bytes, true, g_verbose); - AssertEquals(0, error); - error = CompareDeviceResults(canary_zone, ((char *) d_temp_storage) + canary_bytes + temp_storage_bytes, canary_bytes, true, g_verbose); - AssertEquals(0, error); - - // Flush any stdout/stderr - CubDebugExit(cudaPeekAtLastError()); - CubDebugExit(cudaDeviceSynchronize()); - fflush(stdout); - fflush(stderr); - - // Check for correctness (and display results, if specified) - for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel) - { - int channel_error = CompareDeviceResults(h_histogram[channel], d_histogram[channel], num_levels[channel] - 1, true, g_verbose); - printf("\tChannel %d %s", channel, channel_error ? "FAIL" : "PASS\n"); - error |= channel_error; - } - - // Performance - GpuTimer gpu_timer; - gpu_timer.Start(); - - DispatchRange( - Int2Type(), Int2Type(), Int2Type(), g_timing_iterations, d_temp_storage_bytes, d_cdp_error, - d_temp_storage, temp_storage_bytes, - d_samples, d_histogram, num_levels, d_levels, - num_row_pixels, num_rows, row_stride_bytes, - 0, false); - - gpu_timer.Stop(); - float elapsed_millis = gpu_timer.ElapsedMillis(); - - // Display performance - if (g_timing_iterations > 0) - { - float avg_millis = elapsed_millis / g_timing_iterations; - float giga_rate = float(total_samples) / avg_millis / 1000.0f / 1000.0f; - float giga_bandwidth = giga_rate * sizeof(SampleT); - printf("\t%.3f avg ms, %.3f billion samples/s, %.3f billion bins/s, %.3f billion pixels/s, %.3f logical GB/s", - avg_millis, - giga_rate, - giga_rate * NUM_ACTIVE_CHANNELS / NUM_CHANNELS, - giga_rate / NUM_CHANNELS, - giga_bandwidth); - } - - printf("\n\n"); - - // Cleanup - if (h_samples) delete[] h_samples; - - for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel) - { - if (h_histogram[channel]) - delete[] h_histogram[channel]; - - if (d_histogram[channel]) - CubDebugExit(g_allocator.DeviceFree(d_histogram[channel])); - - if (d_levels[channel]) - CubDebugExit(g_allocator.DeviceFree(d_levels[channel])); - } - - if (d_samples) CubDebugExit(g_allocator.DeviceFree(d_samples)); - if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes)); - if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error)); - if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); - - // Correctness asserts - AssertEquals(0, error); -} - - -/** - * Test histogram-even - */ -template < - Backend BACKEND, - typename SampleT, - int NUM_CHANNELS, - int NUM_ACTIVE_CHANNELS, - typename CounterT, - typename LevelT, - typename OffsetT> -void TestEven( - OffsetT num_row_pixels, - OffsetT num_rows, - OffsetT row_stride_bytes, - int entropy_reduction, - int num_levels[NUM_ACTIVE_CHANNELS], - LevelT max_level, - int max_num_levels) -{ - LevelT lower_level[NUM_ACTIVE_CHANNELS]; - LevelT upper_level[NUM_ACTIVE_CHANNELS]; - - // Find smallest level increment - int max_bins = max_num_levels - 1; - LevelT min_level_increment = max_level / max_bins; - - // Set upper and lower levels for each channel - for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel) - { - int num_bins = num_levels[channel] - 1; - lower_level[channel] = (max_level - (num_bins * min_level_increment)) / 2; - upper_level[channel] = (max_level + (num_bins * min_level_increment)) / 2; - } - - TestEven( - max_level, entropy_reduction, num_levels, lower_level, upper_level, num_row_pixels, num_rows, row_stride_bytes); -} - - - -/** - * Test 
histogram-range - */ -template < - Backend BACKEND, - typename SampleT, - int NUM_CHANNELS, - int NUM_ACTIVE_CHANNELS, - typename CounterT, - typename LevelT, - typename OffsetT> -void TestRange( - OffsetT num_row_pixels, - OffsetT num_rows, - OffsetT row_stride_bytes, - int entropy_reduction, - int num_levels[NUM_ACTIVE_CHANNELS], - LevelT max_level, - int max_num_levels) -{ - // Find smallest level increment - int max_bins = max_num_levels - 1; - LevelT min_level_increment = max_level / max_bins; - - LevelT* levels[NUM_ACTIVE_CHANNELS]; - for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel) - { - levels[channel] = new LevelT[num_levels[channel]]; - - int num_bins = num_levels[channel] - 1; - LevelT lower_level = (max_level - (num_bins * min_level_increment)) / 2; - - for (int level = 0; level < num_levels[channel]; ++level) - levels[channel][level] = lower_level + (level * min_level_increment); - } - - TestRange( - max_level, entropy_reduction, num_levels, levels, num_row_pixels, num_rows, row_stride_bytes); - - for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel) - delete[] levels[channel]; - -} - - - -/** - * Test different entrypoints - */ -template < - typename SampleT, - int NUM_CHANNELS, - int NUM_ACTIVE_CHANNELS, - typename CounterT, - typename LevelT, - typename OffsetT> -void Test( - OffsetT num_row_pixels, - OffsetT num_rows, - OffsetT row_stride_bytes, - int entropy_reduction, - int num_levels[NUM_ACTIVE_CHANNELS], - LevelT max_level, - int max_num_levels) -{ - TestEven( - num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, max_num_levels); - - TestRange( - num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, max_num_levels); -} - - -/** - * Test different number of levels - */ -template < - typename SampleT, - int NUM_CHANNELS, - int NUM_ACTIVE_CHANNELS, - typename CounterT, - typename LevelT, - typename OffsetT> -void Test( - OffsetT num_row_pixels, - OffsetT num_rows, - OffsetT row_stride_bytes, - int entropy_reduction, - LevelT max_level, - int max_num_levels) -{ - int num_levels[NUM_ACTIVE_CHANNELS]; - - // All the same level - for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel) - { - num_levels[channel] = max_num_levels; - } - Test( - num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, max_num_levels); - - // All different levels - num_levels[0] = max_num_levels; - for (int channel = 1; channel < NUM_ACTIVE_CHANNELS; ++channel) - { - num_levels[channel] = (num_levels[channel - 1] / 2) + 1; - } - Test( - num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, max_num_levels); -} - - - -/** - * Test different entropy-levels - */ -template < - typename SampleT, - int NUM_CHANNELS, - int NUM_ACTIVE_CHANNELS, - typename CounterT, - typename LevelT, - typename OffsetT> -void Test( - OffsetT num_row_pixels, - OffsetT num_rows, - OffsetT row_stride_bytes, - LevelT max_level, - int max_num_levels) -{ - Test( - num_row_pixels, num_rows, row_stride_bytes, 0, max_level, max_num_levels); - - Test( - num_row_pixels, num_rows, row_stride_bytes, -1, max_level, max_num_levels); - - Test( - num_row_pixels, num_rows, row_stride_bytes, 5, max_level, max_num_levels); -} - - -/** - * Test different row strides - */ -template < - typename SampleT, - int NUM_CHANNELS, - int NUM_ACTIVE_CHANNELS, - typename CounterT, - typename LevelT, - typename OffsetT> -void Test( - OffsetT num_row_pixels, - OffsetT num_rows, - LevelT max_level, - 
int max_num_levels)
-{
-    OffsetT row_stride_bytes = num_row_pixels * NUM_CHANNELS * sizeof(SampleT);
-
-    // No padding
-    Test(
-        num_row_pixels, num_rows, row_stride_bytes, max_level, max_num_levels);
-
-    // 13 samples padding
-    Test(
-        num_row_pixels, num_rows, row_stride_bytes + (13 * sizeof(SampleT)), max_level, max_num_levels);
-}
-
-
-/**
- * Test different problem sizes
- */
-template <
-    typename SampleT,
-    int      NUM_CHANNELS,
-    int      NUM_ACTIVE_CHANNELS,
-    typename CounterT,
-    typename LevelT,
-    typename OffsetT>
-void Test(
-    LevelT max_level,
-    int    max_num_levels)
-{
-    // 0 images
-    Test(
-        OffsetT(1920), OffsetT(0), max_level, max_num_levels);
-    Test(
-        OffsetT(0), OffsetT(0), max_level, max_num_levels);
-
-    // 1080 image
-    Test(
-        OffsetT(1920), OffsetT(1080), max_level, max_num_levels);
-
-    // 720 image
-    Test(
-        OffsetT(1280), OffsetT(720), max_level, max_num_levels);
-
-    // Sample different image sizes
-    for (OffsetT rows = 1; rows < 1000000; rows *= 100)
-    {
-        for (OffsetT cols = 1; cols < (1000000 / rows); cols *= 100)
-        {
-            Test(
-                cols, rows, max_level, max_num_levels);
-        }
-    }
-
-    // Randomly select linear problem size between 1:10,000,000
-    unsigned int max_int = (unsigned int) -1;
-    for (int i = 0; i < 10; ++i)
-    {
-        unsigned int num_items;
-        RandomBits(num_items);
-        num_items = (unsigned int) ((double(num_items) * double(10000000)) / double(max_int));
-        num_items = CUB_MAX(1, num_items);
-
-        Test(
-            OffsetT(num_items), 1, max_level, max_num_levels);
-    }
-}
-
-
-
-/**
- * Test different channel interleavings (valid specialization)
- */
-template <
-    typename SampleT,
-    typename CounterT,
-    typename LevelT,
-    typename OffsetT>
-void TestChannels(
-    LevelT max_level,
-    int max_num_levels,
-    Int2Type<true> is_valid_tag)
-{
-    Test(max_level, max_num_levels);
-    Test(max_level, max_num_levels);
-    Test(max_level, max_num_levels);
-    Test(max_level, max_num_levels);
-}
-
-
-/**
- * Test different channel interleavings (invalid specialization)
- */
-template <
-    typename SampleT,
-    typename CounterT,
-    typename LevelT,
-    typename OffsetT>
-void TestChannels(
-    LevelT max_level,
-    int max_num_levels,
-    Int2Type<false> is_valid_tag)
-{}
-
-
-
-//---------------------------------------------------------------------
-// Main
-//---------------------------------------------------------------------
-
-
-
-
-/**
- * Main
- */
-int main(int argc, char** argv)
-{
-    int num_row_pixels = -1;
-    int entropy_reduction = 0;
-    int num_rows = 1;
-
-    // Initialize command line
-    CommandLineArgs args(argc, argv);
-    g_verbose = args.CheckCmdLineFlag("v");
-    g_verbose_input = args.CheckCmdLineFlag("v2");
-    args.GetCmdLineArgument("n", num_row_pixels);
-
-    int row_stride_pixels = num_row_pixels;
-
-    args.GetCmdLineArgument("rows", num_rows);
-    args.GetCmdLineArgument("stride", row_stride_pixels);
-    args.GetCmdLineArgument("i", g_timing_iterations);
-    args.GetCmdLineArgument("repeat", g_repeat);
-    args.GetCmdLineArgument("entropy", entropy_reduction);
-
-    bool compare_npp = args.CheckCmdLineFlag("npp");
-
-
-    // Print usage
-    if (args.CheckCmdLineFlag("help"))
-    {
-        printf("%s "
-            "[--n=<pixels per row>] "
-            "[--rows=<number of rows>] "
-            "[--stride=<row stride in pixels>] "
-            "[--i=<timing iterations>] "
-            "[--device=<device-id>] "
-            "[--repeat=<test repetitions>]"
-            "[--entropy=<entropy-reduction factor>]"
-            "[--v] "
-            "[--cdp]"
-            "[--npp]"
-            "\n", argv[0]);
-        exit(0);
-    }
-
-    // Initialize device
-    CubDebugExit(args.DeviceInit());
-
-    // Get ptx version
-    int ptx_version;
-    CubDebugExit(PtxVersion(ptx_version));
-
-    if (num_row_pixels < 0)
-    {
-        num_row_pixels = 1920 * 1080;
-        row_stride_pixels = num_row_pixels;
-    }
-
-#if defined(QUICKER_TEST)
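
// Annotation (added; summarizes the harness's RandomBits helper rather than
// quoting it): the --entropy flag exercised by the branches below controls
// entropy reduction. Each reduction round ANDs in one more word of random
// bits, so after r rounds any given bit is set with probability roughly
// 0.5^(r + 1). Larger values therefore skew Sample() toward small values,
// concentrating samples in the low histogram bins and stressing bin
// contention; 0 leaves the data uniformly random, and negative values are
// handled as a special case by the helper.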
- - // Compile/run quick tests - { - // HistogramEven: unsigned char 256 bins - typedef unsigned char SampleT; - typedef int LevelT; - - LevelT max_level = 256; - int num_levels[1] = {257}; - int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1; - - TestEven(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]); - if (compare_npp) - TestEven(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]); - } - - -#elif defined(QUICK_TEST) - - // Compile/run quick tests - { - // HistogramEven: unsigned char 256 bins - typedef unsigned char SampleT; - typedef int LevelT; - - LevelT max_level = 256; - int num_levels[1] = {257}; - int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1; - - TestEven(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]); - if (compare_npp) - TestEven(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]); - } - - { - // HistogramEven: 4/4 multichannel Unsigned char 256 bins - typedef unsigned char SampleT; - typedef int LevelT; - - LevelT max_level = 256; - int num_levels[4] = {257, 257, 257, 257}; - int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 4; - - TestEven(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]); - } - - { - // HistogramEven: 3/4 multichannel Unsigned char 256 bins - typedef unsigned char SampleT; - typedef int LevelT; - - LevelT max_level = 256; - int num_levels[3] = {257, 257, 257}; - int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 4; - - TestEven(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]); - if (compare_npp) - TestEven(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]); - } - - { - // HistogramEven: short [0,1024] 256 bins - typedef unsigned short SampleT; - typedef unsigned short LevelT; - - LevelT max_level = 1024; - int num_levels[1] = {257}; - int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1; - - TestEven(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]); - } - - { - // HistogramEven: float [0,1.0] 256 bins - typedef float SampleT; - typedef float LevelT; - - LevelT max_level = 1.0; - int num_levels[1] = {257}; - int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1; - - TestEven(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]); - } - - { - // HistogramEven: 3/4 multichannel float [0,1.0] 256 bins - typedef float SampleT; - typedef float LevelT; - - LevelT max_level = 1.0; - int num_levels[3] = {257, 257, 257}; - int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 4; - - TestEven(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]); - } - - { - // HistogramRange: signed char 256 bins - typedef signed char SampleT; - typedef int LevelT; - - LevelT max_level = 256; - int num_levels[1] = {257}; - int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1; - - TestRange(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]); - } - - { - // HistogramRange: 3/4 channel, unsigned char, varied bins (256, 128, 64) - typedef unsigned char SampleT; - typedef int LevelT; - - LevelT max_level = 256; - int num_levels[3] = {257, 129, 65}; - int 
row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 4; - - TestRange(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]); - } - - if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted - { - // HistogramEven: double [0,1.0] 64 bins - typedef double SampleT; - typedef double LevelT; - - LevelT max_level = 1.0; - int num_levels[1] = {65}; - int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1; - - TestEven(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]); - } - - { - // HistogramEven: short [0,1024] 512 bins - typedef unsigned short SampleT; - typedef unsigned short LevelT; - - LevelT max_level = 1024; - int num_levels[1] = {513}; - int row_stride_bytes = sizeof(SampleT) * row_stride_pixels * 1; - - TestEven(num_row_pixels, num_rows, row_stride_bytes, entropy_reduction, num_levels, max_level, num_levels[0]); - } - -#else - - // Compile/run thorough tests - for (int i = 0; i <= g_repeat; ++i) - { - TestChannels (256, 256 + 1, Int2Type()); - TestChannels (256, 256 + 1, Int2Type()); - TestChannels (128, 128 + 1, Int2Type()); - TestChannels (8192, 8192 + 1, Int2Type()); - TestChannels (1.0, 256 + 1, Int2Type()); - - // Test down-conversion of size_t offsets to int - TestChannels (256, 256 + 1, Int2Type<(sizeof(size_t) != sizeof(int))>()); - } - -#endif - - return 0; -} - diff --git a/ml-xgboost/cub/test/test_device_radix_sort.cu b/ml-xgboost/cub/test/test_device_radix_sort.cu deleted file mode 100644 index af61773..0000000 --- a/ml-xgboost/cub/test/test_device_radix_sort.cu +++ /dev/null @@ -1,1273 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - ******************************************************************************/ - -/****************************************************************************** - * Test of DeviceRadixSort utilities - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include -#include -#include - -#include -#include -#include - -#include -#include -#include - -#include "test_util.h" - -using namespace cub; - - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -bool g_verbose = false; -int g_timing_iterations = 0; -int g_repeat = 0; -CachingDeviceAllocator g_allocator(true); - -// Dispatch types -enum Backend -{ - CUB, // CUB method (allows overwriting of input) - CUB_NO_OVERWRITE, // CUB method (disallows overwriting of input) - - CUB_SEGMENTED, // CUB method (allows overwriting of input) - CUB_SEGMENTED_NO_OVERWRITE, // CUB method (disallows overwriting of input) - - THRUST, // Thrust method - CDP, // GPU-based (dynamic parallelism) dispatch to CUB method -}; - - -//--------------------------------------------------------------------- -// Dispatch to different DeviceRadixSort entrypoints -//--------------------------------------------------------------------- - -/** - * Dispatch to CUB sorting entrypoint (specialized for ascending) - */ -template -CUB_RUNTIME_FUNCTION -__forceinline__ -cudaError_t Dispatch( - Int2Type is_descending, - Int2Type dispatch_to, - int *d_selector, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - DoubleBuffer &d_keys, - DoubleBuffer &d_values, - int num_items, - int num_segments, - const int *d_segment_offsets, - int begin_bit, - int end_bit, - cudaStream_t stream, - bool debug_synchronous) -{ - return DeviceRadixSort::SortPairs( - d_temp_storage, temp_storage_bytes, - d_keys, d_values, - num_items, begin_bit, end_bit, stream, debug_synchronous); -} - -/** - * Dispatch to CUB_NO_OVERWRITE sorting entrypoint (specialized for ascending) - */ -template -CUB_RUNTIME_FUNCTION -__forceinline__ -cudaError_t Dispatch( - Int2Type is_descending, - Int2Type dispatch_to, - int *d_selector, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - DoubleBuffer &d_keys, - DoubleBuffer &d_values, - int num_items, - int num_segments, - const int *d_segment_offsets, - int begin_bit, - int end_bit, - cudaStream_t stream, - bool debug_synchronous) -{ - KeyT const *const_keys_itr = d_keys.Current(); - ValueT const *const_values_itr = d_values.Current(); - - cudaError_t retval = DeviceRadixSort::SortPairs( - d_temp_storage, temp_storage_bytes, - const_keys_itr, d_keys.Alternate(), const_values_itr, d_values.Alternate(), - num_items, begin_bit, end_bit, stream, debug_synchronous); - - d_keys.selector ^= 1; - d_values.selector ^= 1; - return retval; -} - -/** - * Dispatch to CUB sorting entrypoint (specialized for descending) - */ -template -CUB_RUNTIME_FUNCTION -__forceinline__ -cudaError_t Dispatch( - Int2Type is_descending, - Int2Type dispatch_to, - int *d_selector, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - DoubleBuffer &d_keys, - DoubleBuffer &d_values, - int num_items, - int num_segments, - const int *d_segment_offsets, - int begin_bit, - int 
end_bit, - cudaStream_t stream, - bool debug_synchronous) -{ - return DeviceRadixSort::SortPairsDescending( - d_temp_storage, temp_storage_bytes, - d_keys, d_values, - num_items, begin_bit, end_bit, stream, debug_synchronous); -} - - -/** - * Dispatch to CUB_NO_OVERWRITE sorting entrypoint (specialized for descending) - */ -template -CUB_RUNTIME_FUNCTION -__forceinline__ -cudaError_t Dispatch( - Int2Type is_descending, - Int2Type dispatch_to, - int *d_selector, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - DoubleBuffer &d_keys, - DoubleBuffer &d_values, - int num_items, - int num_segments, - const int *d_segment_offsets, - int begin_bit, - int end_bit, - cudaStream_t stream, - bool debug_synchronous) -{ - KeyT const *const_keys_itr = d_keys.Current(); - ValueT const *const_values_itr = d_values.Current(); - - cudaError_t retval = DeviceRadixSort::SortPairsDescending( - d_temp_storage, temp_storage_bytes, - const_keys_itr, d_keys.Alternate(), const_values_itr, d_values.Alternate(), - num_items, begin_bit, end_bit, stream, debug_synchronous); - - d_keys.selector ^= 1; - d_values.selector ^= 1; - return retval; -} - -//--------------------------------------------------------------------- -// Dispatch to different DeviceRadixSort entrypoints -//--------------------------------------------------------------------- - -/** - * Dispatch to CUB_SEGMENTED sorting entrypoint (specialized for ascending) - */ -template -CUB_RUNTIME_FUNCTION -__forceinline__ -cudaError_t Dispatch( - Int2Type is_descending, - Int2Type dispatch_to, - int *d_selector, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - DoubleBuffer &d_keys, - DoubleBuffer &d_values, - int num_items, - int num_segments, - const int *d_segment_offsets, - int begin_bit, - int end_bit, - cudaStream_t stream, - bool debug_synchronous) -{ - return DeviceSegmentedRadixSort::SortPairs( - d_temp_storage, temp_storage_bytes, - d_keys, d_values, - num_items, num_segments, d_segment_offsets, d_segment_offsets + 1, - begin_bit, end_bit, stream, debug_synchronous); -} - -/** - * Dispatch to CUB_SEGMENTED_NO_OVERWRITE sorting entrypoint (specialized for ascending) - */ -template -CUB_RUNTIME_FUNCTION -__forceinline__ -cudaError_t Dispatch( - Int2Type is_descending, - Int2Type dispatch_to, - int *d_selector, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - DoubleBuffer &d_keys, - DoubleBuffer &d_values, - int num_items, - int num_segments, - const int *d_segment_offsets, - int begin_bit, - int end_bit, - cudaStream_t stream, - bool debug_synchronous) -{ - KeyT const *const_keys_itr = d_keys.Current(); - ValueT const *const_values_itr = d_values.Current(); - - cudaError_t retval = DeviceSegmentedRadixSort::SortPairs( - d_temp_storage, temp_storage_bytes, - const_keys_itr, d_keys.Alternate(), const_values_itr, d_values.Alternate(), - num_items, num_segments, d_segment_offsets, d_segment_offsets + 1, - begin_bit, end_bit, stream, debug_synchronous); - - d_keys.selector ^= 1; - d_values.selector ^= 1; - return retval; -} - - -/** - * Dispatch to CUB_SEGMENTED sorting entrypoint (specialized for descending) - */ -template -CUB_RUNTIME_FUNCTION -__forceinline__ -cudaError_t Dispatch( - Int2Type is_descending, - Int2Type dispatch_to, - int *d_selector, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& 
temp_storage_bytes, - DoubleBuffer &d_keys, - DoubleBuffer &d_values, - int num_items, - int num_segments, - const int *d_segment_offsets, - int begin_bit, - int end_bit, - cudaStream_t stream, - bool debug_synchronous) -{ - return DeviceSegmentedRadixSort::SortPairsDescending( - d_temp_storage, temp_storage_bytes, - d_keys, d_values, - num_items, num_segments, d_segment_offsets, d_segment_offsets + 1, - begin_bit, end_bit, stream, debug_synchronous); -} - -/** - * Dispatch to CUB_SEGMENTED_NO_OVERWRITE sorting entrypoint (specialized for descending) - */ -template -CUB_RUNTIME_FUNCTION -__forceinline__ -cudaError_t Dispatch( - Int2Type is_descending, - Int2Type dispatch_to, - int *d_selector, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - DoubleBuffer &d_keys, - DoubleBuffer &d_values, - int num_items, - int num_segments, - const int *d_segment_offsets, - int begin_bit, - int end_bit, - cudaStream_t stream, - bool debug_synchronous) -{ - KeyT const *const_keys_itr = d_keys.Current(); - ValueT const *const_values_itr = d_values.Current(); - - cudaError_t retval = DeviceSegmentedRadixSort::SortPairsDescending( - d_temp_storage, temp_storage_bytes, - const_keys_itr, d_keys.Alternate(), const_values_itr, d_values.Alternate(), - num_items, num_segments, d_segment_offsets, d_segment_offsets + 1, - begin_bit, end_bit, stream, debug_synchronous); - - d_keys.selector ^= 1; - d_values.selector ^= 1; - return retval; -} - - -//--------------------------------------------------------------------- -// Dispatch to different Thrust entrypoints -//--------------------------------------------------------------------- - -/** - * Dispatch keys-only to Thrust sorting entrypoint - */ -template -cudaError_t Dispatch( - Int2Type is_descending, - Int2Type dispatch_to, - int *d_selector, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void *d_temp_storage, - size_t &temp_storage_bytes, - DoubleBuffer &d_keys, - DoubleBuffer &d_values, - int num_items, - int num_segments, - const int *d_segment_offsets, - int begin_bit, - int end_bit, - cudaStream_t stream, - bool debug_synchronous) -{ - - if (d_temp_storage == 0) - { - temp_storage_bytes = 1; - } - else - { - thrust::device_ptr d_keys_wrapper(d_keys.Current()); - - if (IS_DESCENDING) thrust::reverse(d_keys_wrapper, d_keys_wrapper + num_items); - thrust::sort(d_keys_wrapper, d_keys_wrapper + num_items); - if (IS_DESCENDING) thrust::reverse(d_keys_wrapper, d_keys_wrapper + num_items); - } - - return cudaSuccess; -} - - -/** - * Dispatch key-value pairs to Thrust sorting entrypoint - */ -template -cudaError_t Dispatch( - Int2Type is_descending, - Int2Type dispatch_to, - int *d_selector, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void *d_temp_storage, - size_t &temp_storage_bytes, - DoubleBuffer &d_keys, - DoubleBuffer &d_values, - int num_items, - int num_segments, - const int *d_segment_offsets, - int begin_bit, - int end_bit, - cudaStream_t stream, - bool debug_synchronous) -{ - - if (d_temp_storage == 0) - { - temp_storage_bytes = 1; - } - else - { - thrust::device_ptr d_keys_wrapper(d_keys.Current()); - thrust::device_ptr d_values_wrapper(d_values.Current()); - - if (IS_DESCENDING) { - thrust::reverse(d_keys_wrapper, d_keys_wrapper + num_items); - thrust::reverse(d_values_wrapper, d_values_wrapper + num_items); - } - - thrust::sort_by_key(d_keys_wrapper, d_keys_wrapper + num_items, d_values_wrapper); - - if (IS_DESCENDING) { - 
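// Annotation (added): Thrust serves here only as a reference backend, and the
// harness emulates a descending sort by reversing, ascending-sorting, then
// reversing again (the same trick as the keys-only dispatch above),
// presumably to stay on thrust::sort_by_key's radix fast path rather than
// supplying a custom comparator.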
thrust::reverse(d_keys_wrapper, d_keys_wrapper + num_items); - thrust::reverse(d_values_wrapper, d_values_wrapper + num_items); - } - } - - return cudaSuccess; -} - - -//--------------------------------------------------------------------- -// CUDA Nested Parallelism Test Kernel -//--------------------------------------------------------------------- - -/** - * Simple wrapper kernel to invoke DeviceRadixSort - */ -template -__global__ void CnpDispatchKernel( - Int2Type is_descending, - int *d_selector, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void *d_temp_storage, - size_t temp_storage_bytes, - DoubleBuffer d_keys, - DoubleBuffer d_values, - int num_items, - int num_segments, - const int *d_segment_offsets, - int begin_bit, - int end_bit, - bool debug_synchronous) -{ -#ifndef CUB_CDP - *d_cdp_error = cudaErrorNotSupported; -#else - *d_cdp_error = Dispatch( - is_descending, Int2Type(), d_selector, d_temp_storage_bytes, d_cdp_error, - d_temp_storage, temp_storage_bytes, d_keys, d_values, - num_items, num_segments, d_segment_offsets, - begin_bit, end_bit, 0, debug_synchronous); - *d_temp_storage_bytes = temp_storage_bytes; - *d_selector = d_keys.selector; -#endif -} - - -/** - * Dispatch to CDP kernel - */ -template -cudaError_t Dispatch( - Int2Type is_descending, - Int2Type dispatch_to, - int *d_selector, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void *d_temp_storage, - size_t &temp_storage_bytes, - DoubleBuffer &d_keys, - DoubleBuffer &d_values, - int num_items, - int num_segments, - const int *d_segment_offsets, - int begin_bit, - int end_bit, - cudaStream_t stream, - bool debug_synchronous) -{ - // Invoke kernel to invoke device-side dispatch - CnpDispatchKernel<<<1,1>>>( - is_descending, d_selector, d_temp_storage_bytes, d_cdp_error, - d_temp_storage, temp_storage_bytes, d_keys, d_values, - num_items, num_segments, d_segment_offsets, - begin_bit, end_bit, debug_synchronous); - - // Copy out selector - CubDebugExit(cudaMemcpy(&d_keys.selector, d_selector, sizeof(int) * 1, cudaMemcpyDeviceToHost)); - d_values.selector = d_keys.selector; - - // Copy out temp_storage_bytes - CubDebugExit(cudaMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) * 1, cudaMemcpyDeviceToHost)); - - // Copy out error - cudaError_t retval; - CubDebugExit(cudaMemcpy(&retval, d_cdp_error, sizeof(cudaError_t) * 1, cudaMemcpyDeviceToHost)); - return retval; -} - - - -//--------------------------------------------------------------------- -// Problem generation -//--------------------------------------------------------------------- - - -/** - * Simple key-value pairing - */ -template < - typename KeyT, - typename ValueT, - bool IS_FLOAT = (Traits::CATEGORY == FLOATING_POINT)> -struct Pair -{ - KeyT key; - ValueT value; - - bool operator<(const Pair &b) const - { - return (key < b.key); - } -}; - - -/** - * Simple key-value pairing (specialized for bool types) - */ -template -struct Pair -{ - bool key; - ValueT value; - - bool operator<(const Pair &b) const - { - return (!key && b.key); - } -}; - - -/** - * Simple key-value pairing (specialized for floating point types) - */ -template -struct Pair -{ - KeyT key; - ValueT value; - - bool operator<(const Pair &b) const - { - if (key < b.key) - return true; - - if (key > b.key) - return false; - - // KeyT in unsigned bits - typedef typename Traits::UnsignedBits UnsignedBits; - - // Return true if key is negative zero and b.key is positive zero - UnsignedBits key_bits = *reinterpret_cast(const_cast(&key)); - 
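// Annotation (added): IEEE-754 comparison treats -0.0 and +0.0 as equal, but
// the radix sort under test orders the (bit-twiddled) key bits, under which
// the two zeros differ in the sign bit and -0.0 sorts first. The sign-bit
// test below imposes the same total order on the CPU reference solution so
// that device and host results stay bitwise comparable.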
UnsignedBits b_key_bits = *reinterpret_cast(const_cast(&b.key)); - UnsignedBits HIGH_BIT = Traits::HIGH_BIT; - - return ((key_bits & HIGH_BIT) != 0) && ((b_key_bits & HIGH_BIT) == 0); - } -}; - - -/** - * Initialize key data - */ -template -void InitializeKeyBits( - GenMode gen_mode, - KeyT *h_keys, - int num_items, - int entropy_reduction) -{ - for (int i = 0; i < num_items; ++i) - InitValue(gen_mode, h_keys[i], i); -} - - -/** - * Initialize solution - */ -template -void InitializeSolution( - KeyT *h_keys, - int num_items, - int num_segments, - int *h_segment_offsets, - int begin_bit, - int end_bit, - int *&h_reference_ranks, - KeyT *&h_reference_keys) -{ - typedef Pair PairT; - - PairT *h_pairs = new PairT[num_items]; - - int num_bits = end_bit - begin_bit; - for (int i = 0; i < num_items; ++i) - { - - // Mask off unwanted portions - if (num_bits < sizeof(KeyT) * 8) - { - unsigned long long base = 0; - memcpy(&base, &h_keys[i], sizeof(KeyT)); - base &= ((1ull << num_bits) - 1) << begin_bit; - memcpy(&h_pairs[i].key, &base, sizeof(KeyT)); - } - else - { - h_pairs[i].key = h_keys[i]; - } - - h_pairs[i].value = i; - } - - printf("\nSorting reference solution on CPU (%d segments)...", num_segments); fflush(stdout); - - for (int i = 0; i < num_segments; ++i) - { - if (IS_DESCENDING) std::reverse(h_pairs + h_segment_offsets[i], h_pairs + h_segment_offsets[i + 1]); - std::stable_sort( h_pairs + h_segment_offsets[i], h_pairs + h_segment_offsets[i + 1]); - if (IS_DESCENDING) std::reverse(h_pairs + h_segment_offsets[i], h_pairs + h_segment_offsets[i + 1]); - } - - printf(" Done.\n"); fflush(stdout); - - h_reference_ranks = new int[num_items]; - h_reference_keys = new KeyT[num_items]; - - for (int i = 0; i < num_items; ++i) - { - h_reference_ranks[i] = h_pairs[i].value; - h_reference_keys[i] = h_keys[h_pairs[i].value]; - } - - if (h_pairs) delete[] h_pairs; -} - - -//--------------------------------------------------------------------- -// Test generation -//--------------------------------------------------------------------- - - -/** - * Test DeviceRadixSort - */ -template < - Backend BACKEND, - bool IS_DESCENDING, - typename KeyT, - typename ValueT> -void Test( - KeyT *h_keys, - ValueT *h_values, - int num_items, - int num_segments, - int *h_segment_offsets, - int begin_bit, - int end_bit, - KeyT *h_reference_keys, - ValueT *h_reference_values) -{ - const bool KEYS_ONLY = Equals::VALUE; - - printf("%s %s cub::DeviceRadixSort %d items, %d segments, %d-byte keys (%s) %d-byte values (%s), descending %d, begin_bit %d, end_bit %d\n", - (BACKEND == CUB_NO_OVERWRITE) ? "CUB_NO_OVERWRITE" : (BACKEND == CDP) ? "CDP CUB" : (BACKEND == THRUST) ? "Thrust" : "CUB", - (KEYS_ONLY) ? "keys-only" : "key-value", - num_items, num_segments, - (int) sizeof(KeyT), typeid(KeyT).name(), (KEYS_ONLY) ? 
0 : (int) sizeof(ValueT), typeid(ValueT).name(), - IS_DESCENDING, begin_bit, end_bit); - fflush(stdout); - - if (g_verbose) - { - printf("Input keys:\n"); - DisplayResults(h_keys, num_items); - printf("\n\n"); - } - - // Allocate device arrays - DoubleBuffer d_keys; - DoubleBuffer d_values; - int *d_selector; - int *d_segment_offsets; - size_t *d_temp_storage_bytes; - cudaError_t *d_cdp_error; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_keys.d_buffers[0], sizeof(KeyT) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_keys.d_buffers[1], sizeof(KeyT) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_selector, sizeof(int) * 1)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_segment_offsets, sizeof(int) * (num_segments + 1))); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(cudaError_t) * 1)); - if (!KEYS_ONLY) - { - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_values.d_buffers[0], sizeof(ValueT) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_values.d_buffers[1], sizeof(ValueT) * num_items)); - } - - // Allocate temporary storage (and make it un-aligned) - size_t temp_storage_bytes = 0; - void *d_temp_storage = NULL; - CubDebugExit(Dispatch( - Int2Type(), Int2Type(), d_selector, d_temp_storage_bytes, d_cdp_error, - d_temp_storage, temp_storage_bytes, d_keys, d_values, - num_items, num_segments, d_segment_offsets, - begin_bit, end_bit, 0, true)); - - CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes + 1)); - void* mis_aligned_temp = static_cast(d_temp_storage) + 1; - - // Initialize/clear device arrays - d_keys.selector = 0; - CubDebugExit(cudaMemcpy(d_keys.d_buffers[0], h_keys, sizeof(KeyT) * num_items, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemset(d_keys.d_buffers[1], 0, sizeof(KeyT) * num_items)); - if (!KEYS_ONLY) - { - d_values.selector = 0; - CubDebugExit(cudaMemcpy(d_values.d_buffers[0], h_values, sizeof(ValueT) * num_items, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemset(d_values.d_buffers[1], 0, sizeof(ValueT) * num_items)); - } - CubDebugExit(cudaMemcpy(d_segment_offsets, h_segment_offsets, sizeof(int) * (num_segments + 1), cudaMemcpyHostToDevice)); - - // Run warmup/correctness iteration - CubDebugExit(Dispatch( - Int2Type(), Int2Type(), d_selector, d_temp_storage_bytes, d_cdp_error, - mis_aligned_temp, temp_storage_bytes, d_keys, d_values, - num_items, num_segments, d_segment_offsets, - begin_bit, end_bit, 0, true)); - - // Flush any stdout/stderr - fflush(stdout); - fflush(stderr); - - // Check for correctness (and display results, if specified) - printf("Warmup done. Checking results:\n"); fflush(stdout); - int compare = CompareDeviceResults(h_reference_keys, d_keys.Current(), num_items, true, g_verbose); - printf("\t Compare keys (selector %d): %s ", d_keys.selector, compare ? "FAIL" : "PASS"); fflush(stdout); - if (!KEYS_ONLY) - { - int values_compare = CompareDeviceResults(h_reference_values, d_values.Current(), num_items, true, g_verbose); - compare |= values_compare; - printf("\t Compare values (selector %d): %s ", d_values.selector, values_compare ? "FAIL" : "PASS"); fflush(stdout); - } - if (BACKEND == CUB_NO_OVERWRITE) - { - // Check that input isn't overwritten - int input_compare = CompareDeviceResults(h_keys, d_keys.d_buffers[0], num_items, true, g_verbose); - compare |= input_compare; - printf("\t Compare input keys: %s ", input_compare ? 
"FAIL" : "PASS"); fflush(stdout); - } - - // Performance - if (g_timing_iterations) - printf("\nPerforming timing iterations:\n"); fflush(stdout); - - GpuTimer gpu_timer; - float elapsed_millis = 0.0f; - for (int i = 0; i < g_timing_iterations; ++i) - { - // Initialize/clear device arrays - CubDebugExit(cudaMemcpy(d_keys.d_buffers[d_keys.selector], h_keys, sizeof(KeyT) * num_items, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemset(d_keys.d_buffers[d_keys.selector ^ 1], 0, sizeof(KeyT) * num_items)); - if (!KEYS_ONLY) - { - CubDebugExit(cudaMemcpy(d_values.d_buffers[d_values.selector], h_values, sizeof(ValueT) * num_items, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemset(d_values.d_buffers[d_values.selector ^ 1], 0, sizeof(ValueT) * num_items)); - } - - gpu_timer.Start(); - CubDebugExit(Dispatch( - Int2Type(), Int2Type(), d_selector, d_temp_storage_bytes, d_cdp_error, - mis_aligned_temp, temp_storage_bytes, d_keys, d_values, - num_items, num_segments, d_segment_offsets, - begin_bit, end_bit, 0, false)); - gpu_timer.Stop(); - elapsed_millis += gpu_timer.ElapsedMillis(); - } - - // Display performance - if (g_timing_iterations > 0) - { - float avg_millis = elapsed_millis / g_timing_iterations; - float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f; - float giga_bandwidth = (KEYS_ONLY) ? - giga_rate * sizeof(KeyT) * 2 : - giga_rate * (sizeof(KeyT) + sizeof(ValueT)) * 2; - printf("\n%.3f elapsed ms, %.3f avg ms, %.3f billion items/s, %.3f logical GB/s", elapsed_millis, avg_millis, giga_rate, giga_bandwidth); - } - - printf("\n\n"); - - // Cleanup - if (d_keys.d_buffers[0]) CubDebugExit(g_allocator.DeviceFree(d_keys.d_buffers[0])); - if (d_keys.d_buffers[1]) CubDebugExit(g_allocator.DeviceFree(d_keys.d_buffers[1])); - if (d_values.d_buffers[0]) CubDebugExit(g_allocator.DeviceFree(d_values.d_buffers[0])); - if (d_values.d_buffers[1]) CubDebugExit(g_allocator.DeviceFree(d_values.d_buffers[1])); - if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); - if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error)); - if (d_selector) CubDebugExit(g_allocator.DeviceFree(d_selector)); - if (d_segment_offsets) CubDebugExit(g_allocator.DeviceFree(d_segment_offsets)); - if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes)); - - // Correctness asserts - AssertEquals(0, compare); -} - - -/** - * Test backend - */ -template -void TestBackend( - KeyT *h_keys, - int num_items, - int num_segments, - int *h_segment_offsets, - int begin_bit, - int end_bit, - KeyT *h_reference_keys, - int *h_reference_ranks) -{ - const bool KEYS_ONLY = Equals::VALUE; - - ValueT *h_values = NULL; - ValueT *h_reference_values = NULL; - - if (!KEYS_ONLY) - { - h_values = new ValueT[num_items]; - h_reference_values = new ValueT[num_items]; - - for (int i = 0; i < num_items; ++i) - { - InitValue(INTEGER_SEED, h_values[i], i); - InitValue(INTEGER_SEED, h_reference_values[i], h_reference_ranks[i]); - } - } - - if (num_segments == 1) - { - // Test single-segment implementations - Test( h_keys, h_values, num_items, num_segments, h_segment_offsets, begin_bit, end_bit, h_reference_keys, h_reference_values); - Test( h_keys, h_values, num_items, num_segments, h_segment_offsets, begin_bit, end_bit, h_reference_keys, h_reference_values); -#ifdef CUB_CDP - Test( h_keys, h_values, num_items, num_segments, h_segment_offsets, begin_bit, end_bit, h_reference_keys, h_reference_values); -#endif - } - - // Test multi-segment implementations - Test( h_keys, h_values, num_items, 
num_segments, h_segment_offsets, begin_bit, end_bit, h_reference_keys, h_reference_values); - Test( h_keys, h_values, num_items, num_segments, h_segment_offsets, begin_bit, end_bit, h_reference_keys, h_reference_values); - - if (h_values) delete[] h_values; - if (h_reference_values) delete[] h_reference_values; -} - - - - -/** - * Test value type - */ -template -void TestValueTypes( - KeyT *h_keys, - int num_items, - int num_segments, - int *h_segment_offsets, - int begin_bit, - int end_bit) -{ - // Initialize the solution - - int *h_reference_ranks = NULL; - KeyT *h_reference_keys = NULL; - InitializeSolution(h_keys, num_items, num_segments, h_segment_offsets, begin_bit, end_bit, h_reference_ranks, h_reference_keys); - - // Test value types - - TestBackend (h_keys, num_items, num_segments, h_segment_offsets, begin_bit, end_bit, h_reference_keys, h_reference_ranks); - TestBackend (h_keys, num_items, num_segments, h_segment_offsets, begin_bit, end_bit, h_reference_keys, h_reference_ranks); - - if (!Equals::VALUE) - TestBackend (h_keys, num_items, num_segments, h_segment_offsets, begin_bit, end_bit, h_reference_keys, h_reference_ranks); - - if (!Equals::VALUE) - TestBackend(h_keys, num_items, num_segments, h_segment_offsets, begin_bit, end_bit, h_reference_keys, h_reference_ranks); - - TestBackend (h_keys, num_items, num_segments, h_segment_offsets, begin_bit, end_bit, h_reference_keys, h_reference_ranks); - - // Cleanup - if (h_reference_ranks) delete[] h_reference_ranks; - if (h_reference_keys) delete[] h_reference_keys; -} - - - -/** - * Test ascending/descending - */ -template -void TestDirection( - KeyT *h_keys, - int num_items, - int num_segments, - int *h_segment_offsets, - int begin_bit, - int end_bit) -{ - TestValueTypes(h_keys, num_items, num_segments, h_segment_offsets, begin_bit, end_bit); - TestValueTypes(h_keys, num_items, num_segments, h_segment_offsets, begin_bit, end_bit); -} - - -/** - * Test different bit ranges - */ -template -void TestBits( - KeyT *h_keys, - int num_items, - int num_segments, - int *h_segment_offsets) -{ - // Don't test partial-word sorting for boolean, fp, or signed types (the bit-flipping techniques get in the way) - if ((Traits::CATEGORY == UNSIGNED_INTEGER) && (!Equals::VALUE)) - { - // Partial bits - int begin_bit = 1; - int end_bit = (sizeof(KeyT) * 8) - 1; - printf("Testing key bits [%d,%d)\n", begin_bit, end_bit); fflush(stdout); - TestDirection(h_keys, num_items, num_segments, h_segment_offsets, begin_bit, end_bit); - - // Across subword boundaries - int mid_bit = sizeof(KeyT) * 4; - printf("Testing key bits [%d,%d)\n", mid_bit - 1, mid_bit + 1); fflush(stdout); - TestDirection(h_keys, num_items, num_segments, h_segment_offsets, mid_bit - 1, mid_bit + 1); - } - - printf("Testing key bits [%d,%d)\n", 0, int(sizeof(KeyT)) * 8); fflush(stdout); - TestDirection(h_keys, num_items, num_segments, h_segment_offsets, 0, sizeof(KeyT) * 8); -} - - -/** - * Test different segment compositions - */ -template -void TestSegments( - KeyT *h_keys, - int num_items, - int max_segments) -{ - int *h_segment_offsets = new int[max_segments + 1]; - - for (int num_segments = max_segments; num_segments > 1; num_segments = (num_segments + 32 - 1) / 32) - { - if (num_items / num_segments < 128 * 1000) { - // Right now we assign a single thread block to each segment, so lets keep it to under 128K items per segment - InitializeSegments(num_items, num_segments, h_segment_offsets); - TestBits(h_keys, num_items, num_segments, h_segment_offsets); - } - } - - // Test single 
segment - if (num_items < 128 * 1000) { - // Right now we assign a single thread block to each segment, so lets keep it to under 128K items per segment - InitializeSegments(num_items, 1, h_segment_offsets); - TestBits(h_keys, num_items, 1, h_segment_offsets); - } - - if (h_segment_offsets) delete[] h_segment_offsets; -} - - -/** - * Test different (sub)lengths and number of segments - */ -template -void TestSizes( - KeyT *h_keys, - int max_items, - int max_segments) -{ - for (int num_items = max_items; num_items > 1; num_items = (num_items + 32 - 1) / 32) - { - TestSegments(h_keys, num_items, max_segments); - } - TestSegments(h_keys, 1, max_segments); - TestSegments(h_keys, 0, max_segments); -} - - -/** - * Test key sampling distributions - */ -template -void TestGen( - int max_items, - int max_segments) -{ - int ptx_version; - CubDebugExit(PtxVersion(ptx_version)); - - if (max_items < 0) - max_items = (ptx_version > 100) ? 9000003 : max_items = 5000003; - - if (max_segments < 0) - max_segments = 5003; - - KeyT *h_keys = new KeyT[max_items]; - - for (int entropy_reduction = 0; entropy_reduction <= 6; entropy_reduction += 3) - { - printf("\nTesting random %s keys with entropy reduction factor %d\n", typeid(KeyT).name(), entropy_reduction); fflush(stdout); - InitializeKeyBits(RANDOM, h_keys, max_items, entropy_reduction); - TestSizes(h_keys, max_items, max_segments); - } - - printf("\nTesting uniform %s keys\n", typeid(KeyT).name()); fflush(stdout); - InitializeKeyBits(UNIFORM, h_keys, max_items, 0); - TestSizes(h_keys, max_items, max_segments); - - printf("\nTesting natural number %s keys\n", typeid(KeyT).name()); fflush(stdout); - InitializeKeyBits(INTEGER_SEED, h_keys, max_items, 0); - TestSizes(h_keys, max_items, max_segments); - - if (h_keys) delete[] h_keys; -} - - -//--------------------------------------------------------------------- -// Simple test -//--------------------------------------------------------------------- - -template < - Backend BACKEND, - typename KeyT, - typename ValueT, - bool IS_DESCENDING> -void Test( - int num_items, - int num_segments, - GenMode gen_mode, - int entropy_reduction, - int begin_bit, - int end_bit) -{ - const bool KEYS_ONLY = Equals::VALUE; - - KeyT *h_keys = new KeyT[num_items]; - int *h_reference_ranks = NULL; - KeyT *h_reference_keys = NULL; - ValueT *h_values = NULL; - ValueT *h_reference_values = NULL; - int *h_segment_offsets = new int[num_segments + 1]; - - if (end_bit < 0) - end_bit = sizeof(KeyT) * 8; - - InitializeKeyBits(gen_mode, h_keys, num_items, entropy_reduction); - InitializeSegments(num_items, num_segments, h_segment_offsets); - InitializeSolution( - h_keys, num_items, num_segments, h_segment_offsets, - begin_bit, end_bit, h_reference_ranks, h_reference_keys); - - if (!KEYS_ONLY) - { - h_values = new ValueT[num_items]; - h_reference_values = new ValueT[num_items]; - - for (int i = 0; i < num_items; ++i) - { - InitValue(INTEGER_SEED, h_values[i], i); - InitValue(INTEGER_SEED, h_reference_values[i], h_reference_ranks[i]); - } - } - if (h_reference_ranks) delete[] h_reference_ranks; - - printf("\nTesting bits [%d,%d) of %s keys with gen-mode %d\n", begin_bit, end_bit, typeid(KeyT).name(), gen_mode); fflush(stdout); - Test( - h_keys, h_values, - num_items, num_segments, h_segment_offsets, - begin_bit, end_bit, h_reference_keys, h_reference_values); - - if (h_keys) delete[] h_keys; - if (h_reference_keys) delete[] h_reference_keys; - if (h_values) delete[] h_values; - if (h_reference_values) delete[] h_reference_values; - if 
(h_segment_offsets) delete[] h_segment_offsets; -} - - - -//--------------------------------------------------------------------- -// Main -//--------------------------------------------------------------------- - -/** - * Main - */ -int main(int argc, char** argv) -{ - int bits = -1; - int num_items = -1; - int num_segments = -1; - int entropy_reduction = 0; - - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - args.GetCmdLineArgument("n", num_items); - args.GetCmdLineArgument("s", num_segments); - args.GetCmdLineArgument("i", g_timing_iterations); - args.GetCmdLineArgument("repeat", g_repeat); - args.GetCmdLineArgument("bits", bits); - args.GetCmdLineArgument("entropy", entropy_reduction); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--bits=]" - "[--n= " - "[--s= " - "[--i= " - "[--device=] " - "[--repeat=]" - "[--v] " - "[--entropy=]" - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - - // Get ptx version - int ptx_version; - CubDebugExit(PtxVersion(ptx_version)); - -#ifdef QUICKER_TEST - - enum { - IS_DESCENDING = false - }; - - // Compile/run basic CUB test - if (num_items < 0) num_items = 48000000; - if (num_segments < 0) num_segments = 5000; - - - Test( num_items, num_segments, RANDOM, entropy_reduction, 0, bits); - - Test( num_items, 1, RANDOM, entropy_reduction, 0, bits); - Test( num_items, 1, RANDOM, entropy_reduction, 0, bits); - - Test( num_items, 1, RANDOM, entropy_reduction, 0, bits); - Test( num_items, 1, RANDOM, entropy_reduction, 0, bits); - -#elif defined(QUICK_TEST) - - // Compile/run quick tests - if (num_items < 0) num_items = 48000000; - if (num_segments < 0) num_segments = 5000; - - // Compare CUB and thrust on 32b keys-only - Test ( num_items, 1, RANDOM, entropy_reduction, 0, bits); - Test ( num_items, 1, RANDOM, entropy_reduction, 0, bits); - - // Compare CUB and thrust on 64b keys-only - Test ( num_items, 1, RANDOM, entropy_reduction, 0, bits); - Test ( num_items, 1, RANDOM, entropy_reduction, 0, bits); - - - // Compare CUB and thrust on 32b key-value pairs - Test ( num_items, 1, RANDOM, entropy_reduction, 0, bits); - Test ( num_items, 1, RANDOM, entropy_reduction, 0, bits); - - // Compare CUB and thrust on 64b key-value pairs - Test ( num_items, 1, RANDOM, entropy_reduction, 0, bits); - Test ( num_items, 1, RANDOM, entropy_reduction, 0, bits); - - -#else - - // Compile/run thorough tests - for (int i = 0; i <= g_repeat; ++i) - { - TestGen (num_items, num_segments); - - TestGen (num_items, num_segments); - TestGen (num_items, num_segments); - TestGen (num_items, num_segments); - - TestGen (num_items, num_segments); - TestGen (num_items, num_segments); - - TestGen (num_items, num_segments); - TestGen (num_items, num_segments); - - TestGen (num_items, num_segments); - TestGen (num_items, num_segments); - - TestGen (num_items, num_segments); - TestGen (num_items, num_segments); - - TestGen (num_items, num_segments); - - if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted - TestGen (num_items, num_segments); - - } - -#endif - - return 0; -} - diff --git a/ml-xgboost/cub/test/test_device_reduce.cu b/ml-xgboost/cub/test/test_device_reduce.cu deleted file mode 100644 index c4551a7..0000000 --- a/ml-xgboost/cub/test/test_device_reduce.cu +++ /dev/null @@ -1,1307 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. 
All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/****************************************************************************** - * Test of DeviceReduce utilities - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include <stdio.h> -#include <limits> -#include <typeinfo> - -#include <cub/util_allocator.cuh> -#include <cub/device/device_reduce.cuh> -#include <cub/device/device_segmented_reduce.cuh> -#include <cub/iterator/constant_input_iterator.cuh> -#include <cub/iterator/discard_output_iterator.cuh> - -#include <thrust/device_ptr.h> -#include <thrust/reduce.h> - -#include "test_util.h" - -using namespace cub; - - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -int g_ptx_version; -int g_sm_count; -bool g_verbose = false; -bool g_verbose_input = false; -int g_timing_iterations = 0; -int g_repeat = 0; -CachingDeviceAllocator g_allocator(true); - - -// Dispatch types -enum Backend -{ - CUB, // CUB method - CUB_SEGMENTED, // CUB segmented method - CUB_CDP, // GPU-based (dynamic parallelism) dispatch to CUB method - THRUST, // Thrust method -}; - - -// Custom max functor -struct CustomMax -{ - /// Boolean max operator, returns (a > b) ?
a : b - template - __host__ __device__ __forceinline__ OutputT operator()(const OutputT &a, const OutputT &b) - { - return CUB_MAX(a, b); - } -}; - - -//--------------------------------------------------------------------- -// Dispatch to different CUB DeviceReduce entrypoints -//--------------------------------------------------------------------- - -/** - * Dispatch to reduce entrypoint (custom-max) - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - int num_items, - int max_segments, - int *d_segment_offsets, - ReductionOpT reduction_op, - cudaStream_t stream, - bool debug_synchronous) -{ - typedef typename std::iterator_traits::value_type InputT; - - // The output value type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type OutputT; // ... else the output iterator's value type - - // Max-identity - OutputT identity = Traits::Lowest(); // replace with std::numeric_limits::lowest() when C++ support is more prevalent - - // Invoke kernel to device reduction directly - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, - d_in, d_out, num_items, reduction_op, identity, - stream, debug_synchronous); - } - return error; -} - -/** - * Dispatch to sum entrypoint - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - int num_items, - int max_segments, - int *d_segment_offsets, - cub::Sum reduction_op, - cudaStream_t stream, - bool debug_synchronous) -{ - // Invoke kernel to device reduction directly - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); - } - return error; -} - -/** - * Dispatch to min entrypoint - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - int num_items, - int max_segments, - int *d_segment_offsets, - cub::Min reduction_op, - cudaStream_t stream, - bool debug_synchronous) -{ - // Invoke kernel to device reduction directly - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); - } - return error; -} - -/** - * Dispatch to max entrypoint - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - int num_items, - 
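The custom-max dispatch above goes through the generic DeviceReduce::Reduce entrypoint, which takes a user-supplied functor plus an initial value for empty inputs. A minimal sketch of the direct call, reusing the CustomMax functor defined earlier (assumed device buffers; error checks omitted):

    #include <climits>
    #include <cub/device/device_reduce.cuh>

    void MaxReduce(int *d_in, int *d_out, int num_items)
    {
        CustomMax max_op;            // functor defined earlier in this file
        int       init = INT_MIN;    // identity value for a max-reduction over int
        void     *d_temp_storage = NULL;
        size_t    temp_storage_bytes = 0;
        // Size query, then the actual reduction of num_items ints into d_out[0].
        cub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes,
            d_in, d_out, num_items, max_op, init);
        cudaMalloc(&d_temp_storage, temp_storage_bytes);
        cub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes,
            d_in, d_out, num_items, max_op, init);
        cudaFree(d_temp_storage);
    }

The specialized Sum/Min/Max/ArgMin/ArgMax dispatches that follow take the same shape, minus the functor and identity arguments.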
int max_segments, - int *d_segment_offsets, - cub::Max reduction_op, - cudaStream_t stream, - bool debug_synchronous) -{ - // Invoke kernel to device reduction directly - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); - } - return error; -} - -/** - * Dispatch to argmin entrypoint - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - int num_items, - int max_segments, - int *d_segment_offsets, - cub::ArgMin reduction_op, - cudaStream_t stream, - bool debug_synchronous) -{ - // Invoke kernel to device reduction directly - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); - } - return error; -} - -/** - * Dispatch to argmax entrypoint - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - int num_items, - int max_segments, - int *d_segment_offsets, - cub::ArgMax reduction_op, - cudaStream_t stream, - bool debug_synchronous) -{ - // Invoke kernel to device reduction directly - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); - } - return error; -} - - -//--------------------------------------------------------------------- -// Dispatch to different CUB DeviceSegmentedReduce entrypoints -//--------------------------------------------------------------------- - -/** - * Dispatch to reduce entrypoint (custom-max) - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - int num_items, - int max_segments, - int *d_segment_offsets, - ReductionOpT reduction_op, - cudaStream_t stream, - bool debug_synchronous) -{ - // The input value type - typedef typename std::iterator_traits::value_type InputT; - - // The output value type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type OutputT; // ... 
else the output iterator's value type - - // Max-identity - OutputT identity = Traits::Lowest(); // replace with std::numeric_limits::lowest() when C++ support is more prevalent - - // Invoke kernel to device reduction directly - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceSegmentedReduce::Reduce(d_temp_storage, temp_storage_bytes, - d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1, reduction_op, identity, - stream, debug_synchronous); - } - return error; -} - -/** - * Dispatch to sum entrypoint - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - int num_items, - int max_segments, - int *d_segment_offsets, - cub::Sum reduction_op, - cudaStream_t stream, - bool debug_synchronous) -{ - // Invoke kernel to device reduction directly - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, - d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1, - stream, debug_synchronous); - } - return error; -} - -/** - * Dispatch to min entrypoint - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - int num_items, - int max_segments, - int *d_segment_offsets, - cub::Min reduction_op, - cudaStream_t stream, - bool debug_synchronous) -{ - // Invoke kernel to device reduction directly - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceSegmentedReduce::Min(d_temp_storage, temp_storage_bytes, - d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1, - stream, debug_synchronous); - } - return error; -} - -/** - * Dispatch to max entrypoint - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - int num_items, - int max_segments, - int *d_segment_offsets, - cub::Max reduction_op, - cudaStream_t stream, - bool debug_synchronous) -{ - // Invoke kernel to device reduction directly - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceSegmentedReduce::Max(d_temp_storage, temp_storage_bytes, - d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1, - stream, debug_synchronous); - } - return error; -} - -/** - * Dispatch to argmin entrypoint - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - int num_items, - int max_segments, - int *d_segment_offsets, - cub::ArgMin reduction_op, - cudaStream_t stream, - bool debug_synchronous) -{ - // Invoke kernel to device reduction directly - cudaError_t error = cudaSuccess; - for (int i = 
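Note the `d_segment_offsets, d_segment_offsets + 1` idiom in these segmented dispatches: a single offsets array of length num_segments + 1 doubles as the begin- and end-offset iterators, with segment i covering [offsets[i], offsets[i+1]). A small sketch of the convention (assumed device buffers; error checks omitted):

    #include <cub/device/device_segmented_reduce.cuh>

    // Three segments over ten items: [0,3), [3,7), [7,10).
    // d_offsets holds {0, 3, 7, 10}; d_out receives one sum per segment.
    void SumPerSegment(int *d_in, int *d_out, int *d_offsets)
    {
        void  *d_temp_storage     = NULL;
        size_t temp_storage_bytes = 0;
        cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes,
            d_in, d_out, 3, d_offsets, d_offsets + 1);
        cudaMalloc(&d_temp_storage, temp_storage_bytes);
        cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes,
            d_in, d_out, 3, d_offsets, d_offsets + 1);
        cudaFree(d_temp_storage);
    }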
0; i < timing_timing_iterations; ++i) - { - error = DeviceSegmentedReduce::ArgMin(d_temp_storage, temp_storage_bytes, - d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1, - stream, debug_synchronous); - } - return error; -} - -/** - * Dispatch to argmax entrypoint - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - int num_items, - int max_segments, - int *d_segment_offsets, - cub::ArgMax reduction_op, - cudaStream_t stream, - bool debug_synchronous) -{ - // Invoke kernel to device reduction directly - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceSegmentedReduce::ArgMax(d_temp_storage, temp_storage_bytes, - d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1, - stream, debug_synchronous); - } - return error; -} - - -//--------------------------------------------------------------------- -// Dispatch to different Thrust entrypoints -//--------------------------------------------------------------------- - -/** - * Dispatch to reduction entrypoint (min or max specialization) - */ -template -cudaError_t Dispatch( - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - int num_items, - int max_segments, - int *d_segment_offsets, - ReductionOpT reduction_op, - cudaStream_t stream, - bool debug_synchronous) -{ - // The output value type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type OutputT; // ... else the output iterator's value type - - if (d_temp_storage == 0) - { - temp_storage_bytes = 1; - } - else - { - OutputT init; - CubDebugExit(cudaMemcpy(&init, d_in + 0, sizeof(OutputT), cudaMemcpyDeviceToHost)); - - thrust::device_ptr d_in_wrapper(d_in); - OutputT retval; - for (int i = 0; i < timing_timing_iterations; ++i) - { - retval = thrust::reduce(d_in_wrapper, d_in_wrapper + num_items, init, reduction_op); - } - - if (!Equals >::VALUE) - CubDebugExit(cudaMemcpy(d_out, &retval, sizeof(OutputT), cudaMemcpyHostToDevice)); - } - - return cudaSuccess; -} - -/** - * Dispatch to reduction entrypoint (sum specialization) - */ -template -cudaError_t Dispatch( - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - int num_items, - int max_segments, - int *d_segment_offsets, - Sum reduction_op, - cudaStream_t stream, - bool debug_synchronous) -{ - // The output value type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type OutputT; // ... 
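The THRUST backend runs the same reduction through Thrust for comparison; wrapping the raw device pointer is the only adaptation required, and the result comes back to the host directly. A sketch (assuming d_in holds num_items ints on the device):

    #include <thrust/device_ptr.h>
    #include <thrust/reduce.h>

    int SumWithThrust(int *d_in, int num_items)
    {
        thrust::device_ptr<int> first(d_in);
        // thrust::reduce dispatches to the device and returns the sum to the host.
        return thrust::reduce(first, first + num_items);
    }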
else the output iterator's value type - - if (d_temp_storage == 0) - { - temp_storage_bytes = 1; - } - else - { - thrust::device_ptr d_in_wrapper(d_in); - OutputT retval; - for (int i = 0; i < timing_timing_iterations; ++i) - { - retval = thrust::reduce(d_in_wrapper, d_in_wrapper + num_items); - } - - if (!Equals >::VALUE) - CubDebugExit(cudaMemcpy(d_out, &retval, sizeof(OutputT), cudaMemcpyHostToDevice)); - } - - return cudaSuccess; -} - - -//--------------------------------------------------------------------- -// CUDA nested-parallelism test kernel -//--------------------------------------------------------------------- - -/** - * Simple wrapper kernel to invoke DeviceReduce - */ -template < - typename InputIteratorT, - typename OutputIteratorT, - typename ReductionOpT> -__global__ void CnpDispatchKernel( - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - int num_items, - int max_segments, - int *d_segment_offsets, - ReductionOpT reduction_op, - bool debug_synchronous) -{ -#ifndef CUB_CDP - *d_cdp_error = cudaErrorNotSupported; -#else - *d_cdp_error = Dispatch(Int2Type(), timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, - d_in, d_out, num_items, max_segments, d_segment_offsets, reduction_op, 0, debug_synchronous); - *d_temp_storage_bytes = temp_storage_bytes; -#endif -} - - -/** - * Dispatch to CUB_CDP kernel - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - int num_items, - int max_segments, - int *d_segment_offsets, - ReductionOpT reduction_op, - cudaStream_t stream, - bool debug_synchronous) -{ - // Invoke kernel to invoke device-side dispatch - CnpDispatchKernel<<<1,1>>>(timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, - d_in, d_out, num_items, max_segments, d_segment_offsets, reduction_op, debug_synchronous); - - // Copy out temp_storage_bytes - CubDebugExit(cudaMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) * 1, cudaMemcpyDeviceToHost)); - - // Copy out error - cudaError_t retval; - CubDebugExit(cudaMemcpy(&retval, d_cdp_error, sizeof(cudaError_t) * 1, cudaMemcpyDeviceToHost)); - return retval; -} - - - -//--------------------------------------------------------------------- -// Problem generation -//--------------------------------------------------------------------- - -/// Initialize problem -template -void Initialize( - GenMode gen_mode, - InputT *h_in, - int num_items) -{ - for (int i = 0; i < num_items; ++i) - { - InitValue(gen_mode, h_in[i], i); - } - - if (g_verbose_input) - { - printf("Input:\n"); - DisplayResults(h_in, num_items); - printf("\n\n"); - } -} - - -/// Solve problem (max/custom-max functor) -template -struct Solution -{ - typedef _OutputT OutputT; - - template - static void Solve(HostInputIteratorT h_in, OutputT *h_reference, int num_segments, int *h_segment_offsets, - ReductionOpT reduction_op) - { - for (int i = 0; i < num_segments; ++i) - { - OutputT aggregate = Traits::Lowest(); // replace with std::numeric_limits::lowest() when C++ support is more prevalent - for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j) - aggregate = 
reduction_op(aggregate, OutputT(h_in[j])); - h_reference[i] = aggregate; - } - } -}; - -/// Solve problem (min functor) -template -struct Solution -{ - typedef _OutputT OutputT; - - template - static void Solve(HostInputIteratorT h_in, OutputT *h_reference, int num_segments, int *h_segment_offsets, - cub::Min reduction_op) - { - for (int i = 0; i < num_segments; ++i) - { - OutputT aggregate = Traits::Max(); // replace with std::numeric_limits::max() when C++ support is more prevalent - for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j) - aggregate = reduction_op(aggregate, OutputT(h_in[j])); - h_reference[i] = aggregate; - } - } -}; - - -/// Solve problem (sum functor) -template -struct Solution -{ - typedef _OutputT OutputT; - - template - static void Solve(HostInputIteratorT h_in, OutputT *h_reference, int num_segments, int *h_segment_offsets, - cub::Sum reduction_op) - { - for (int i = 0; i < num_segments; ++i) - { - OutputT aggregate; - InitValue(INTEGER_SEED, aggregate, 0); - for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j) - aggregate = reduction_op(aggregate, OutputT(h_in[j])); - h_reference[i] = aggregate; - } - } -}; - -/// Solve problem (argmin functor) -template -struct Solution -{ - typedef KeyValuePair OutputT; - - template - static void Solve(HostInputIteratorT h_in, OutputT *h_reference, int num_segments, int *h_segment_offsets, - cub::ArgMin reduction_op) - { - for (int i = 0; i < num_segments; ++i) - { - OutputT aggregate(1, Traits::Max()); // replace with std::numeric_limits::max() when C++ support is more prevalent - for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j) - { - OutputT item(j - h_segment_offsets[i], OutputValueT(h_in[j])); - aggregate = reduction_op(aggregate, item); - } - h_reference[i] = aggregate; - } - } -}; - - -/// Solve problem (argmax functor) -template -struct Solution -{ - typedef KeyValuePair OutputT; - - template - static void Solve(HostInputIteratorT h_in, OutputT *h_reference, int num_segments, int *h_segment_offsets, - cub::ArgMax reduction_op) - { - for (int i = 0; i < num_segments; ++i) - { - OutputT aggregate(1, Traits::Lowest()); // replace with std::numeric_limits::lowest() when C++ support is more prevalent - for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j) - { - OutputT item(j - h_segment_offsets[i], OutputValueT(h_in[j])); - aggregate = reduction_op(aggregate, item); - } - h_reference[i] = aggregate; - } - } -}; - - -//--------------------------------------------------------------------- -// Problem generation -//--------------------------------------------------------------------- - -/// Test DeviceReduce for a given problem input -template < - typename BackendT, - typename DeviceInputIteratorT, - typename HostReferenceIteratorT, - typename ReductionOpT> -void Test( - BackendT backend, - DeviceInputIteratorT d_in, - int num_items, - int num_segments, - int *d_segment_offsets, - ReductionOpT reduction_op, - HostReferenceIteratorT h_reference) -{ - // Input and output data types - typedef typename std::iterator_traits::value_type InputT; - typedef typename std::iterator_traits::value_type OutputT; - - // Allocate CUB_CDP device arrays for temp storage size and error - OutputT *d_out = NULL; - size_t *d_temp_storage_bytes = NULL; - cudaError_t *d_cdp_error = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(OutputT) * num_segments)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1)); - 
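The argmin/argmax reference solutions above aggregate cub::KeyValuePair values, where .key is the item's offset within its segment and .value is the item itself, with ties resolving to the lowest offset. A host-side sketch of the same semantics for a single segment (a hypothetical helper; header paths assumed from CUB's layout):

    #include <cfloat>
    #include <cub/util_type.cuh>                 // cub::KeyValuePair
    #include <cub/thread/thread_operators.cuh>   // cub::ArgMin

    cub::KeyValuePair<int, float> HostArgMin(const float *h_in, int len)
    {
        // Seed with offset 1 and the type's max value, mirroring the test's seed.
        cub::KeyValuePair<int, float> best(1, FLT_MAX);
        cub::ArgMin arg_min;
        for (int j = 0; j < len; ++j)
            best = arg_min(best, cub::KeyValuePair<int, float>(j, h_in[j]));
        return best;
    }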
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(cudaError_t) * 1)); - - // Inquire temp device storage - void *d_temp_storage = NULL; - size_t temp_storage_bytes = 0; - CubDebugExit(Dispatch(backend, 1, - d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, - d_in, d_out, num_items, num_segments, d_segment_offsets, - reduction_op, 0, true)); - - // Allocate temp device storage - CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); - - // Clear device output - CubDebugExit(cudaMemset(d_out, 0, sizeof(OutputT) * num_segments)); - - // Run once with discard iterator - DiscardOutputIterator discard_itr; - CubDebugExit(Dispatch(backend, 1, - d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, - d_in, discard_itr, num_items, num_segments, d_segment_offsets, - reduction_op, 0, true)); - - // Run warmup/correctness iteration - CubDebugExit(Dispatch(backend, 1, - d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, - d_in, d_out, num_items, num_segments, d_segment_offsets, - reduction_op, 0, true)); - - // Check for correctness (and display results, if specified) - int compare = CompareDeviceResults(h_reference, d_out, num_segments, g_verbose, g_verbose); - printf("\t%s", compare ? "FAIL" : "PASS"); - - // Flush any stdout/stderr - fflush(stdout); - fflush(stderr); - - // Performance - if (g_timing_iterations > 0) - { - GpuTimer gpu_timer; - gpu_timer.Start(); - - CubDebugExit(Dispatch(backend, g_timing_iterations, - d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, - d_in, d_out, num_items, num_segments, d_segment_offsets, - reduction_op, 0, false)); - - gpu_timer.Stop(); - float elapsed_millis = gpu_timer.ElapsedMillis(); - - // Display performance - float avg_millis = elapsed_millis / g_timing_iterations; - float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f; - float giga_bandwidth = giga_rate * sizeof(InputT); - printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s", avg_millis, giga_rate, giga_bandwidth); - } - - if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); - if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes)); - if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error)); - if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); - - // Correctness asserts - AssertEquals(0, compare); -} - - -/// Test DeviceReduce -template < - Backend BACKEND, - typename OutputValueT, - typename HostInputIteratorT, - typename DeviceInputIteratorT, - typename ReductionOpT> -void SolveAndTest( - HostInputIteratorT h_in, - DeviceInputIteratorT d_in, - int num_items, - int num_segments, - int *h_segment_offsets, - int *d_segment_offsets, - ReductionOpT reduction_op) -{ - typedef typename std::iterator_traits::value_type InputValueT; - typedef Solution SolutionT; - typedef typename SolutionT::OutputT OutputT; - - printf("\n\n%s cub::DeviceReduce<%s> %d items (%s), %d segments\n", - (BACKEND == CUB_CDP) ? "CUB_CDP" : (BACKEND == THRUST) ? "Thrust" : (BACKEND == CUB_SEGMENTED) ? 
"CUB_SEGMENTED" : "CUB", - typeid(ReductionOpT).name(), num_items, typeid(HostInputIteratorT).name(), num_segments); - fflush(stdout); - - // Allocate and solve solution - OutputT *h_reference = new OutputT[num_segments]; - SolutionT::Solve(h_in, h_reference, num_segments, h_segment_offsets, reduction_op); - - // Run test - Test(Int2Type(), d_in, num_items, num_segments, d_segment_offsets, reduction_op, h_reference); - - // Cleanup - if (h_reference) delete[] h_reference; -} - - -/// Test specific problem type -template < - Backend BACKEND, - typename InputT, - typename OutputT, - typename ReductionOpT> -void TestProblem( - int num_items, - int num_segments, - GenMode gen_mode, - ReductionOpT reduction_op) -{ - printf("\n\nInitializing %d %s->%s (gen mode %d)... ", num_items, typeid(InputT).name(), typeid(OutputT).name(), gen_mode); fflush(stdout); - fflush(stdout); - - // Initialize value data - InputT* h_in = new InputT[num_items]; - Initialize(gen_mode, h_in, num_items); - - // Initialize segment data - int *h_segment_offsets = new int[num_segments + 1]; - InitializeSegments(num_items, num_segments, h_segment_offsets, g_verbose_input); - - // Initialize device data - int *d_segment_offsets = NULL; - InputT *d_in = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(InputT) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_segment_offsets, sizeof(int) * (num_segments + 1))); - CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(InputT) * num_items, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemcpy(d_segment_offsets, h_segment_offsets, sizeof(int) * (num_segments + 1), cudaMemcpyHostToDevice)); - - SolveAndTest(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, reduction_op); - - if (h_segment_offsets) delete[] h_segment_offsets; - if (d_segment_offsets) CubDebugExit(g_allocator.DeviceFree(d_segment_offsets)); - if (h_in) delete[] h_in; - if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); -} - - -/// Test different operators -template < - Backend BACKEND, - typename OutputT, - typename HostInputIteratorT, - typename DeviceInputIteratorT> -void TestByOp( - HostInputIteratorT h_in, - DeviceInputIteratorT d_in, - int num_items, - int num_segments, - int *h_segment_offsets, - int *d_segment_offsets) -{ - SolveAndTest(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, CustomMax()); - SolveAndTest(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, Sum()); - SolveAndTest(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, Min()); - SolveAndTest(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, ArgMin()); - SolveAndTest(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, Max()); - SolveAndTest(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, ArgMax()); -} - - -/// Test different backends -template < - typename InputT, - typename OutputT> -void TestByBackend( - int num_items, - int max_segments, - GenMode gen_mode) -{ - // Initialize host data - printf("\n\nInitializing %d %s -> %s (gen mode %d)... 
", - num_items, typeid(InputT).name(), typeid(OutputT).name(), gen_mode); fflush(stdout); - - InputT *h_in = new InputT[num_items]; - int *h_segment_offsets = new int[max_segments + 1]; - Initialize(gen_mode, h_in, num_items); - - // Initialize device data - InputT *d_in = NULL; - int *d_segment_offsets = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(InputT) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_segment_offsets, sizeof(int) * (max_segments + 1))); - CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(InputT) * num_items, cudaMemcpyHostToDevice)); - - // - // Test single-segment implementations - // - - InitializeSegments(num_items, 1, h_segment_offsets, g_verbose_input); - - // Page-aligned-input tests - TestByOp(h_in, d_in, num_items, 1, h_segment_offsets, NULL); // Host-dispatch -#ifdef CUB_CDP - TestByOp(h_in, d_in, num_items, 1, h_segment_offsets, NULL); // Device-dispatch -#endif - - // Non-page-aligned-input tests - if (num_items > 1) - { - InitializeSegments(num_items - 1, 1, h_segment_offsets, g_verbose_input); - TestByOp(h_in + 1, d_in + 1, num_items - 1, 1, h_segment_offsets, NULL); - } - - // - // Test segmented implementation - // - - // Right now we assign a single thread block to each segment, so lets keep it to under 128K items per segment - int max_items_per_segment = 128000; - - for (int num_segments = (num_items + max_items_per_segment - 1) / max_items_per_segment; - num_segments < max_segments; - num_segments = (num_segments * 32) + 1) - { - InitializeSegments(num_items, num_segments, h_segment_offsets, g_verbose_input); - CubDebugExit(cudaMemcpy(d_segment_offsets, h_segment_offsets, sizeof(int) * (num_segments + 1), cudaMemcpyHostToDevice)); - TestByOp(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets); - } - - if (h_in) delete[] h_in; - if (h_segment_offsets) delete[] h_segment_offsets; - if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); - if (d_segment_offsets) CubDebugExit(g_allocator.DeviceFree(d_segment_offsets)); -} - - -/// Test different input-generation modes -template < - typename InputT, - typename OutputT> -void TestByGenMode( - int num_items, - int max_segments) -{ - // - // Test pointer support using different input-generation modes - // - - TestByBackend(num_items, max_segments, UNIFORM); - TestByBackend(num_items, max_segments, INTEGER_SEED); - TestByBackend(num_items, max_segments, RANDOM); - - // - // Test iterator support using a constant-iterator and SUM - // - - InputT val; - InitValue(UNIFORM, val, 0); - ConstantInputIterator h_in(val); - - int *h_segment_offsets = new int[1 + 1]; - InitializeSegments(num_items, 1, h_segment_offsets, g_verbose_input); - - SolveAndTest(h_in, h_in, num_items, 1, h_segment_offsets, NULL, Sum()); -#ifdef CUB_CDP - SolveAndTest(h_in, h_in, num_items, 1, h_segment_offsets, NULL, Sum()); -#endif - - if (h_segment_offsets) delete[] h_segment_offsets; -} - - -/// Test different problem sizes -template < - typename InputT, - typename OutputT> -struct TestBySize -{ - int max_items; - int max_segments; - - TestBySize(int max_items, int max_segments) : - max_items(max_items), - max_segments(max_segments) - {} - - template - cudaError_t Invoke() - { - // - // Black-box testing on all backends - // - - // Test 0, 1, many - TestByGenMode(0, max_segments); - TestByGenMode(1, max_segments); - TestByGenMode(max_items, max_segments); - - // Test random problem sizes from a log-distribution [8, max_items-ish) - int num_iterations = 8; - double max_exp = 
log(double(max_items)) / log(double(2.0)); - for (int i = 0; i < num_iterations; ++i) - { - int num_items = (int) pow(2.0, RandomValue(max_exp - 3.0) + 3.0); - TestByGenMode(num_items, max_segments); - } - - // - // White-box testing of single-segment problems around specific sizes - // - - // Tile-boundaries: multiple blocks, one tile per block - int tile_size = ActivePolicyT::ReducePolicy::BLOCK_THREADS * ActivePolicyT::ReducePolicy::ITEMS_PER_THREAD; - TestProblem(tile_size * 4, 1, RANDOM, Sum()); - TestProblem(tile_size * 4 + 1, 1, RANDOM, Sum()); - TestProblem(tile_size * 4 - 1, 1, RANDOM, Sum()); - - // Tile-boundaries: multiple blocks, multiple tiles per block - int sm_occupancy = 32; - int occupancy = tile_size * sm_occupancy * g_sm_count; - TestProblem(occupancy, 1, RANDOM, Sum()); - TestProblem(occupancy + 1, 1, RANDOM, Sum()); - TestProblem(occupancy - 1, 1, RANDOM, Sum()); - - return cudaSuccess; - } -}; - - -/// Test problem type -template < - typename InputT, - typename OutputT> -void TestType( - int max_items, - int max_segments) -{ - typedef typename DeviceReducePolicy::MaxPolicy MaxPolicyT; - - TestBySize dispatch(max_items, max_segments); - - MaxPolicyT::Invoke(g_ptx_version, dispatch); -} - - -//--------------------------------------------------------------------- -// Main -//--------------------------------------------------------------------- - - -/** - * Main - */ -int main(int argc, char** argv) -{ - int max_items = 27000000; - int max_segments = 34000; - - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - g_verbose_input = args.CheckCmdLineFlag("v2"); - args.GetCmdLineArgument("n", max_items); - args.GetCmdLineArgument("s", max_segments); - args.GetCmdLineArgument("i", g_timing_iterations); - args.GetCmdLineArgument("repeat", g_repeat); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--n= " - "[--s= " - "[--i= " - "[--device=] " - "[--repeat=]" - "[--v] " - "[--cdp]" - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - - // Get ptx version - CubDebugExit(PtxVersion(g_ptx_version)); - - // Get SM count - g_sm_count = args.deviceProp.multiProcessorCount; - - std::numeric_limits::max(); - -#ifdef QUICKER_TEST - - // Compile/run basic test - - - - TestProblem( max_items, 1, RANDOM, Sum()); - - TestProblem( max_items, 1, RANDOM, Sum()); - - TestProblem( max_items, 1, RANDOM, ArgMax()); - - TestProblem( max_items, 1, RANDOM, Sum()); - - TestProblem(max_items, max_segments, RANDOM, Sum()); - - -#elif defined(QUICK_TEST) - - // Compile/run quick comparison tests - - TestProblem( max_items * 4, 1, UNIFORM, Sum()); - TestProblem( max_items * 4, 1, UNIFORM, Sum()); - - printf("\n----------------------------\n"); - TestProblem( max_items * 2, 1, UNIFORM, Sum()); - TestProblem( max_items * 2, 1, UNIFORM, Sum()); - - printf("\n----------------------------\n"); - TestProblem( max_items, 1, UNIFORM, Sum()); - TestProblem( max_items, 1, UNIFORM, Sum()); - - printf("\n----------------------------\n"); - TestProblem( max_items / 2, 1, UNIFORM, Sum()); - TestProblem( max_items / 2, 1, UNIFORM, Sum()); - - printf("\n----------------------------\n"); - TestProblem( max_items / 4, 1, UNIFORM, Max()); - TestProblem( max_items / 4, 1, UNIFORM, Max()); - -#else - - // Compile/run thorough tests - for (int i = 0; i <= g_repeat; ++i) - { - // Test different input types - TestType(max_items, max_segments); - TestType(max_items, max_segments); - - TestType(max_items, 
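To make those white-box probe sizes concrete (with hypothetical policy numbers; the real ones come from ActivePolicyT::ReducePolicy and vary by architecture):

    const int BLOCK_THREADS    = 256;
    const int ITEMS_PER_THREAD = 16;
    const int tile_size        = BLOCK_THREADS * ITEMS_PER_THREAD;   // 4096
    // Boundary probes straddle four full tiles: 16383, 16384, 16385 items.
    const int probes[] = { tile_size * 4 - 1, tile_size * 4, tile_size * 4 + 1 };

The occupancy probes then scale tile_size by the assumed 32 resident tiles per SM times the device's SM count, again straddling that boundary by one item in each direction.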
max_segments); - - TestType(max_items, max_segments); - TestType(max_items, max_segments); - TestType(max_items, max_segments); - TestType(max_items, max_segments); - - TestType(max_items, max_segments); - TestType(max_items, max_segments); - TestType(max_items, max_segments); - TestType(max_items, max_segments); - - TestType(max_items, max_segments); - TestType(max_items, max_segments); - } - -#endif - - - printf("\n"); - return 0; -} - - - diff --git a/ml-xgboost/cub/test/test_device_reduce_by_key.cu b/ml-xgboost/cub/test/test_device_reduce_by_key.cu deleted file mode 100644 index 7daaea3..0000000 --- a/ml-xgboost/cub/test/test_device_reduce_by_key.cu +++ /dev/null @@ -1,853 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - ******************************************************************************/ - -/****************************************************************************** - * Test of DeviceReduce::ReduceByKey utilities - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include -#include - -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "test_util.h" - -using namespace cub; - - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -bool g_verbose = false; -int g_timing_iterations = 0; -int g_repeat = 0; -CachingDeviceAllocator g_allocator(true); - -// Dispatch types -enum Backend -{ - CUB, // CUB method - THRUST, // Thrust method - CDP, // GPU-based (dynamic parallelism) dispatch to CUB method -}; - - -//--------------------------------------------------------------------- -// Dispatch to different CUB entrypoints -//--------------------------------------------------------------------- - -/** - * Dispatch to reduce-by-key entrypoint - */ -template < - typename KeyInputIteratorT, - typename KeyOutputIteratorT, - typename ValueInputIteratorT, - typename ValueOutputIteratorT, - typename NumRunsIteratorT, - typename EqualityOpT, - typename ReductionOpT, - typename OffsetT> -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void *d_temp_storage, - size_t &temp_storage_bytes, - KeyInputIteratorT d_keys_in, - KeyOutputIteratorT d_keys_out, - ValueInputIteratorT d_values_in, - ValueOutputIteratorT d_values_out, - NumRunsIteratorT d_num_runs, - EqualityOpT equality_op, - ReductionOpT reduction_op, - OffsetT num_items, - cudaStream_t stream, - bool debug_synchronous) -{ - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceReduce::ReduceByKey( - d_temp_storage, - temp_storage_bytes, - d_keys_in, - d_keys_out, - d_values_in, - d_values_out, - d_num_runs, - reduction_op, - num_items, - stream, - debug_synchronous); - } - return error; -} - - -//--------------------------------------------------------------------- -// Dispatch to different Thrust entrypoints -//--------------------------------------------------------------------- - -/** - * Dispatch to reduce-by-key entrypoint - */ -template < - typename KeyInputIteratorT, - typename KeyOutputIteratorT, - typename ValueInputIteratorT, - typename ValueOutputIteratorT, - typename NumRunsIteratorT, - typename EqualityOpT, - typename ReductionOpT, - typename OffsetT> -cudaError_t Dispatch( - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void *d_temp_storage, - size_t &temp_storage_bytes, - KeyInputIteratorT d_keys_in, - KeyOutputIteratorT d_keys_out, - ValueInputIteratorT d_values_in, - ValueOutputIteratorT d_values_out, - NumRunsIteratorT d_num_runs, - EqualityOpT equality_op, - ReductionOpT reduction_op, - OffsetT num_items, - cudaStream_t stream, - bool debug_synchronous) -{ - // The input keys type - typedef typename std::iterator_traits::value_type KeyInputT; - - // The output keys type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? 
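For reference, the shape of the direct call this dispatch wraps: DeviceReduce::ReduceByKey collapses each run of equal keys to one key and one reduced value and writes the run count to d_num_runs. Note that the CUB entrypoint does not take the equality_op threaded through the harness; keys are compared with CUB's default equality. A sketch under the usual two-phase pattern (assumed device buffers sized to num_items; error checks omitted):

    #include <cub/device/device_reduce.cuh>

    void SumByKey(int *d_keys_in, int *d_values_in, int *d_unique_out,
                  int *d_aggregates_out, int *d_num_runs, int num_items)
    {
        void  *d_temp_storage     = NULL;
        size_t temp_storage_bytes = 0;
        cub::DeviceReduce::ReduceByKey(d_temp_storage, temp_storage_bytes,
            d_keys_in, d_unique_out, d_values_in, d_aggregates_out,
            d_num_runs, cub::Sum(), num_items);
        cudaMalloc(&d_temp_storage, temp_storage_bytes);
        cub::DeviceReduce::ReduceByKey(d_temp_storage, temp_storage_bytes,
            d_keys_in, d_unique_out, d_values_in, d_aggregates_out,
            d_num_runs, cub::Sum(), num_items);
        cudaFree(d_temp_storage);
    }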
- typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type KeyOutputT; // ... else the output iterator's value type - - // The input values type - typedef typename std::iterator_traits::value_type ValueInputT; - - // The output values type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type ValueOuputT; // ... else the output iterator's value type - - if (d_temp_storage == 0) - { - temp_storage_bytes = 1; - } - else - { - thrust::device_ptr d_keys_in_wrapper(d_keys_in); - thrust::device_ptr d_keys_out_wrapper(d_keys_out); - - thrust::device_ptr d_values_in_wrapper(d_values_in); - thrust::device_ptr d_values_out_wrapper(d_values_out); - - thrust::pair, thrust::device_ptr > d_out_ends; - - for (int i = 0; i < timing_timing_iterations; ++i) - { - d_out_ends = thrust::reduce_by_key( - d_keys_in_wrapper, - d_keys_in_wrapper + num_items, - d_values_in_wrapper, - d_keys_out_wrapper, - d_values_out_wrapper); - } - - OffsetT num_segments = d_out_ends.first - d_keys_out_wrapper; - CubDebugExit(cudaMemcpy(d_num_runs, &num_segments, sizeof(OffsetT), cudaMemcpyHostToDevice)); - - } - - return cudaSuccess; -} - - - -//--------------------------------------------------------------------- -// CUDA Nested Parallelism Test Kernel -//--------------------------------------------------------------------- - -/** - * Simple wrapper kernel to invoke DeviceSelect - */ -template < - typename KeyInputIteratorT, - typename KeyOutputIteratorT, - typename ValueInputIteratorT, - typename ValueOutputIteratorT, - typename NumRunsIteratorT, - typename EqualityOpT, - typename ReductionOpT, - typename OffsetT> -__global__ void CnpDispatchKernel( - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void *d_temp_storage, - size_t temp_storage_bytes, - KeyInputIteratorT d_keys_in, - KeyOutputIteratorT d_keys_out, - ValueInputIteratorT d_values_in, - ValueOutputIteratorT d_values_out, - NumRunsIteratorT d_num_runs, - EqualityOpT equality_op, - ReductionOpT reduction_op, - OffsetT num_items, - cudaStream_t stream, - bool debug_synchronous) -{ - -#ifndef CUB_CDP - *d_cdp_error = cudaErrorNotSupported; -#else - *d_cdp_error = Dispatch(Int2Type(), timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, - d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out, d_num_runs, equality_op, reduction_op, num_items, 0, debug_synchronous); - - *d_temp_storage_bytes = temp_storage_bytes; -#endif -} - - -/** - * Dispatch to CDP kernel - */ -template < - typename KeyInputIteratorT, - typename KeyOutputIteratorT, - typename ValueInputIteratorT, - typename ValueOutputIteratorT, - typename NumRunsIteratorT, - typename EqualityOpT, - typename ReductionOpT, - typename OffsetT> -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void *d_temp_storage, - size_t &temp_storage_bytes, - KeyInputIteratorT d_keys_in, - KeyOutputIteratorT d_keys_out, - ValueInputIteratorT d_values_in, - ValueOutputIteratorT d_values_out, - NumRunsIteratorT d_num_runs, - EqualityOpT equality_op, - ReductionOpT reduction_op, - OffsetT num_items, - cudaStream_t stream, - bool debug_synchronous) 
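The Thrust comparison path relies on thrust::reduce_by_key returning a pair of output end iterators, so the number of runs falls out as the distance from the start of the key output. A compact sketch (assumed device buffers):

    #include <thrust/device_ptr.h>
    #include <thrust/pair.h>
    #include <thrust/reduce.h>

    int ReduceByKeyWithThrust(int *d_keys_in, int *d_vals_in,
                              int *d_keys_out, int *d_vals_out, int num_items)
    {
        thrust::device_ptr<int> keys_in(d_keys_in),   vals_in(d_vals_in);
        thrust::device_ptr<int> keys_out(d_keys_out), vals_out(d_vals_out);

        thrust::pair<thrust::device_ptr<int>, thrust::device_ptr<int> > ends =
            thrust::reduce_by_key(keys_in, keys_in + num_items,
                                  vals_in, keys_out, vals_out);

        // Number of runs = distance from output begin to the returned end.
        return (int) (ends.first - keys_out);
    }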
-{ - // Invoke kernel to invoke device-side dispatch - CnpDispatchKernel<<<1,1>>>(timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, - d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out, d_num_runs, equality_op, reduction_op, num_items, 0, debug_synchronous); - - // Copy out temp_storage_bytes - CubDebugExit(cudaMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) * 1, cudaMemcpyDeviceToHost)); - - // Copy out error - cudaError_t retval; - CubDebugExit(cudaMemcpy(&retval, d_cdp_error, sizeof(cudaError_t) * 1, cudaMemcpyDeviceToHost)); - return retval; -} - - - -//--------------------------------------------------------------------- -// Test generation -//--------------------------------------------------------------------- - - -/** - * Initialize problem - */ -template -void Initialize( - int entropy_reduction, - T *h_in, - int num_items, - int max_segment) -{ - unsigned int max_int = (unsigned int) -1; - - int key = 0; - int i = 0; - while (i < num_items) - { - // Select number of repeating occurrences - - int repeat; - - if (max_segment < 0) - { - repeat = num_items; - } - else if (max_segment < 2) - { - repeat = 1; - } - else - { - RandomBits(repeat, entropy_reduction); - repeat = (int) ((double(repeat) * double(max_segment)) / double(max_int)); - repeat = CUB_MAX(1, repeat); - } - - int j = i; - while (j < CUB_MIN(i + repeat, num_items)) - { - InitValue(INTEGER_SEED, h_in[j], key); - j++; - } - - i = j; - key++; - } - - if (g_verbose) - { - printf("Input:\n"); - DisplayResults(h_in, num_items); - printf("\n\n"); - } -} - - -/** - * Solve problem. Returns total number of segments identified - */ -template < - typename KeyInputIteratorT, - typename ValueInputIteratorT, - typename KeyT, - typename ValueT, - typename EqualityOpT, - typename ReductionOpT> -int Solve( - KeyInputIteratorT h_keys_in, - KeyT *h_keys_reference, - ValueInputIteratorT h_values_in, - ValueT *h_values_reference, - EqualityOpT equality_op, - ReductionOpT reduction_op, - int num_items) -{ - // First item - KeyT previous = h_keys_in[0]; - ValueT aggregate = h_values_in[0]; - int num_segments = 0; - - // Subsequent items - for (int i = 1; i < num_items; ++i) - { - if (!equality_op(previous, h_keys_in[i])) - { - h_keys_reference[num_segments] = previous; - h_values_reference[num_segments] = aggregate; - num_segments++; - aggregate = h_values_in[i]; - } - else - { - aggregate = reduction_op(aggregate, h_values_in[i]); - } - previous = h_keys_in[i]; - } - - h_keys_reference[num_segments] = previous; - h_values_reference[num_segments] = aggregate; - num_segments++; - - return num_segments; -} - - - -/** - * Test DeviceSelect for a given problem input - */ -template < - Backend BACKEND, - typename DeviceKeyInputIteratorT, - typename DeviceValueInputIteratorT, - typename KeyT, - typename ValueT, - typename EqualityOpT, - typename ReductionOpT> -void Test( - DeviceKeyInputIteratorT d_keys_in, - DeviceValueInputIteratorT d_values_in, - KeyT* h_keys_reference, - ValueT* h_values_reference, - EqualityOpT equality_op, - ReductionOpT reduction_op, - int num_segments, - int num_items) -{ - // Allocate device output arrays and number of segments - KeyT* d_keys_out = NULL; - ValueT* d_values_out = NULL; - int* d_num_runs = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_keys_out, sizeof(KeyT) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_values_out, sizeof(ValueT) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_runs, 
sizeof(int))); - - // Allocate CDP device arrays - size_t *d_temp_storage_bytes = NULL; - cudaError_t *d_cdp_error = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(cudaError_t) * 1)); - - // Allocate temporary storage - void *d_temp_storage = NULL; - size_t temp_storage_bytes = 0; - CubDebugExit(Dispatch(Int2Type(), 1, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out, d_num_runs, equality_op, reduction_op, num_items, 0, true)); - CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); - - // Clear device output arrays - CubDebugExit(cudaMemset(d_keys_out, 0, sizeof(KeyT) * num_items)); - CubDebugExit(cudaMemset(d_values_out, 0, sizeof(ValueT) * num_items)); - CubDebugExit(cudaMemset(d_num_runs, 0, sizeof(int))); - - // Run warmup/correctness iteration - CubDebugExit(Dispatch(Int2Type(), 1, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out, d_num_runs, equality_op, reduction_op, num_items, 0, true)); - - // Check for correctness (and display results, if specified) - int compare1 = CompareDeviceResults(h_keys_reference, d_keys_out, num_segments, true, g_verbose); - printf("\t Keys %s ", compare1 ? "FAIL" : "PASS"); - - int compare2 = CompareDeviceResults(h_values_reference, d_values_out, num_segments, true, g_verbose); - printf("\t Values %s ", compare2 ? "FAIL" : "PASS"); - - int compare3 = CompareDeviceResults(&num_segments, d_num_runs, 1, true, g_verbose); - printf("\t Count %s ", compare3 ? "FAIL" : "PASS"); - - // Flush any stdout/stderr - fflush(stdout); - fflush(stderr); - - // Performance - GpuTimer gpu_timer; - gpu_timer.Start(); - CubDebugExit(Dispatch(Int2Type(), g_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out, d_num_runs, equality_op, reduction_op, num_items, 0, false)); - gpu_timer.Stop(); - float elapsed_millis = gpu_timer.ElapsedMillis(); - - // Display performance - if (g_timing_iterations > 0) - { - float avg_millis = elapsed_millis / g_timing_iterations; - float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f; - int bytes_moved = ((num_items + num_segments) * sizeof(KeyT)) + ((num_items + num_segments) * sizeof(ValueT)); - float giga_bandwidth = float(bytes_moved) / avg_millis / 1000.0f / 1000.0f; - printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s", avg_millis, giga_rate, giga_bandwidth); - } - printf("\n\n"); - - // Flush any stdout/stderr - fflush(stdout); - fflush(stderr); - - // Cleanup - if (d_keys_out) CubDebugExit(g_allocator.DeviceFree(d_keys_out)); - if (d_values_out) CubDebugExit(g_allocator.DeviceFree(d_values_out)); - if (d_num_runs) CubDebugExit(g_allocator.DeviceFree(d_num_runs)); - if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes)); - if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error)); - if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); - - // Correctness asserts - AssertEquals(0, compare1 | compare2 | compare3); -} - - -/** - * Test DeviceSelect on pointer type - */ -template < - Backend BACKEND, - typename KeyT, - typename ValueT, - typename ReductionOpT> -void TestPointer( - int num_items, - int entropy_reduction, - int max_segment, - ReductionOpT reduction_op) -{ - // Allocate 
host arrays - KeyT* h_keys_in = new KeyT[num_items]; - KeyT* h_keys_reference = new KeyT[num_items]; - - ValueT* h_values_in = new ValueT[num_items]; - ValueT* h_values_reference = new ValueT[num_items]; - - for (int i = 0; i < num_items; ++i) - InitValue(INTEGER_SEED, h_values_in[i], 1); - - // Initialize problem and solution - Equality equality_op; - Initialize(entropy_reduction, h_keys_in, num_items, max_segment); - int num_segments = Solve(h_keys_in, h_keys_reference, h_values_in, h_values_reference, equality_op, reduction_op, num_items); - - printf("\nPointer %s cub::DeviceReduce::ReduceByKey %s reduction of %d items, %d segments (avg run length %.3f), {%s,%s} key value pairs, max_segment %d, entropy_reduction %d\n", - (BACKEND == CDP) ? "CDP CUB" : (BACKEND == THRUST) ? "Thrust" : "CUB", - (Equals::VALUE) ? "Sum" : "Max", - num_items, num_segments, float(num_items) / num_segments, - typeid(KeyT).name(), typeid(ValueT).name(), - max_segment, entropy_reduction); - fflush(stdout); - - // Allocate problem device arrays - KeyT *d_keys_in = NULL; - ValueT *d_values_in = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_keys_in, sizeof(KeyT) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_values_in, sizeof(ValueT) * num_items)); - - // Initialize device input - CubDebugExit(cudaMemcpy(d_keys_in, h_keys_in, sizeof(KeyT) * num_items, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemcpy(d_values_in, h_values_in, sizeof(ValueT) * num_items, cudaMemcpyHostToDevice)); - - // Run Test - Test(d_keys_in, d_values_in, h_keys_reference, h_values_reference, equality_op, reduction_op, num_segments, num_items); - - // Cleanup - if (h_keys_in) delete[] h_keys_in; - if (h_values_in) delete[] h_values_in; - if (h_keys_reference) delete[] h_keys_reference; - if (h_values_reference) delete[] h_values_reference; - if (d_keys_in) CubDebugExit(g_allocator.DeviceFree(d_keys_in)); - if (d_values_in) CubDebugExit(g_allocator.DeviceFree(d_values_in)); -} - - -/** - * Test on iterator type - */ -template < - Backend BACKEND, - typename KeyT, - typename ValueT, - typename ReductionOpT> -void TestIterator( - int num_items, - int entropy_reduction, - int max_segment, - ReductionOpT reduction_op) -{ - // Allocate host arrays - KeyT* h_keys_in = new KeyT[num_items]; - KeyT* h_keys_reference = new KeyT[num_items]; - - ValueT one_val; - InitValue(INTEGER_SEED, one_val, 1); - ConstantInputIterator h_values_in(one_val); - ValueT* h_values_reference = new ValueT[num_items]; - - // Initialize problem and solution - Equality equality_op; - Initialize(entropy_reduction, h_keys_in, num_items, max_segment); - int num_segments = Solve(h_keys_in, h_keys_reference, h_values_in, h_values_reference, equality_op, reduction_op, num_items); - - printf("\nIterator %s cub::DeviceReduce::ReduceByKey %s reduction of %d items, %d segments (avg run length %.3f), {%s,%s} key value pairs, max_segment %d, entropy_reduction %d\n", - (BACKEND == CDP) ? "CDP CUB" : (BACKEND == THRUST) ? "Thrust" : "CUB", - (Equals::VALUE) ? 
"Sum" : "Max", - num_items, num_segments, float(num_items) / num_segments, - typeid(KeyT).name(), typeid(ValueT).name(), - max_segment, entropy_reduction); - fflush(stdout); - - // Allocate problem device arrays - KeyT *d_keys_in = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_keys_in, sizeof(KeyT) * num_items)); - - // Initialize device input - CubDebugExit(cudaMemcpy(d_keys_in, h_keys_in, sizeof(KeyT) * num_items, cudaMemcpyHostToDevice)); - - // Run Test - Test(d_keys_in, h_values_in, h_keys_reference, h_values_reference, equality_op, reduction_op, num_segments, num_items); - - // Cleanup - if (h_keys_in) delete[] h_keys_in; - if (h_keys_reference) delete[] h_keys_reference; - if (h_values_reference) delete[] h_values_reference; - if (d_keys_in) CubDebugExit(g_allocator.DeviceFree(d_keys_in)); -} - - -/** - * Test different gen modes - */ -template < - Backend BACKEND, - typename KeyT, - typename ValueT, - typename ReductionOpT> -void Test( - int num_items, - ReductionOpT reduction_op, - int max_segment) -{ - // 0 key-bit entropy reduction rounds - TestPointer(num_items, 0, max_segment, reduction_op); - - if (max_segment > 1) - { - // 2 key-bit entropy reduction rounds - TestPointer(num_items, 2, max_segment, reduction_op); - - // 7 key-bit entropy reduction rounds - TestPointer(num_items, 7, max_segment, reduction_op); - } -} - - -/** - * Test different avg segment lengths modes - */ -template < - Backend BACKEND, - typename KeyT, - typename ValueT, - typename ReductionOpT> -void Test( - int num_items, - ReductionOpT reduction_op) -{ - Test(num_items, reduction_op, -1); - Test(num_items, reduction_op, 1); - - // Evaluate different max-segment lengths - for (int max_segment = 3; max_segment < CUB_MIN(num_items, (unsigned short) -1); max_segment *= 11) - { - Test(num_items, reduction_op, max_segment); - } -} - - - -/** - * Test different dispatch - */ -template < - typename KeyT, - typename ValueT, - typename ReductionOpT> -void TestDispatch( - int num_items, - ReductionOpT reduction_op) -{ - Test(num_items, reduction_op); -#ifdef CUB_CDP - Test(num_items, reduction_op); -#endif -} - - -/** - * Test different input sizes - */ -template < - typename KeyT, - typename ValueT, - typename ReductionOpT> -void TestSize( - int num_items, - ReductionOpT reduction_op) -{ - if (num_items < 0) - { - TestDispatch(1, reduction_op); - TestDispatch(100, reduction_op); - TestDispatch(10000, reduction_op); - TestDispatch(1000000, reduction_op); - } - else - { - TestDispatch(num_items, reduction_op); - } - -} - - -template < - typename KeyT, - typename ValueT> -void TestOp( - int num_items) -{ - TestSize(num_items, cub::Sum()); - TestSize(num_items, cub::Max()); -} - - - -//--------------------------------------------------------------------- -// Main -//--------------------------------------------------------------------- - -/** - * Main - */ -int main(int argc, char** argv) -{ - int num_items = -1; - int entropy_reduction = 0; - int maxseg = 1000; - - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - args.GetCmdLineArgument("n", num_items); - args.GetCmdLineArgument("i", g_timing_iterations); - args.GetCmdLineArgument("repeat", g_repeat); - args.GetCmdLineArgument("maxseg", maxseg); - args.GetCmdLineArgument("entropy", entropy_reduction); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--n= " - "[--i= " - "[--device=] " - "[--maxseg=]" - "[--entropy=]" - "[--repeat=]" - "[--v] " - "[--cdp]" - "\n", 
argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - printf("\n"); - - // Get ptx version - int ptx_version; - CubDebugExit(PtxVersion(ptx_version)); - -#ifdef QUICKER_TEST - - // Compile/run basic CUB test - if (num_items < 0) num_items = 32000000; - - TestPointer(num_items, entropy_reduction, maxseg, cub::Sum()); - TestPointer(num_items, entropy_reduction, maxseg, cub::Sum()); - TestIterator(num_items, entropy_reduction, maxseg, cub::Sum()); - -#elif defined(QUICK_TEST) - - // Compile/run quick tests - if (num_items < 0) num_items = 32000000; - - printf("---- RLE int ---- \n"); - TestIterator(num_items, entropy_reduction, maxseg, cub::Sum()); - - printf("---- RLE long long ---- \n"); - TestIterator(num_items, entropy_reduction, maxseg, cub::Sum()); - - printf("---- int ---- \n"); - TestPointer(num_items, entropy_reduction, maxseg, cub::Sum()); - TestPointer(num_items, entropy_reduction, maxseg, cub::Sum()); - - printf("---- float ---- \n"); - TestPointer(num_items, entropy_reduction, maxseg, cub::Sum()); - TestPointer(num_items, entropy_reduction, maxseg, cub::Sum()); - - if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted - { - printf("---- double ---- \n"); - TestPointer(num_items, entropy_reduction, maxseg, cub::Sum()); - TestPointer(num_items, entropy_reduction, maxseg, cub::Sum()); - } - -#else - - // Compile/run thorough tests - for (int i = 0; i <= g_repeat; ++i) - { - - // Test different input types - TestOp(num_items); - TestOp(num_items); - TestOp(num_items); - TestOp(num_items); - TestOp(num_items); - TestOp(num_items); - if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted - TestOp(num_items); - - TestOp(num_items); - TestOp(num_items); - TestOp(num_items); - TestOp(num_items); - TestOp(num_items); - TestOp(num_items); - TestOp(num_items); - - TestOp(num_items); - TestOp(num_items); - TestOp(num_items); - TestOp(num_items); - - } - -#endif - - return 0; -} - - - diff --git a/ml-xgboost/cub/test/test_device_run_length_encode.cu b/ml-xgboost/cub/test/test_device_run_length_encode.cu deleted file mode 100644 index 872de15..0000000 --- a/ml-xgboost/cub/test/test_device_run_length_encode.cu +++ /dev/null @@ -1,890 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/****************************************************************************** - * Test of DeviceReduce::RunLengthEncode utilities - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include -#include - -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "test_util.h" - -using namespace cub; - - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -bool g_verbose = false; -int g_timing_iterations = 0; -int g_repeat = 0; -CachingDeviceAllocator g_allocator(true); - -// Dispatch types -enum Backend -{ - CUB, // CUB method - THRUST, // Thrust method - CDP, // GPU-based (dynamic parallelism) dispatch to CUB method -}; - -// Operation types -enum RleMethod -{ - RLE, // Run length encode - NON_TRIVIAL, - CSR, -}; - - -//--------------------------------------------------------------------- -// Dispatch to different CUB entrypoints -//--------------------------------------------------------------------- - - -/** - * Dispatch to run-length encode entrypoint - */ -template < - typename InputIteratorT, - typename UniqueOutputIteratorT, - typename OffsetsOutputIteratorT, - typename LengthsOutputIteratorT, - typename NumRunsIterator, - typename OffsetT> -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type method, - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t &temp_storage_bytes, - InputIteratorT d_in, - UniqueOutputIteratorT d_unique_out, - OffsetsOutputIteratorT d_offsets_out, - LengthsOutputIteratorT d_lengths_out, - NumRunsIterator d_num_runs, - cub::Equality equality_op, - OffsetT num_items, - cudaStream_t stream, - bool debug_synchronous) -{ - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceRunLengthEncode::Encode( - d_temp_storage, - temp_storage_bytes, - d_in, - d_unique_out, - d_lengths_out, - d_num_runs, - num_items, - stream, - debug_synchronous); - } - return error; -} - - -/** - * Dispatch to non-trivial runs entrypoint - */ -template < - typename InputIteratorT, - typename UniqueOutputIteratorT, - typename OffsetsOutputIteratorT, - typename LengthsOutputIteratorT, - typename NumRunsIterator, - typename OffsetT> -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type method, - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t &temp_storage_bytes, - InputIteratorT d_in, - UniqueOutputIteratorT d_unique_out, - OffsetsOutputIteratorT d_offsets_out, - LengthsOutputIteratorT d_lengths_out, - NumRunsIterator d_num_runs, - 
cub::Equality equality_op, - OffsetT num_items, - cudaStream_t stream, - bool debug_synchronous) -{ - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceRunLengthEncode::NonTrivialRuns( - d_temp_storage, - temp_storage_bytes, - d_in, - d_offsets_out, - d_lengths_out, - d_num_runs, - num_items, - stream, - debug_synchronous); - } - return error; -} - - - -//--------------------------------------------------------------------- -// Dispatch to different Thrust entrypoints -//--------------------------------------------------------------------- - -/** - * Dispatch to run-length encode entrypoint - */ -template < - typename InputIteratorT, - typename UniqueOutputIteratorT, - typename OffsetsOutputIteratorT, - typename LengthsOutputIteratorT, - typename NumRunsIterator, - typename OffsetT> -cudaError_t Dispatch( - Int2Type method, - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void *d_temp_storage, - size_t &temp_storage_bytes, - InputIteratorT d_in, - UniqueOutputIteratorT d_unique_out, - OffsetsOutputIteratorT d_offsets_out, - LengthsOutputIteratorT d_lengths_out, - NumRunsIterator d_num_runs, - cub::Equality equality_op, - OffsetT num_items, - cudaStream_t stream, - bool debug_synchronous) -{ - // The input value type - typedef typename std::iterator_traits::value_type InputT; - - // The output value type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type UniqueT; // ... else the output iterator's value type - - // The lengths output value type - typedef typename If<(Equals::value_type, void>::VALUE), // LengthT = (if output iterator's value type is void) ? - OffsetT, // ... then the OffsetT type, - typename std::iterator_traits::value_type>::Type LengthT; // ... 
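// The typedefs here (mangled in this extract) implement a recurring pattern
// in these tests: use the output iterator's value_type, unless it is void,
// in which case fall back to the input iterator's value_type. A rough
// modern-C++ equivalent of that selection (alias name hypothetical):
#include <iterator>
#include <type_traits>

template <typename InputIt, typename OutputIt>
using OutputValueT = typename std::conditional<
    std::is_same<typename std::iterator_traits<OutputIt>::value_type, void>::value,
    typename std::iterator_traits<InputIt>::value_type,   // output value_type is void
    typename std::iterator_traits<OutputIt>::value_type   // otherwise use it directly
>::type;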
else the output iterator's value type - - if (d_temp_storage == 0) - { - temp_storage_bytes = 1; - } - else - { - thrust::device_ptr d_in_wrapper(d_in); - thrust::device_ptr d_unique_out_wrapper(d_unique_out); - thrust::device_ptr d_lengths_out_wrapper(d_lengths_out); - - thrust::pair, thrust::device_ptr > d_out_ends; - - LengthT one_val; - InitValue(INTEGER_SEED, one_val, 1); - thrust::constant_iterator constant_one(one_val); - - for (int i = 0; i < timing_timing_iterations; ++i) - { - d_out_ends = thrust::reduce_by_key( - d_in_wrapper, - d_in_wrapper + num_items, - constant_one, - d_unique_out_wrapper, - d_lengths_out_wrapper); - } - - OffsetT num_runs = d_out_ends.first - d_unique_out_wrapper; - CubDebugExit(cudaMemcpy(d_num_runs, &num_runs, sizeof(OffsetT), cudaMemcpyHostToDevice)); - } - - return cudaSuccess; -} - - - -//--------------------------------------------------------------------- -// CUDA Nested Parallelism Test Kernel -//--------------------------------------------------------------------- - -/** - * Simple wrapper kernel to invoke DeviceRunLengthEncode - */ -template < - int RLE_METHOD, - typename InputIteratorT, - typename UniqueOutputIteratorT, - typename OffsetsOutputIteratorT, - typename LengthsOutputIteratorT, - typename NumRunsIterator, - typename EqualityOp, - typename OffsetT> -__global__ void CnpDispatchKernel( - Int2Type method, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t temp_storage_bytes, - InputIteratorT d_in, - UniqueOutputIteratorT d_unique_out, - OffsetsOutputIteratorT d_offsets_out, - LengthsOutputIteratorT d_lengths_out, - NumRunsIterator d_num_runs, - cub::Equality equality_op, - OffsetT num_items, - cudaStream_t stream, - bool debug_synchronous) -{ - -#ifndef CUB_CDP - *d_cdp_error = cudaErrorNotSupported; -#else - *d_cdp_error = Dispatch(method, Int2Type(), timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, - d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_offsets_out, d_lengths_out, d_num_runs, equality_op, num_items, 0, debug_synchronous); - - *d_temp_storage_bytes = temp_storage_bytes; -#endif -} - - -/** - * Dispatch to CDP kernel - */ -template < - int RLE_METHOD, - typename InputIteratorT, - typename UniqueOutputIteratorT, - typename OffsetsOutputIteratorT, - typename LengthsOutputIteratorT, - typename NumRunsIterator, - typename EqualityOp, - typename OffsetT> -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type method, - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t &temp_storage_bytes, - InputIteratorT d_in, - UniqueOutputIteratorT d_unique_out, - OffsetsOutputIteratorT d_offsets_out, - LengthsOutputIteratorT d_lengths_out, - NumRunsIterator d_num_runs, - EqualityOp equality_op, - OffsetT num_items, - cudaStream_t stream, - bool debug_synchronous) -{ - // Invoke kernel to invoke device-side dispatch - CnpDispatchKernel<<<1,1>>>(method, timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, - d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_offsets_out, d_lengths_out, d_num_runs, equality_op, num_items, 0, debug_synchronous); - - // Copy out temp_storage_bytes - CubDebugExit(cudaMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) * 1, cudaMemcpyDeviceToHost)); - - // Copy out error - cudaError_t retval; - CubDebugExit(cudaMemcpy(&retval, d_cdp_error, sizeof(cudaError_t) * 1, 
cudaMemcpyDeviceToHost)); - return retval; -} - - - -//--------------------------------------------------------------------- -// Test generation -//--------------------------------------------------------------------- - - -/** - * Initialize problem - */ -template -void Initialize( - int entropy_reduction, - T *h_in, - int num_items, - int max_segment) -{ - unsigned int max_int = (unsigned int) -1; - - int key = 0; - int i = 0; - while (i < num_items) - { - // Select number of repeating occurrences for the current run - int repeat; - if (max_segment < 0) - { - repeat = num_items; - } - else if (max_segment < 2) - { - repeat = 1; - } - else - { - RandomBits(repeat, entropy_reduction); - repeat = (int) ((double(repeat) * double(max_segment)) / double(max_int)); - repeat = CUB_MAX(1, repeat); - } - - int j = i; - while (j < CUB_MIN(i + repeat, num_items)) - { - InitValue(INTEGER_SEED, h_in[j], key); - j++; - } - - i = j; - key++; - } - - if (g_verbose) - { - printf("Input:\n"); - DisplayResults(h_in, num_items); - printf("\n\n"); - } -} - - -/** - * Solve problem. Returns total number of segments identified - */ -template < - RleMethod RLE_METHOD, - typename InputIteratorT, - typename T, - typename OffsetT, - typename LengthT, - typename EqualityOp> -int Solve( - InputIteratorT h_in, - T *h_unique_reference, - OffsetT *h_offsets_reference, - LengthT *h_lengths_reference, - EqualityOp equality_op, - int num_items) -{ - if (num_items == 0) - return 0; - - // First item - T previous = h_in[0]; - LengthT length = 1; - int num_runs = 0; - int run_begin = 0; - - // Subsequent items - for (int i = 1; i < num_items; ++i) - { - if (!equality_op(previous, h_in[i])) - { - if ((RLE_METHOD != NON_TRIVIAL) || (length > 1)) - { - h_unique_reference[num_runs] = previous; - h_offsets_reference[num_runs] = run_begin; - h_lengths_reference[num_runs] = length; - num_runs++; - } - length = 1; - run_begin = i; - } - else - { - length++; - } - previous = h_in[i]; - } - - if ((RLE_METHOD != NON_TRIVIAL) || (length > 1)) - { - h_unique_reference[num_runs] = previous; - h_offsets_reference[num_runs] = run_begin; - h_lengths_reference[num_runs] = length; - num_runs++; - } - - return num_runs; -} - - - -/** - * Test DeviceRunLengthEncode for a given problem input - */ -template < - RleMethod RLE_METHOD, - Backend BACKEND, - typename DeviceInputIteratorT, - typename T, - typename OffsetT, - typename LengthT, - typename EqualityOp> -void Test( - DeviceInputIteratorT d_in, - T *h_unique_reference, - OffsetT *h_offsets_reference, - LengthT *h_lengths_reference, - EqualityOp equality_op, - int num_runs, - int num_items) -{ - // Allocate device output arrays and number of segments - T* d_unique_out = NULL; - LengthT* d_offsets_out = NULL; - OffsetT* d_lengths_out = NULL; - int* d_num_runs = NULL; - - if (RLE_METHOD == RLE) - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_unique_out, sizeof(T) * num_items)); - if (RLE_METHOD == NON_TRIVIAL) - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_offsets_out, sizeof(OffsetT) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_lengths_out, sizeof(LengthT) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_runs, sizeof(int))); - - // Allocate CDP device arrays - size_t* d_temp_storage_bytes = NULL; - cudaError_t* d_cdp_error = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(cudaError_t) * 1)); - - // Allocate temporary 
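// The host-side Solve above builds the reference answer that the device
// results are compared against. Its trivial-runs semantics in isolation, as
// a simplified sketch (helper name hypothetical): for input {1,1,2,3,3,3}
// it yields uniques {1,2,3} and lengths {2,1,3}.
#include <vector>

template <typename T>
int SolveRleSketch(const std::vector<T>& in,
                   std::vector<T>& uniques, std::vector<int>& lengths)
{
    int num_runs = 0;
    for (size_t i = 0; i < in.size(); ++i)
    {
        if (i == 0 || !(in[i] == in[i - 1]))   // a new run begins here
        {
            uniques.push_back(in[i]);
            lengths.push_back(1);
            ++num_runs;
        }
        else
        {
            ++lengths.back();                  // extend the current run
        }
    }
    return num_runs;
}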
storage - void* d_temp_storage = NULL; - size_t temp_storage_bytes = 0; - CubDebugExit(Dispatch(Int2Type(), Int2Type(), 1, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_offsets_out, d_lengths_out, d_num_runs, equality_op, num_items, 0, true)); - CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); - - // Clear device output arrays - if (RLE_METHOD == RLE) - CubDebugExit(cudaMemset(d_unique_out, 0, sizeof(T) * num_items)); - if (RLE_METHOD == NON_TRIVIAL) - CubDebugExit(cudaMemset(d_offsets_out, 0, sizeof(OffsetT) * num_items)); - CubDebugExit(cudaMemset(d_lengths_out, 0, sizeof(LengthT) * num_items)); - CubDebugExit(cudaMemset(d_num_runs, 0, sizeof(int))); - - // Run warmup/correctness iteration - CubDebugExit(Dispatch(Int2Type(), Int2Type(), 1, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_offsets_out, d_lengths_out, d_num_runs, equality_op, num_items, 0, true)); - - // Check for correctness (and display results, if specified) - int compare0 = 0; - int compare1 = 0; - int compare2 = 0; - int compare3 = 0; - - if (RLE_METHOD == RLE) - { - compare0 = CompareDeviceResults(h_unique_reference, d_unique_out, num_runs, true, g_verbose); - printf("\t Keys %s\n", compare0 ? "FAIL" : "PASS"); - } - - if (RLE_METHOD != RLE) - { - compare1 = CompareDeviceResults(h_offsets_reference, d_offsets_out, num_runs, true, g_verbose); - printf("\t Offsets %s\n", compare1 ? "FAIL" : "PASS"); - } - - if (RLE_METHOD != CSR) - { - compare2 = CompareDeviceResults(h_lengths_reference, d_lengths_out, num_runs, true, g_verbose); - printf("\t Lengths %s\n", compare2 ? "FAIL" : "PASS"); - } - - compare3 = CompareDeviceResults(&num_runs, d_num_runs, 1, true, g_verbose); - printf("\t Count %s\n", compare3 ? 
"FAIL" : "PASS"); - - // Flush any stdout/stderr - fflush(stdout); - fflush(stderr); - - // Performance - GpuTimer gpu_timer; - gpu_timer.Start(); - CubDebugExit(Dispatch(Int2Type(), Int2Type(), g_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_offsets_out, d_lengths_out, d_num_runs, equality_op, num_items, 0, false)); - gpu_timer.Stop(); - float elapsed_millis = gpu_timer.ElapsedMillis(); - - // Display performance - if (g_timing_iterations > 0) - { - float avg_millis = elapsed_millis / g_timing_iterations; - float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f; - int bytes_moved = (num_items * sizeof(T)) + (num_runs * (sizeof(OffsetT) + sizeof(LengthT))); - float giga_bandwidth = float(bytes_moved) / avg_millis / 1000.0f / 1000.0f; - printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s", avg_millis, giga_rate, giga_bandwidth); - } - printf("\n\n"); - - // Flush any stdout/stderr - fflush(stdout); - fflush(stderr); - - // Cleanup - if (d_unique_out) CubDebugExit(g_allocator.DeviceFree(d_unique_out)); - if (d_offsets_out) CubDebugExit(g_allocator.DeviceFree(d_offsets_out)); - if (d_lengths_out) CubDebugExit(g_allocator.DeviceFree(d_lengths_out)); - if (d_num_runs) CubDebugExit(g_allocator.DeviceFree(d_num_runs)); - if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes)); - if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error)); - if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); - - // Correctness asserts - AssertEquals(0, compare0 | compare1 | compare2 | compare3); -} - - -/** - * Test DeviceRunLengthEncode on pointer type - */ -template < - RleMethod RLE_METHOD, - Backend BACKEND, - typename T, - typename OffsetT, - typename LengthT> -void TestPointer( - int num_items, - int entropy_reduction, - int max_segment) -{ - // Allocate host arrays - T* h_in = new T[num_items]; - T* h_unique_reference = new T[num_items]; - OffsetT* h_offsets_reference = new OffsetT[num_items]; - LengthT* h_lengths_reference = new LengthT[num_items]; - - for (int i = 0; i < num_items; ++i) - InitValue(INTEGER_SEED, h_offsets_reference[i], 1); - - // Initialize problem and solution - Equality equality_op; - Initialize(entropy_reduction, h_in, num_items, max_segment); - - int num_runs = Solve(h_in, h_unique_reference, h_offsets_reference, h_lengths_reference, equality_op, num_items); - - printf("\nPointer %s cub::%s on %d items, %d segments (avg run length %.3f), {%s key, %s offset, %s length}, max_segment %d, entropy_reduction %d\n", - (RLE_METHOD == RLE) ? "DeviceReduce::RunLengthEncode" : (RLE_METHOD == NON_TRIVIAL) ? "DeviceRunLengthEncode::NonTrivialRuns" : "Other", - (BACKEND == CDP) ? "CDP CUB" : (BACKEND == THRUST) ? 
"Thrust" : "CUB", - num_items, num_runs, float(num_items) / num_runs, - typeid(T).name(), typeid(OffsetT).name(), typeid(LengthT).name(), - max_segment, entropy_reduction); - fflush(stdout); - - // Allocate problem device arrays - T* d_in = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * num_items)); - - // Initialize device input - CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * num_items, cudaMemcpyHostToDevice)); - - // Run Test - Test(d_in, h_unique_reference, h_offsets_reference, h_lengths_reference, equality_op, num_runs, num_items); - - // Cleanup - if (h_in) delete[] h_in; - if (h_unique_reference) delete[] h_unique_reference; - if (h_offsets_reference) delete[] h_offsets_reference; - if (h_lengths_reference) delete[] h_lengths_reference; - if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); -} - - -/** - * Test on iterator type - */ -template < - RleMethod RLE_METHOD, - Backend BACKEND, - typename T, - typename OffsetT, - typename LengthT> -void TestIterator( - int num_items, - Int2Type is_primitive) -{ - // Allocate host arrays - T* h_unique_reference = new T[num_items]; - OffsetT* h_offsets_reference = new OffsetT[num_items]; - LengthT* h_lengths_reference = new LengthT[num_items]; - - T one_val; - InitValue(INTEGER_SEED, one_val, 1); - ConstantInputIterator h_in(one_val); - - // Initialize problem and solution - Equality equality_op; - int num_runs = Solve(h_in, h_unique_reference, h_offsets_reference, h_lengths_reference, equality_op, num_items); - - printf("\nIterator %s cub::%s on %d items, %d segments (avg run length %.3f), {%s key, %s offset, %s length}\n", - (RLE_METHOD == RLE) ? "DeviceReduce::RunLengthEncode" : (RLE_METHOD == NON_TRIVIAL) ? "DeviceRunLengthEncode::NonTrivialRuns" : "Other", - (BACKEND == CDP) ? "CDP CUB" : (BACKEND == THRUST) ? 
"Thrust" : "CUB", - num_items, num_runs, float(num_items) / num_runs, - typeid(T).name(), typeid(OffsetT).name(), typeid(LengthT).name()); - fflush(stdout); - - // Run Test - Test(h_in, h_unique_reference, h_offsets_reference, h_lengths_reference, equality_op, num_runs, num_items); - - // Cleanup - if (h_unique_reference) delete[] h_unique_reference; - if (h_offsets_reference) delete[] h_offsets_reference; - if (h_lengths_reference) delete[] h_lengths_reference; -} - - -template < - RleMethod RLE_METHOD, - Backend BACKEND, - typename T, - typename OffsetT, - typename LengthT> -void TestIterator( - int num_items, - Int2Type is_primitive) -{} - - -/** - * Test different gen modes - */ -template < - RleMethod RLE_METHOD, - Backend BACKEND, - typename T, - typename OffsetT, - typename LengthT> -void Test( - int num_items) -{ - // Test iterator (one run) - TestIterator(num_items, Int2Type::PRIMITIVE>()); - - // num_items runs - TestPointer(num_items, 0, 1); - - // Evaluate different run lengths - for (int max_segment = 3; max_segment < CUB_MIN(num_items, (unsigned short) -1); max_segment *= 3) - { - // Uniform selection run length - TestPointer(num_items, 0, max_segment); - - // Reduced-entropy run length - TestPointer(num_items, 4, max_segment); - } -} - - -/** - * Test different dispatch - */ -template < - typename T, - typename OffsetT, - typename LengthT> -void TestDispatch( - int num_items) -{ - Test(num_items); - Test(num_items); - -#ifdef CUB_CDP - Test(num_items); - Test(num_items); -#endif -} - - -/** - * Test different input sizes - */ -template < - typename T, - typename OffsetT, - typename LengthT> -void TestSize( - int num_items) -{ - if (num_items < 0) - { - TestDispatch(0); - TestDispatch(1); - TestDispatch(100); - TestDispatch(10000); - TestDispatch(1000000); - - // Randomly select problem size between 1:10,000,000 - unsigned int max_int = (unsigned int) -1; - for (int i = 0; i < 10; ++i) - { - unsigned int num_items; - RandomBits(num_items); - num_items = (unsigned int) ((double(num_items) * double(10000000)) / double(max_int)); - num_items = CUB_MAX(1, num_items); - TestDispatch(num_items); - } - } - else - { - TestDispatch(num_items); - } - -} - - -//--------------------------------------------------------------------- -// Main -//--------------------------------------------------------------------- - -/** - * Main - */ -int main(int argc, char** argv) -{ - int num_items = -1; - int entropy_reduction = 0; - int max_segment = 1000; - - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - args.GetCmdLineArgument("n", num_items); - args.GetCmdLineArgument("i", g_timing_iterations); - args.GetCmdLineArgument("repeat", g_repeat); - args.GetCmdLineArgument("maxseg", max_segment); - args.GetCmdLineArgument("entropy", entropy_reduction); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--n= " - "[--i= " - "[--device=] " - "[--maxseg=]" - "[--entropy=]" - "[--repeat=]" - "[--v] " - "[--cdp]" - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - printf("\n"); - - // Get ptx version - int ptx_version; - CubDebugExit(PtxVersion(ptx_version)); - -#ifdef QUICKER_TEST - - // Compile/run basic CUB test - if (num_items < 0) num_items = 32000000; - - TestPointer( num_items, entropy_reduction, max_segment); - TestPointer( num_items, entropy_reduction, max_segment); - TestIterator( num_items, Int2Type::PRIMITIVE>()); - - -#elif defined(QUICK_TEST) - - // Compile/run quick 
tests - if (num_items < 0) num_items = 32000000; - - TestPointer( num_items, entropy_reduction, max_segment); - TestPointer( num_items, entropy_reduction, max_segment); - -#else - - // Compile/run thorough tests - for (int i = 0; i <= g_repeat; ++i) - { - // Test different input types - TestSize(num_items); - TestSize(num_items); - TestSize(num_items); - TestSize(num_items); - TestSize(num_items); - TestSize(num_items); - TestSize(num_items); - - TestSize(num_items); - TestSize(num_items); - TestSize(num_items); - TestSize(num_items); - TestSize(num_items); - TestSize(num_items); - TestSize(num_items); - } - -#endif - - return 0; -} - - - diff --git a/ml-xgboost/cub/test/test_device_scan.cu b/ml-xgboost/cub/test/test_device_scan.cu deleted file mode 100644 index 7a18884..0000000 --- a/ml-xgboost/cub/test/test_device_scan.cu +++ /dev/null @@ -1,1015 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - ******************************************************************************/ - -/****************************************************************************** - * Test of DeviceScan utilities - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include -#include - -#include -#include -#include -#include - -#include -#include - -#include "test_util.h" - -using namespace cub; - - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -bool g_verbose = false; -int g_timing_iterations = 0; -int g_repeat = 0; -double g_device_giga_bandwidth; -CachingDeviceAllocator g_allocator(true); - -// Dispatch types -enum Backend -{ - CUB, // CUB method - THRUST, // Thrust method - CDP, // GPU-based (dynamic parallelism) dispatch to CUB method -}; - - -/** - * \brief WrapperFunctor (for precluding test-specialized dispatch to *Sum variants) - */ -template -struct WrapperFunctor -{ - OpT op; - - WrapperFunctor(OpT op) : op(op) {} - - template - __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const - { - return op(a, b); - } -}; - - -//--------------------------------------------------------------------- -// Dispatch to different CUB DeviceScan entrypoints -//--------------------------------------------------------------------- - -/** - * Dispatch to exclusive scan entrypoint - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - IsPrimitiveT is_primitive, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - ScanOpT scan_op, - InitialValueT initial_value, - OffsetT num_items, - cudaStream_t stream, - bool debug_synchronous) -{ - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceScan::ExclusiveScan(d_temp_storage, temp_storage_bytes, d_in, d_out, scan_op, initial_value, num_items, stream, debug_synchronous); - } - return error; -} - - -/** - * Dispatch to exclusive sum entrypoint - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - Int2Type is_primitive, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - Sum scan_op, - InitialValueT initial_value, - OffsetT num_items, - cudaStream_t stream, - bool debug_synchronous) -{ - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); - } - return error; -} - - -/** - * Dispatch to inclusive scan entrypoint - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - IsPrimitiveT is_primitive, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - ScanOpT scan_op, - NullType initial_value, - OffsetT num_items, - cudaStream_t stream, - bool debug_synchronous) -{ - cudaError_t error = cudaSuccess; - for (int i = 0; i < 
timing_timing_iterations; ++i) - { - error = DeviceScan::InclusiveScan(d_temp_storage, temp_storage_bytes, d_in, d_out, scan_op, num_items, stream, debug_synchronous); - } - return error; -} - - -/** - * Dispatch to inclusive sum entrypoint - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - Int2Type is_primitive, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - Sum scan_op, - NullType initial_value, - OffsetT num_items, - cudaStream_t stream, - bool debug_synchronous) -{ - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); - } - return error; -} - -//--------------------------------------------------------------------- -// Dispatch to different Thrust entrypoints -//--------------------------------------------------------------------- - -/** - * Dispatch to exclusive scan entrypoint - */ -template -cudaError_t Dispatch( - Int2Type dispatch_to, - IsPrimitiveT is_primitive, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - ScanOpT scan_op, - InitialValueT initial_value, - OffsetT num_items, - cudaStream_t stream, - bool debug_synchronous) -{ - // The input value type - typedef typename std::iterator_traits::value_type InputT; - - // The output value type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type OutputT; // ... else the output iterator's value type - - if (d_temp_storage == 0) - { - temp_storage_bytes = 1; - } - else - { - thrust::device_ptr d_in_wrapper(d_in); - thrust::device_ptr d_out_wrapper(d_out); - for (int i = 0; i < timing_timing_iterations; ++i) - { - thrust::exclusive_scan(d_in_wrapper, d_in_wrapper + num_items, d_out_wrapper, initial_value, scan_op); - } - } - - return cudaSuccess; -} - - -/** - * Dispatch to exclusive sum entrypoint - */ -template -cudaError_t Dispatch( - Int2Type dispatch_to, - Int2Type is_primitive, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - Sum scan_op, - InitialValueT initial_value, - OffsetT num_items, - cudaStream_t stream, - bool debug_synchronous) -{ - // The input value type - typedef typename std::iterator_traits::value_type InputT; - - // The output value type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type OutputT; // ... 
else the output iterator's value type - - if (d_temp_storage == 0) - { - temp_storage_bytes = 1; - } - else - { - thrust::device_ptr d_in_wrapper(d_in); - thrust::device_ptr d_out_wrapper(d_out); - for (int i = 0; i < timing_timing_iterations; ++i) - { - thrust::exclusive_scan(d_in_wrapper, d_in_wrapper + num_items, d_out_wrapper); - } - } - - return cudaSuccess; -} - - -/** - * Dispatch to inclusive scan entrypoint - */ -template -cudaError_t Dispatch( - Int2Type dispatch_to, - IsPrimitiveT is_primitive, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - ScanOpT scan_op, - NullType initial_value, - OffsetT num_items, - cudaStream_t stream, - bool debug_synchronous) -{ - // The input value type - typedef typename std::iterator_traits::value_type InputT; - - // The output value type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type OutputT; // ... else the output iterator's value type - - if (d_temp_storage == 0) - { - temp_storage_bytes = 1; - } - else - { - thrust::device_ptr d_in_wrapper(d_in); - thrust::device_ptr d_out_wrapper(d_out); - for (int i = 0; i < timing_timing_iterations; ++i) - { - thrust::inclusive_scan(d_in_wrapper, d_in_wrapper + num_items, d_out_wrapper, scan_op); - } - } - - return cudaSuccess; -} - - -/** - * Dispatch to inclusive sum entrypoint - */ -template -cudaError_t Dispatch( - Int2Type dispatch_to, - Int2Type is_primitive, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - Sum scan_op, - NullType initial_value, - OffsetT num_items, - cudaStream_t stream, - bool debug_synchronous) -{ - // The input value type - typedef typename std::iterator_traits::value_type InputT; - - // The output value type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type OutputT; // ... 
else the output iterator's value type - - if (d_temp_storage == 0) - { - temp_storage_bytes = 1; - } - else - { - thrust::device_ptr d_in_wrapper(d_in); - thrust::device_ptr d_out_wrapper(d_out); - for (int i = 0; i < timing_timing_iterations; ++i) - { - thrust::inclusive_scan(d_in_wrapper, d_in_wrapper + num_items, d_out_wrapper); - } - } - - return cudaSuccess; -} - - - -//--------------------------------------------------------------------- -// CUDA Nested Parallelism Test Kernel -//--------------------------------------------------------------------- - -/** - * Simple wrapper kernel to invoke DeviceScan - */ -template -__global__ void CnpDispatchKernel( - IsPrimitiveT is_primitive, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - ScanOpT scan_op, - InitialValueT initial_value, - OffsetT num_items, - bool debug_synchronous) -{ -#ifndef CUB_CDP - *d_cdp_error = cudaErrorNotSupported; -#else - *d_cdp_error = Dispatch( - Int2Type(), - is_primitive, - timing_timing_iterations, - d_temp_storage_bytes, - d_cdp_error, - d_temp_storage, - temp_storage_bytes, - d_in, - d_out, - scan_op, - initial_value, - num_items, - 0, - debug_synchronous); - - *d_temp_storage_bytes = temp_storage_bytes; -#endif -} - - -/** - * Dispatch to CDP kernel - */ -template -cudaError_t Dispatch( - Int2Type dispatch_to, - IsPrimitiveT is_primitive, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - ScanOpT scan_op, - InitialValueT initial_value, - OffsetT num_items, - cudaStream_t stream, - bool debug_synchronous) -{ - // Invoke kernel to invoke device-side dispatch - CnpDispatchKernel<<<1,1>>>( - is_primitive, - timing_timing_iterations, - d_temp_storage_bytes, - d_cdp_error, - d_temp_storage, - temp_storage_bytes, - d_in, - d_out, - scan_op, - initial_value, - num_items, - debug_synchronous); - - // Copy out temp_storage_bytes - CubDebugExit(cudaMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) * 1, cudaMemcpyDeviceToHost)); - - // Copy out error - cudaError_t retval; - CubDebugExit(cudaMemcpy(&retval, d_cdp_error, sizeof(cudaError_t) * 1, cudaMemcpyDeviceToHost)); - return retval; -} - - -//--------------------------------------------------------------------- -// Test generation -//--------------------------------------------------------------------- - - -/** - * Initialize problem - */ -template -void Initialize( - GenMode gen_mode, - T *h_in, - int num_items) -{ - for (int i = 0; i < num_items; ++i) - { - InitValue(gen_mode, h_in[i], i); - } - - if (g_verbose) - { - printf("Input:\n"); - DisplayResults(h_in, num_items); - printf("\n\n"); - } -} - -/** - * Solve exclusive-scan problem - */ -template < - typename InputIteratorT, - typename OutputT, - typename ScanOpT> -void Solve( - InputIteratorT h_in, - OutputT *h_reference, - int num_items, - ScanOpT scan_op, - OutputT initial_value) -{ - if (num_items > 0) - { - OutputT val = h_in[0]; - h_reference[0] = initial_value; - OutputT inclusive = scan_op(initial_value, val); - - for (int i = 1; i < num_items; ++i) - { - val = h_in[i]; - h_reference[i] = inclusive; - inclusive = scan_op(inclusive, val); - } - } -} - - -/** - * Solve inclusive-scan problem - */ -template < - typename InputIteratorT, - typename OutputT, - typename ScanOpT> -void Solve( - 
InputIteratorT h_in, - OutputT *h_reference, - int num_items, - ScanOpT scan_op, - NullType) -{ - if (num_items > 0) - { - OutputT inclusive = h_in[0]; - h_reference[0] = inclusive; - - for (int i = 1; i < num_items; ++i) - { - OutputT val = h_in[i]; - inclusive = scan_op(inclusive, val); - h_reference[i] = inclusive; - } - } -} - - -/** - * Test DeviceScan for a given problem input - */ -template < - Backend BACKEND, - typename DeviceInputIteratorT, - typename OutputT, - typename ScanOpT, - typename InitialValueT> -void Test( - DeviceInputIteratorT d_in, - OutputT *h_reference, - int num_items, - ScanOpT scan_op, - InitialValueT initial_value) -{ - typedef typename std::iterator_traits::value_type InputT; - - // Allocate device output array - OutputT *d_out = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(OutputT) * num_items)); - - // Allocate CDP device arrays - size_t *d_temp_storage_bytes = NULL; - cudaError_t *d_cdp_error = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(cudaError_t) * 1)); - - // Allocate temporary storage - void *d_temp_storage = NULL; - size_t temp_storage_bytes = 0; - CubDebugExit(Dispatch( - Int2Type(), - Int2Type::PRIMITIVE>(), - 1, - d_temp_storage_bytes, - d_cdp_error, - d_temp_storage, - temp_storage_bytes, - d_in, - d_out, - scan_op, - initial_value, - num_items, - 0, - true)); - CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); - - // Clear device output array - CubDebugExit(cudaMemset(d_out, 0, sizeof(OutputT) * num_items)); - - // Run warmup/correctness iteration - CubDebugExit(Dispatch( - Int2Type(), - Int2Type::PRIMITIVE>(), - 1, - d_temp_storage_bytes, - d_cdp_error, - d_temp_storage, - temp_storage_bytes, - d_in, - d_out, - scan_op, - initial_value, - num_items, - 0, - true)); - - // Check for correctness (and display results, if specified) - int compare = CompareDeviceResults(h_reference, d_out, num_items, true, g_verbose); - printf("\t%s", compare ? 
"FAIL" : "PASS"); - - // Flush any stdout/stderr - fflush(stdout); - fflush(stderr); - - // Performance - GpuTimer gpu_timer; - gpu_timer.Start(); - CubDebugExit(Dispatch(Int2Type(), - Int2Type::PRIMITIVE>(), - g_timing_iterations, - d_temp_storage_bytes, - d_cdp_error, - d_temp_storage, - temp_storage_bytes, - d_in, - d_out, - scan_op, - initial_value, - num_items, - 0, - false)); - gpu_timer.Stop(); - float elapsed_millis = gpu_timer.ElapsedMillis(); - - // Display performance - if (g_timing_iterations > 0) - { - float avg_millis = elapsed_millis / g_timing_iterations; - float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f; - float giga_bandwidth = giga_rate * (sizeof(InputT) + sizeof(OutputT)); - printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s, %.1f%% peak", avg_millis, giga_rate, giga_bandwidth, giga_bandwidth / g_device_giga_bandwidth * 100.0); - } - - printf("\n\n"); - - // Cleanup - if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); - if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes)); - if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error)); - if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); - - // Correctness asserts - AssertEquals(0, compare); -} - - -/** - * Test DeviceScan on pointer type - */ -template < - Backend BACKEND, - typename InputT, - typename OutputT, - typename ScanOpT, - typename InitialValueT> -void TestPointer( - int num_items, - GenMode gen_mode, - ScanOpT scan_op, - InitialValueT initial_value) -{ - printf("\nPointer %s %s cub::DeviceScan::%s %d items, %s->%s (%d->%d bytes) , gen-mode %s\n", - (BACKEND == CDP) ? "CDP CUB" : (BACKEND == THRUST) ? "Thrust" : "CUB", - (Equals::VALUE) ? "Inclusive" : "Exclusive", - (Equals::VALUE) ? "Sum" : "Scan", - num_items, - typeid(InputT).name(), typeid(OutputT).name(), (int) sizeof(InputT), (int) sizeof(OutputT), - (gen_mode == RANDOM) ? "RANDOM" : (gen_mode == INTEGER_SEED) ? "SEQUENTIAL" : "HOMOGENOUS"); - fflush(stdout); - - // Allocate host arrays - InputT* h_in = new InputT[num_items]; - OutputT* h_reference = new OutputT[num_items]; - - // Initialize problem and solution - Initialize(gen_mode, h_in, num_items); - Solve(h_in, h_reference, num_items, scan_op, initial_value); - - // Allocate problem device arrays - InputT *d_in = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(InputT) * num_items)); - - // Initialize device input - CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(InputT) * num_items, cudaMemcpyHostToDevice)); - - // Run Test - Test(d_in, h_reference, num_items, scan_op, initial_value); - - // Cleanup - if (h_in) delete[] h_in; - if (h_reference) delete[] h_reference; - if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); -} - - -/** - * Test DeviceScan on iterator type - */ -template < - Backend BACKEND, - typename InputT, - typename OutputT, - typename ScanOpT, - typename InitialValueT> -void TestIterator( - int num_items, - ScanOpT scan_op, - InitialValueT initial_value) -{ - printf("\nIterator %s %s cub::DeviceScan::%s %d items, %s->%s (%d->%d bytes)\n", - (BACKEND == CDP) ? "CDP CUB" : (BACKEND == THRUST) ? "Thrust" : "CUB", - (Equals::VALUE) ? "Inclusive" : "Exclusive", - (Equals::VALUE) ? 
"Sum" : "Scan", - num_items, - typeid(InputT).name(), typeid(OutputT).name(), (int) sizeof(InputT), (int) sizeof(OutputT)); - fflush(stdout); - - // Use a constant iterator as the input - InputT val = InputT(); - ConstantInputIterator h_in(val); - - // Allocate host arrays - OutputT* h_reference = new OutputT[num_items]; - - // Initialize problem and solution - Solve(h_in, h_reference, num_items, scan_op, initial_value); - - // Run Test - Test(h_in, h_reference, num_items, scan_op, initial_value); - - // Cleanup - if (h_reference) delete[] h_reference; -} - - -/** - * Test different gen modes - */ -template < - Backend BACKEND, - typename InputT, - typename OutputT, - typename ScanOpT, - typename InitialValueT> -void Test( - int num_items, - ScanOpT scan_op, - InitialValueT initial_value) -{ - TestPointer( num_items, UNIFORM, scan_op, initial_value); - TestPointer( num_items, RANDOM, scan_op, initial_value); - TestIterator( num_items, scan_op, initial_value); -} - - -/** - * Test different dispatch - */ -template < - typename InputT, - typename OutputT, - typename ScanOpT, - typename InitialValueT> -void Test( - int num_items, - ScanOpT scan_op, - InitialValueT initial_value) -{ - Test(num_items, scan_op, initial_value); -#ifdef CUB_CDP - Test(num_items, scan_op, initial_value); -#endif -} - - -/** - * Test different operators - */ -template -void TestOp( - int num_items, - OutputT identity, - OutputT initial_value) -{ - // Exclusive (use identity as initial value because it will dispatch to *Sum variants that don't take initial values) - Test(num_items, cub::Sum(), identity); - Test(num_items, cub::Max(), identity); - - // Exclusive (non-specialized, so we can test initial-value) - Test(num_items, WrapperFunctor(cub::Sum()), initial_value); - Test(num_items, WrapperFunctor(cub::Max()), initial_value); - - // Inclusive (no initial value) - Test(num_items, cub::Sum(), NullType()); - Test(num_items, cub::Max(), NullType()); -} - - -/** - * Test different input sizes - */ -template < - typename InputT, - typename OutputT> -void TestSize( - int num_items, - OutputT identity, - OutputT initial_value) -{ - if (num_items < 0) - { - TestOp(0, identity, initial_value); - TestOp(1, identity, initial_value); - TestOp(100, identity, initial_value); - TestOp(10000, identity, initial_value); - TestOp(1000000, identity, initial_value); - - // Randomly select problem size between 1:10,000,000 - unsigned int max_int = (unsigned int) -1; - for (int i = 0; i < 10; ++i) - { - unsigned int num_items; - RandomBits(num_items); - num_items = (unsigned int) ((double(num_items) * double(10000000)) / double(max_int)); - num_items = CUB_MAX(1, num_items); - TestOp(num_items, identity, initial_value); - } - } - else - { - TestOp(num_items, identity, initial_value); - } -} - - - -//--------------------------------------------------------------------- -// Main -//--------------------------------------------------------------------- - -/** - * Main - */ -int main(int argc, char** argv) -{ - int num_items = -1; - - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - args.GetCmdLineArgument("n", num_items); - args.GetCmdLineArgument("i", g_timing_iterations); - args.GetCmdLineArgument("repeat", g_repeat); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--n= " - "[--i= " - "[--device=] " - "[--repeat=]" - "[--v] " - "[--cdp]" - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - 
g_device_giga_bandwidth = args.device_giga_bandwidth; - printf("\n"); - -#ifdef QUICKER_TEST - - // Compile/run basic CUB test - if (num_items < 0) num_items = 32000000; - - TestPointer( num_items , UNIFORM, Sum(), (int) (0)); - TestPointer( num_items , UNIFORM, Sum(), (int) (0)); - -#elif defined(QUICK_TEST) - - // Get device ordinal - int device_ordinal; - CubDebugExit(cudaGetDevice(&device_ordinal)); - - // Get device SM version - int sm_version; - CubDebugExit(SmVersion(sm_version, device_ordinal)); - - // Compile/run quick tests - if (num_items < 0) num_items = 32000000; - - TestPointer( num_items * ((sm_version <= 130) ? 1 : 4), UNIFORM, Sum(), char(0)); - TestPointer( num_items * ((sm_version <= 130) ? 1 : 4), UNIFORM, Sum(), char(0)); - - printf("----------------------------\n"); - TestPointer( num_items * ((sm_version <= 130) ? 1 : 2), UNIFORM, Sum(), short(0)); - TestPointer( num_items * ((sm_version <= 130) ? 1 : 2), UNIFORM, Sum(), short(0)); - - printf("----------------------------\n"); - TestPointer( num_items , UNIFORM, Sum(), (int) (0)); - TestPointer( num_items , UNIFORM, Sum(), (int) (0)); - - printf("----------------------------\n"); - TestPointer( num_items / 2, UNIFORM, Sum(), (long long) (0)); - TestPointer(num_items / 2, UNIFORM, Sum(), (long long) (0)); - - printf("----------------------------\n"); - TestPointer( num_items / 4, UNIFORM, Sum(), TestBar()); - TestPointer( num_items / 4, UNIFORM, Sum(), TestBar()); - -#else - - // Compile/run thorough tests - for (int i = 0; i <= g_repeat; ++i) - { - // Test different input+output data types - TestSize(num_items, (int) 0, (int) 99); - - // Test same intput+output data types - TestSize(num_items, (unsigned char) 0, (unsigned char) 99); - TestSize(num_items, (char) 0, (char) 99); - TestSize(num_items, (unsigned short) 0, (unsigned short)99); - TestSize(num_items, (unsigned int) 0, (unsigned int) 99); - TestSize(num_items, (unsigned long long) 0, (unsigned long long) 99); - - TestSize(num_items, make_uchar2(0, 0), make_uchar2(17, 21)); - TestSize(num_items, make_char2(0, 0), make_char2(17, 21)); - TestSize(num_items, make_ushort2(0, 0), make_ushort2(17, 21)); - TestSize(num_items, make_uint2(0, 0), make_uint2(17, 21)); - TestSize(num_items, make_ulonglong2(0, 0), make_ulonglong2(17, 21)); - TestSize(num_items, make_uchar4(0, 0, 0, 0), make_uchar4(17, 21, 32, 85)); - TestSize(num_items, make_char4(0, 0, 0, 0), make_char4(17, 21, 32, 85)); - - TestSize(num_items, make_ushort4(0, 0, 0, 0), make_ushort4(17, 21, 32, 85)); - TestSize(num_items, make_uint4(0, 0, 0, 0), make_uint4(17, 21, 32, 85)); - TestSize(num_items, make_ulonglong4(0, 0, 0, 0), make_ulonglong4(17, 21, 32, 85)); - - TestSize(num_items, - TestFoo::MakeTestFoo(0, 0, 0, 0), - TestFoo::MakeTestFoo(1ll << 63, 1 << 31, short(1 << 15), char(1 << 7))); - - TestSize(num_items, - TestBar(0, 0), - TestBar(1ll << 63, 1 << 31)); - } - -#endif - - return 0; -} - - - diff --git a/ml-xgboost/cub/test/test_device_select_if.cu b/ml-xgboost/cub/test/test_device_select_if.cu deleted file mode 100644 index ed52079..0000000 --- a/ml-xgboost/cub/test/test_device_select_if.cu +++ /dev/null @@ -1,1039 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/****************************************************************************** - * Test of DeviceSelect::If and DevicePartition::If utilities - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include -#include - -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "test_util.h" - -using namespace cub; - - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -bool g_verbose = false; -int g_timing_iterations = 0; -int g_repeat = 0; -float g_device_giga_bandwidth; -CachingDeviceAllocator g_allocator(true); - -// Dispatch types -enum Backend -{ - CUB, // CUB method - THRUST, // Thrust method - CDP, // GPU-based (dynamic parallelism) dispatch to CUB method -}; - - -// Selection functor type -template -struct LessThan -{ - T compare; - - __host__ __device__ __forceinline__ - LessThan(T compare) : compare(compare) {} - - __host__ __device__ __forceinline__ - bool operator()(const T &a) const { - return (a < compare); - } -}; - -//--------------------------------------------------------------------- -// Dispatch to different CUB DeviceSelect entrypoints -//--------------------------------------------------------------------- - - -/** - * Dispatch to select if entrypoint - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - Int2Type is_flagged, - Int2Type is_partition, - int timing_timing_iterations, - size_t* d_temp_storage_bytes, - cudaError_t* d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - FlagIteratorT d_flags, - OutputIteratorT d_out, - NumSelectedIteratorT d_num_selected_out, - OffsetT num_items, - SelectOpT select_op, - cudaStream_t stream, - bool debug_synchronous) 
-{ - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceSelect::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op, stream, debug_synchronous); - } - return error; -} - - -/** - * Dispatch to partition if entrypoint - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - Int2Type is_flagged, - Int2Type is_partition, - int timing_timing_iterations, - size_t* d_temp_storage_bytes, - cudaError_t* d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - FlagIteratorT d_flags, - OutputIteratorT d_out, - NumSelectedIteratorT d_num_selected_out, - OffsetT num_items, - SelectOpT select_op, - cudaStream_t stream, - bool debug_synchronous) -{ - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DevicePartition::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op, stream, debug_synchronous); - } - return error; -} - - -/** - * Dispatch to select flagged entrypoint - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - Int2Type is_flagged, - Int2Type partition, - int timing_timing_iterations, - size_t* d_temp_storage_bytes, - cudaError_t* d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - FlagIteratorT d_flags, - OutputIteratorT d_out, - NumSelectedIteratorT d_num_selected_out, - OffsetT num_items, - SelectOpT select_op, - cudaStream_t stream, - bool debug_synchronous) -{ - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, stream, debug_synchronous); - } - return error; -} - - -/** - * Dispatch to partition flagged entrypoint - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - Int2Type is_flagged, - Int2Type partition, - int timing_timing_iterations, - size_t* d_temp_storage_bytes, - cudaError_t* d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - FlagIteratorT d_flags, - OutputIteratorT d_out, - NumSelectedIteratorT d_num_selected_out, - OffsetT num_items, - SelectOpT select_op, - cudaStream_t stream, - bool debug_synchronous) -{ - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, stream, debug_synchronous); - } - return error; -} - - -//--------------------------------------------------------------------- -// Dispatch to different Thrust entrypoints -//--------------------------------------------------------------------- - -/** - * Dispatch to select if entrypoint - */ -template -__host__ __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - Int2Type is_flagged, - Int2Type is_partition, - int timing_timing_iterations, - size_t* d_temp_storage_bytes, - cudaError_t* d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - FlagIteratorT d_flags, - OutputIteratorT d_out, - NumSelectedIteratorT d_num_selected_out, - OffsetT num_items, - SelectOpT select_op, - cudaStream_t stream, - bool debug_synchronous) -{ - // The input value type - typedef typename std::iterator_traits::value_type InputT; - 
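    // (Aside: each CUB-backend overload above reduces to the standard
    //  two-call shape, e.g. for the functor-based variant:
    //
    //      cub::DeviceSelect::If(d_temp_storage, temp_storage_bytes,
    //                            d_in, d_out, d_num_selected_out,
    //                            num_items, select_op, stream, debug_synchronous);
    //
    //  invoked once with d_temp_storage == NULL to size the scratch buffer
    //  and once more to do the work. DevicePartition::If has the same shape
    //  but additionally writes the rejected items to the tail of d_out in
    //  reverse order.)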
- // The output value type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type OutputT; // ... else the output iterator's value type - - if (d_temp_storage == 0) - { - temp_storage_bytes = 1; - } - else - { - thrust::device_ptr d_out_wrapper_end; - thrust::device_ptr d_in_wrapper(d_in); - thrust::device_ptr d_out_wrapper(d_out); - - for (int i = 0; i < timing_timing_iterations; ++i) - { - d_out_wrapper_end = thrust::copy_if(d_in_wrapper, d_in_wrapper + num_items, d_out_wrapper, select_op); - } - - OffsetT num_selected = d_out_wrapper_end - d_out_wrapper; - CubDebugExit(cudaMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), cudaMemcpyHostToDevice)); - } - - return cudaSuccess; -} - - -/** - * Dispatch to partition if entrypoint - */ -template -__host__ __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - Int2Type is_flagged, - Int2Type is_partition, - int timing_timing_iterations, - size_t* d_temp_storage_bytes, - cudaError_t* d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - FlagIteratorT d_flags, - OutputIteratorT d_out, - NumSelectedIteratorT d_num_selected_out, - OffsetT num_items, - SelectOpT select_op, - cudaStream_t stream, - bool debug_synchronous) -{ - // The input value type - typedef typename std::iterator_traits::value_type InputT; - - // The output value type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type OutputT; // ... else the output iterator's value type - - typedef thrust::reverse_iterator > ReverseOutputIteratorT; - - if (d_temp_storage == 0) - { - temp_storage_bytes = 1; - } - else - { - thrust::pair, ReverseOutputIteratorT> d_out_wrapper_end; - - thrust::device_ptr d_in_wrapper(d_in); - thrust::device_ptr d_out_wrapper(d_out); - - ReverseOutputIteratorT d_out_unselected(d_out_wrapper + num_items); - - for (int i = 0; i < timing_timing_iterations; ++i) - { - d_out_wrapper_end = thrust::partition_copy( - d_in_wrapper, - d_in_wrapper + num_items, - d_out_wrapper, - d_out_unselected, - select_op); - } - - OffsetT num_selected = d_out_wrapper_end.first - d_out_wrapper; - CubDebugExit(cudaMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), cudaMemcpyHostToDevice)); - } - - return cudaSuccess; -} - - -/** - * Dispatch to select flagged entrypoint - */ -template -__host__ __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - Int2Type is_flagged, - Int2Type is_partition, - int timing_timing_iterations, - size_t* d_temp_storage_bytes, - cudaError_t* d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - FlagIteratorT d_flags, - OutputIteratorT d_out, - NumSelectedIteratorT d_num_selected_out, - OffsetT num_items, - SelectOpT select_op, - cudaStream_t stream, - bool debug_synchronous) -{ - // The flag type - typedef typename std::iterator_traits::value_type FlagT; - - // The input value type - typedef typename std::iterator_traits::value_type InputT; - - // The output value type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... 
then the input iterator's value type, - typename std::iterator_traits::value_type>::Type OutputT; // ... else the output iterator's value type - - if (d_temp_storage == 0) - { - temp_storage_bytes = 1; - } - else - { - thrust::device_ptr d_out_wrapper_end; - thrust::device_ptr d_in_wrapper(d_in); - thrust::device_ptr d_out_wrapper(d_out); - thrust::device_ptr d_flags_wrapper(d_flags); - - for (int i = 0; i < timing_timing_iterations; ++i) - { - d_out_wrapper_end = thrust::copy_if(d_in_wrapper, d_in_wrapper + num_items, d_flags_wrapper, d_out_wrapper, Cast()); - } - - OffsetT num_selected = d_out_wrapper_end - d_out_wrapper; - CubDebugExit(cudaMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), cudaMemcpyHostToDevice)); - } - - return cudaSuccess; -} - - -/** - * Dispatch to partition flagged entrypoint - */ -template -__host__ __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - Int2Type is_flagged, - Int2Type is_partition, - int timing_timing_iterations, - size_t* d_temp_storage_bytes, - cudaError_t* d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - FlagIteratorT d_flags, - OutputIteratorT d_out, - NumSelectedIteratorT d_num_selected_out, - OffsetT num_items, - SelectOpT select_op, - cudaStream_t stream, - bool debug_synchronous) -{ - // The flag type - typedef typename std::iterator_traits::value_type FlagT; - - // The input value type - typedef typename std::iterator_traits::value_type InputT; - - // The output value type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type OutputT; // ... 
else the output iterator's value type - - typedef thrust::reverse_iterator > ReverseOutputIteratorT; - - if (d_temp_storage == 0) - { - temp_storage_bytes = 1; - } - else - { - thrust::pair, ReverseOutputIteratorT> d_out_wrapper_end; - - thrust::device_ptr d_in_wrapper(d_in); - thrust::device_ptr d_out_wrapper(d_out); - thrust::device_ptr d_flags_wrapper(d_flags); - ReverseOutputIteratorT d_out_unselected(d_out_wrapper + num_items); - - for (int i = 0; i < timing_timing_iterations; ++i) - { - d_out_wrapper_end = thrust::partition_copy( - d_in_wrapper, - d_in_wrapper + num_items, - d_flags_wrapper, - d_out_wrapper, - d_out_unselected, - Cast()); - } - - OffsetT num_selected = d_out_wrapper_end.first - d_out_wrapper; - CubDebugExit(cudaMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), cudaMemcpyHostToDevice)); - } - - return cudaSuccess; -} - - -//--------------------------------------------------------------------- -// CUDA Nested Parallelism Test Kernel -//--------------------------------------------------------------------- - -/** - * Simple wrapper kernel to invoke DeviceSelect - */ -template -__global__ void CnpDispatchKernel( - IsFlaggedTag is_flagged, - IsPartitionTag is_partition, - int timing_timing_iterations, - size_t* d_temp_storage_bytes, - cudaError_t* d_cdp_error, - - void* d_temp_storage, - size_t temp_storage_bytes, - InputIteratorT d_in, - FlagIteratorT d_flags, - OutputIteratorT d_out, - NumSelectedIteratorT d_num_selected_out, - OffsetT num_items, - SelectOpT select_op, - bool debug_synchronous) -{ - -#ifndef CUB_CDP - *d_cdp_error = cudaErrorNotSupported; -#else - *d_cdp_error = Dispatch(Int2Type(), is_flagged, is_partition, timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, - d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, debug_synchronous); - *d_temp_storage_bytes = temp_storage_bytes; -#endif -} - - -/** - * Dispatch to CDP kernel - */ -template -cudaError_t Dispatch( - Int2Type dispatch_to, - IsFlaggedTag is_flagged, - IsPartitionTag is_partition, - int timing_timing_iterations, - size_t* d_temp_storage_bytes, - cudaError_t* d_cdp_error, - - void* d_temp_storage, - size_t& temp_storage_bytes, - InputIteratorT d_in, - FlagIteratorT d_flags, - OutputIteratorT d_out, - NumSelectedIteratorT d_num_selected_out, - OffsetT num_items, - SelectOpT select_op, - cudaStream_t stream, - bool debug_synchronous) -{ - // Invoke kernel to invoke device-side dispatch - CnpDispatchKernel<<<1,1>>>(is_flagged, is_partition, timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, - d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, debug_synchronous); - - // Copy out temp_storage_bytes - CubDebugExit(cudaMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) * 1, cudaMemcpyDeviceToHost)); - - // Copy out error - cudaError_t retval; - CubDebugExit(cudaMemcpy(&retval, d_cdp_error, sizeof(cudaError_t) * 1, cudaMemcpyDeviceToHost)); - return retval; -} - - - -//--------------------------------------------------------------------- -// Test generation -//--------------------------------------------------------------------- - - -/** - * Initialize problem - */ -template -void Initialize( - T* h_in, - int num_items) -{ - for (int i = 0; i < num_items; ++i) - { - // Initialize each item to a randomly selected value from [0..126] - unsigned int value; - RandomBits(value, 0, 0, 7); - if (value == 127) - value = 126; - InitValue(INTEGER_SEED, h_in[i], value); - } 
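    // (Aside: the CDP backend above exercises CUDA dynamic parallelism by
    //  re-entering Dispatch from a single-thread kernel,
    //  CnpDispatchKernel<<<1,1>>>(...), then copying d_temp_storage_bytes and
    //  d_cdp_error back to the host with cudaMemcpy; builds without -DCUB_CDP
    //  simply report cudaErrorNotSupported from the kernel.)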
- - if (g_verbose) - { - printf("Input:\n"); - DisplayResults(h_in, num_items); - printf("\n\n"); - } -} - - -/** - * Solve selection problem (and set corresponding flags) - */ -template < - typename InputIteratorT, - typename FlagIteratorT, - typename SelectOpT, - typename T> -int Solve( - InputIteratorT h_in, - SelectOpT select_op, - T* h_reference, - FlagIteratorT h_flags, - int num_items) -{ - int num_selected = 0; - for (int i = 0; i < num_items; ++i) - { - if ((h_flags[i] = select_op(h_in[i]))) - { - h_reference[num_selected] = h_in[i]; - num_selected++; - } - else - { - h_reference[num_items - (i - num_selected) - 1] = h_in[i]; - } - } - - return num_selected; -} - - - -/** - * Test DeviceSelect for a given problem input - */ -template < - Backend BACKEND, - bool IS_FLAGGED, - bool IS_PARTITION, - typename DeviceInputIteratorT, - typename FlagT, - typename SelectOpT, - typename T> -void Test( - DeviceInputIteratorT d_in, - FlagT* h_flags, - SelectOpT select_op, - T* h_reference, - int num_selected, - int num_items) -{ - // Allocate device flags, output, and num-selected - FlagT* d_flags = NULL; - T* d_out = NULL; - int* d_num_selected_out = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_flags, sizeof(FlagT) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_selected_out, sizeof(int))); - - // Allocate CDP device arrays - size_t* d_temp_storage_bytes = NULL; - cudaError_t* d_cdp_error = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(cudaError_t) * 1)); - - // Allocate temporary storage - void *d_temp_storage = NULL; - size_t temp_storage_bytes = 0; - CubDebugExit(Dispatch(Int2Type(), Int2Type(), Int2Type(), 1, d_temp_storage_bytes, d_cdp_error, - d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, true)); - CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); - - // Copy flags and clear device output array - CubDebugExit(cudaMemcpy(d_flags, h_flags, sizeof(FlagT) * num_items, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemset(d_out, 0, sizeof(T) * num_items)); - CubDebugExit(cudaMemset(d_num_selected_out, 0, sizeof(int))); - - // Run warmup/correctness iteration - CubDebugExit(Dispatch(Int2Type(), Int2Type(), Int2Type(), 1, d_temp_storage_bytes, d_cdp_error, - d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, true)); - - // Check for correctness (and display results, if specified) - int compare1 = (IS_PARTITION) ? - CompareDeviceResults(h_reference, d_out, num_items, true, g_verbose) : - CompareDeviceResults(h_reference, d_out, num_selected, true, g_verbose); - printf("\t Data %s\n", compare1 ? "FAIL" : "PASS"); - - int compare2 = CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose); - printf("\t Count %s\n", compare2 ? 
"FAIL" : "PASS"); - - // Flush any stdout/stderr - fflush(stdout); - fflush(stderr); - - // Performance - GpuTimer gpu_timer; - gpu_timer.Start(); - CubDebugExit(Dispatch(Int2Type(), Int2Type(), Int2Type(), g_timing_iterations, d_temp_storage_bytes, d_cdp_error, - d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, false)); - gpu_timer.Stop(); - float elapsed_millis = gpu_timer.ElapsedMillis(); - - // Display performance - if (g_timing_iterations > 0) - { - float avg_millis = elapsed_millis / g_timing_iterations; - float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f; - int num_output_items = (IS_PARTITION) ? num_items : num_selected; - int num_flag_items = (IS_FLAGGED) ? num_items : 0; - size_t num_bytes = sizeof(T) * (num_items + num_output_items) + sizeof(FlagT) * num_flag_items; - float giga_bandwidth = float(num_bytes) / avg_millis / 1000.0f / 1000.0f; - - printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s, %.1f%% peak", avg_millis, giga_rate, giga_bandwidth, giga_bandwidth / g_device_giga_bandwidth * 100.0); - } - printf("\n\n"); - - // Flush any stdout/stderr - fflush(stdout); - fflush(stderr); - - // Cleanup - if (d_flags) CubDebugExit(g_allocator.DeviceFree(d_flags)); - if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); - if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out)); - if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes)); - if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error)); - if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); - - // Correctness asserts - AssertEquals(0, compare1 | compare2); -} - - -/** - * Test on pointer type - */ -template < - Backend BACKEND, - bool IS_FLAGGED, - bool IS_PARTITION, - typename T> -void TestPointer( - int num_items, - float select_ratio) -{ - typedef char FlagT; - - // Allocate host arrays - T* h_in = new T[num_items]; - FlagT* h_flags = new FlagT[num_items]; - T* h_reference = new T[num_items]; - - // Initialize input - Initialize(h_in, num_items); - - // Select a comparison value that is select_ratio through the space of [0,127] - T compare; - if (select_ratio <= 0.0) - InitValue(INTEGER_SEED, compare, 0); // select none - else if (select_ratio >= 1.0) - InitValue(INTEGER_SEED, compare, 127); // select all - else - InitValue(INTEGER_SEED, compare, int(double(double(127) * select_ratio))); - - LessThan select_op(compare); - int num_selected = Solve(h_in, select_op, h_reference, h_flags, num_items); - - if (g_verbose) std::cout << "\nComparison item: " << compare << "\n"; - printf("\nPointer %s cub::%s::%s %d items, %d selected (select ratio %.3f), %s %d-byte elements\n", - (IS_PARTITION) ? "DevicePartition" : "DeviceSelect", - (IS_FLAGGED) ? "Flagged" : "If", - (BACKEND == CDP) ? "CDP CUB" : (BACKEND == THRUST) ? 
"Thrust" : "CUB", - num_items, num_selected, float(num_selected) / num_items, typeid(T).name(), (int) sizeof(T)); - fflush(stdout); - - // Allocate problem device arrays - T *d_in = NULL; - - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * num_items)); - - // Initialize device input - CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * num_items, cudaMemcpyHostToDevice)); - - // Run Test - Test(d_in, h_flags, select_op, h_reference, num_selected, num_items); - - // Cleanup - if (h_in) delete[] h_in; - if (h_reference) delete[] h_reference; - if (h_flags) delete[] h_flags; - if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); -} - - -/** - * Test on iterator type - */ -template < - Backend BACKEND, - bool IS_FLAGGED, - bool IS_PARTITION, - typename T> -void TestIterator( - int num_items, - float select_ratio) -{ - typedef char FlagT; - - // Allocate host arrays - T* h_reference = new T[num_items]; - FlagT* h_flags = new FlagT[num_items]; - - // Use counting iterator as the input - CountingInputIterator h_in(0); - - // Select a comparison value that is select_ratio through the space of [0,127] - T compare; - if (select_ratio <= 0.0) - InitValue(INTEGER_SEED, compare, 0); // select none - else if (select_ratio >= 1.0) - InitValue(INTEGER_SEED, compare, 127); // select all - else - InitValue(INTEGER_SEED, compare, int(double(double(127) * select_ratio))); - - LessThan select_op(compare); - int num_selected = Solve(h_in, select_op, h_reference, h_flags, num_items); - - if (g_verbose) std::cout << "\nComparison item: " << compare << "\n"; - printf("\nIterator %s cub::%s::%s %d items, %d selected (select ratio %.3f), %s %d-byte elements\n", - (IS_PARTITION) ? "DevicePartition" : "DeviceSelect", - (IS_FLAGGED) ? "Flagged" : "If", - (BACKEND == CDP) ? "CDP CUB" : (BACKEND == THRUST) ? "Thrust" : "CUB", - num_items, num_selected, float(num_selected) / num_items, typeid(T).name(), (int) sizeof(T)); - fflush(stdout); - - // Run Test - Test(h_in, h_flags, select_op, h_reference, num_selected, num_items); - - // Cleanup - if (h_reference) delete[] h_reference; - if (h_flags) delete[] h_flags; -} - - -/** - * Test different selection ratios - */ -template < - Backend BACKEND, - bool IS_FLAGGED, - bool IS_PARTITION, - typename T> -void Test( - int num_items) -{ - for (float select_ratio = 0.0f; select_ratio <= 1.0f; select_ratio += 0.2f) - { - TestPointer(num_items, select_ratio); - } -} - - -/** - * Test (select vs. partition) and (flagged vs. 
functor) - */ -template < - Backend BACKEND, - typename T> -void TestMethod( - int num_items) -{ - // Functor - Test(num_items); - Test(num_items); - - // Flagged - Test(num_items); - Test(num_items); -} - - -/** - * Test different dispatch - */ -template < - typename T> -void TestOp( - int num_items) -{ - TestMethod(num_items); -#ifdef CUB_CDP - TestMethod(num_items); -#endif -} - - -/** - * Test different input sizes - */ -template -void Test( - int num_items) -{ - if (num_items < 0) - { - TestOp(0); - TestOp(1); - TestOp(100); - TestOp(10000); - TestOp(1000000); - } - else - { - TestOp(num_items); - } -} - -/** - * Test select/partition on pointer types - */ -template -void ComparePointer( - int num_items, - float select_ratio) -{ - printf("-- Select-if ----------------------------\n"); - TestPointer(num_items, select_ratio); - TestPointer(num_items, select_ratio); - - printf("-- Partition-if ----------------------------\n"); - TestPointer(num_items, select_ratio); - TestPointer(num_items, select_ratio); - - printf("-- Select-flagged ----------------------------\n"); - TestPointer(num_items, select_ratio); - TestPointer(num_items, select_ratio); - - printf("-- Partition-flagged ----------------------------\n"); - TestPointer(num_items, select_ratio); - TestPointer(num_items, select_ratio); - -} - -//--------------------------------------------------------------------- -// Main -//--------------------------------------------------------------------- - -/** - * Main - */ -int main(int argc, char** argv) -{ - int num_items = -1; - float select_ratio = 0.5; - - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - args.GetCmdLineArgument("n", num_items); - args.GetCmdLineArgument("i", g_timing_iterations); - args.GetCmdLineArgument("repeat", g_repeat); - args.GetCmdLineArgument("ratio", select_ratio); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--n= " - "[--i= " - "[--device=] " - "[--ratio=] " - "[--repeat=] " - "[--v] " - "[--cdp] " - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - g_device_giga_bandwidth = args.device_giga_bandwidth; - printf("\n"); - -#ifdef QUICKER_TEST - - // Compile/run basic CUB test - if (num_items < 0) num_items = 32000000; - - printf("-- Select-if ----------------------------\n"); - TestPointer(num_items, select_ratio); - - printf("-- Partition-if ----------------------------\n"); - TestPointer(num_items, select_ratio); - - printf("-- Select-flagged ----------------------------\n"); - TestPointer(num_items, select_ratio); - - printf("-- Partition-flagged ----------------------------\n"); - TestPointer(num_items, select_ratio); - - -#elif defined(QUICK_TEST) - - // Get device ordinal - int device_ordinal; - CubDebugExit(cudaGetDevice(&device_ordinal)); - - // Get device SM version - int sm_version; - CubDebugExit(SmVersion(sm_version, device_ordinal)); - - // Compile/run quick tests - if (num_items < 0) num_items = 32000000; - - printf("-- Iterator ----------------------------\n"); - TestIterator(num_items, select_ratio); - - ComparePointer( num_items * ((sm_version <= 130) ? 1 : 4), select_ratio); - ComparePointer( num_items * ((sm_version <= 130) ? 
1 : 2), select_ratio); - ComparePointer( num_items, select_ratio); - ComparePointer( num_items / 2, select_ratio); - ComparePointer( num_items / 4, select_ratio); - -#else - - // Compile/run thorough tests - for (int i = 0; i <= g_repeat; ++i) - { - // Test different input types - Test(num_items); - Test(num_items); - Test(num_items); - Test(num_items); - - Test(num_items); - Test(num_items); - Test(num_items); - Test(num_items); - - Test(num_items); - Test(num_items); - Test(num_items); - Test(num_items); - - Test(num_items); - Test(num_items); - } - -#endif - - return 0; -} - - - diff --git a/ml-xgboost/cub/test/test_device_select_unique.cu b/ml-xgboost/cub/test/test_device_select_unique.cu deleted file mode 100644 index 34a3ae9..0000000 --- a/ml-xgboost/cub/test/test_device_select_unique.cu +++ /dev/null @@ -1,648 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
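For reference, the partition semantics verified by the host-side Solve() reference above (selected items compacted to the front of the output, rejected items written to the tail in reverse input order) are those of DevicePartition::If. A minimal sketch, assuming hypothetical int buffers, a threshold of 50, and the templated LessThan functor defined in the test:

#include <cub/device/device_partition.cuh>

cudaError_t ExamplePartitionIf(const int *d_in, int *d_out,
                               int *d_num_selected_out, int num_items)
{
    LessThan<int> select_op(50);          // keep items < 50
    void *d_temp_storage = NULL;
    size_t temp_storage_bytes = 0;

    // Size the scratch buffer, allocate it, then run the partition.
    cudaError_t error = cub::DevicePartition::If(
        d_temp_storage, temp_storage_bytes,
        d_in, d_out, d_num_selected_out, num_items, select_op);
    if (error != cudaSuccess) return error;
    error = cudaMalloc(&d_temp_storage, temp_storage_bytes);
    if (error != cudaSuccess) return error;
    error = cub::DevicePartition::If(
        d_temp_storage, temp_storage_bytes,
        d_in, d_out, d_num_selected_out, num_items, select_op);

    // d_out[0 .. *d_num_selected_out) now holds the selected items in input
    // order; the remaining entries hold the rejected items in reverse order.
    cudaFree(d_temp_storage);
    return error;
}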
- * - ******************************************************************************/ - -/****************************************************************************** - * Test of DeviceSelect::Unique utilities - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include -#include - -#include -#include -#include - -#include -#include - -#include "test_util.h" - -using namespace cub; - - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -bool g_verbose = false; -int g_timing_iterations = 0; -int g_repeat = 0; -float g_device_giga_bandwidth; -CachingDeviceAllocator g_allocator(true); - -// Dispatch types -enum Backend -{ - CUB, // CUB method - THRUST, // Thrust method - CDP, // GPU-based (dynamic parallelism) dispatch to CUB method -}; - - -//--------------------------------------------------------------------- -// Dispatch to different CUB DeviceSelect entrypoints -//--------------------------------------------------------------------- - - -/** - * Dispatch to unique entrypoint - */ -template -CUB_RUNTIME_FUNCTION __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t &temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - NumSelectedIteratorT d_num_selected_out, - OffsetT num_items, - cudaStream_t stream, - bool debug_synchronous) -{ - cudaError_t error = cudaSuccess; - for (int i = 0; i < timing_timing_iterations; ++i) - { - error = DeviceSelect::Unique(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, stream, debug_synchronous); - } - return error; -} - - -//--------------------------------------------------------------------- -// Dispatch to different Thrust entrypoints -//--------------------------------------------------------------------- - - -/** - * Dispatch to unique entrypoint - */ -template -__host__ __forceinline__ -cudaError_t Dispatch( - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void *d_temp_storage, - size_t &temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - NumSelectedIteratorT d_num_selected_out, - OffsetT num_items, - cudaStream_t stream, - bool debug_synchronous) -{ - // The input value type - typedef typename std::iterator_traits::value_type InputT; - - // The output value type - typedef typename If<(Equals::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? - typename std::iterator_traits::value_type, // ... then the input iterator's value type, - typename std::iterator_traits::value_type>::Type OutputT; // ... 
else the output iterator's value type - - if (d_temp_storage == 0) - { - temp_storage_bytes = 1; - } - else - { - thrust::device_ptr d_out_wrapper_end; - thrust::device_ptr d_in_wrapper(d_in); - thrust::device_ptr d_out_wrapper(d_out); - for (int i = 0; i < timing_timing_iterations; ++i) - { - d_out_wrapper_end = thrust::unique_copy(d_in_wrapper, d_in_wrapper + num_items, d_out_wrapper); - } - - OffsetT num_selected = d_out_wrapper_end - d_out_wrapper; - CubDebugExit(cudaMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), cudaMemcpyHostToDevice)); - - } - - return cudaSuccess; -} - - - -//--------------------------------------------------------------------- -// CUDA Nested Parallelism Test Kernel -//--------------------------------------------------------------------- - -/** - * Simple wrapper kernel to invoke DeviceSelect - */ -template -__global__ void CnpDispatchKernel( - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - NumSelectedIteratorT d_num_selected_out, - OffsetT num_items, - bool debug_synchronous) -{ - -#ifndef CUB_CDP - *d_cdp_error = cudaErrorNotSupported; -#else - *d_cdp_error = Dispatch(Int2Type(), timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, - d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, 0, debug_synchronous); - *d_temp_storage_bytes = temp_storage_bytes; -#endif -} - - -/** - * Dispatch to CDP kernel - */ -template -cudaError_t Dispatch( - Int2Type dispatch_to, - int timing_timing_iterations, - size_t *d_temp_storage_bytes, - cudaError_t *d_cdp_error, - - void* d_temp_storage, - size_t &temp_storage_bytes, - InputIteratorT d_in, - OutputIteratorT d_out, - NumSelectedIteratorT d_num_selected_out, - OffsetT num_items, - cudaStream_t stream, - bool debug_synchronous) -{ - // Invoke kernel to invoke device-side dispatch - CnpDispatchKernel<<<1,1>>>(timing_timing_iterations, d_temp_storage_bytes, d_cdp_error, - d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, debug_synchronous); - - // Copy out temp_storage_bytes - CubDebugExit(cudaMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) * 1, cudaMemcpyDeviceToHost)); - - // Copy out error - cudaError_t retval; - CubDebugExit(cudaMemcpy(&retval, d_cdp_error, sizeof(cudaError_t) * 1, cudaMemcpyDeviceToHost)); - return retval; -} - - - -//--------------------------------------------------------------------- -// Test generation -//--------------------------------------------------------------------- - - -/** - * Initialize problem - */ -template -void Initialize( - int entropy_reduction, - T *h_in, - int num_items, - int max_segment) -{ - unsigned int max_int = (unsigned int) -1; - - int key = 0; - int i = 0; - while (i < num_items) - { - // Select number of repeating occurrences for the current run - int repeat; - if (max_segment < 0) - { - repeat = num_items; - } - else if (max_segment < 2) - { - repeat = 1; - } - else - { - RandomBits(repeat, entropy_reduction); - repeat = (int) ((double(repeat) * double(max_segment)) / double(max_int)); - repeat = CUB_MAX(1, repeat); - } - - int j = i; - while (j < CUB_MIN(i + repeat, num_items)) - { - InitValue(INTEGER_SEED, h_in[j], key); - j++; - } - - i = j; - key++; - } - - if (g_verbose) - { - printf("Input:\n"); - DisplayResults(h_in, num_items); - printf("\n\n"); - } -} - - -/** - * Solve unique problem - */ -template < - typename InputIteratorT, 
- typename T> -int Solve( - InputIteratorT h_in, - T *h_reference, - int num_items) -{ - int num_selected = 0; - if (num_items > 0) - { - h_reference[num_selected] = h_in[0]; - num_selected++; - } - - for (int i = 1; i < num_items; ++i) - { - if (h_in[i] != h_in[i - 1]) - { - h_reference[num_selected] = h_in[i]; - num_selected++; - } - } - - return num_selected; -} - - - -/** - * Test DeviceSelect for a given problem input - */ -template < - Backend BACKEND, - typename DeviceInputIteratorT, - typename T> -void Test( - DeviceInputIteratorT d_in, - T *h_reference, - int num_selected, - int num_items) -{ - // Allocate device output array and num selected - T *d_out = NULL; - int *d_num_selected_out = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * num_items)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_selected_out, sizeof(int))); - - // Allocate CDP device arrays - size_t *d_temp_storage_bytes = NULL; - cudaError_t *d_cdp_error = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(cudaError_t) * 1)); - - // Allocate temporary storage - void *d_temp_storage = NULL; - size_t temp_storage_bytes = 0; - CubDebugExit(Dispatch(Int2Type(), 1, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, 0, true)); - CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); - - // Clear device output array - CubDebugExit(cudaMemset(d_out, 0, sizeof(T) * num_items)); - CubDebugExit(cudaMemset(d_num_selected_out, 0, sizeof(int))); - - // Run warmup/correctness iteration - CubDebugExit(Dispatch(Int2Type(), 1, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, 0, true)); - - // Check for correctness (and display results, if specified) - int compare1 = CompareDeviceResults(h_reference, d_out, num_selected, true, g_verbose); - printf("\t Data %s ", compare1 ? "FAIL" : "PASS"); - - int compare2 = CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose); - printf("\t Count %s ", compare2 ? 
"FAIL" : "PASS"); - - // Flush any stdout/stderr - fflush(stdout); - fflush(stderr); - - // Performance - GpuTimer gpu_timer; - gpu_timer.Start(); - CubDebugExit(Dispatch(Int2Type(), g_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, 0, false)); - gpu_timer.Stop(); - float elapsed_millis = gpu_timer.ElapsedMillis(); - - // Display performance - if (g_timing_iterations > 0) - { - float avg_millis = elapsed_millis / g_timing_iterations; - float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f; - float giga_bandwidth = float((num_items + num_selected) * sizeof(T)) / avg_millis / 1000.0f / 1000.0f; - printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s, %.1f%% peak", avg_millis, giga_rate, giga_bandwidth, giga_bandwidth / g_device_giga_bandwidth * 100.0); - } - printf("\n\n"); - - // Flush any stdout/stderr - fflush(stdout); - fflush(stderr); - - // Cleanup - if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); - if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out)); - if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes)); - if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error)); - if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); - - // Correctness asserts - AssertEquals(0, compare1 | compare2); -} - - -/** - * Test DeviceSelect on pointer type - */ -template < - Backend BACKEND, - typename T> -void TestPointer( - int num_items, - int entropy_reduction, - int max_segment) -{ - // Allocate host arrays - T* h_in = new T[num_items]; - T* h_reference = new T[num_items]; - - // Initialize problem and solution - Initialize(entropy_reduction, h_in, num_items, max_segment); - int num_selected = Solve(h_in, h_reference, num_items); - - printf("\nPointer %s cub::DeviceSelect::Unique %d items, %d selected (avg run length %.3f), %s %d-byte elements, entropy_reduction %d\n", - (BACKEND == CDP) ? "CDP CUB" : (BACKEND == THRUST) ? "Thrust" : "CUB", - num_items, num_selected, float(num_items) / num_selected, - typeid(T).name(), - (int) sizeof(T), - entropy_reduction); - fflush(stdout); - - // Allocate problem device arrays - T *d_in = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * num_items)); - - // Initialize device input - CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * num_items, cudaMemcpyHostToDevice)); - - // Run Test - Test(d_in, h_reference, num_selected, num_items); - - // Cleanup - if (h_in) delete[] h_in; - if (h_reference) delete[] h_reference; - if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); -} - - -/** - * Test DeviceSelect on iterator type - */ -template < - Backend BACKEND, - typename T> -void TestIterator( - int num_items) -{ - // Use a counting iterator as the input - CountingInputIterator h_in(0); - - // Allocate host arrays - T* h_reference = new T[num_items]; - - // Initialize problem and solution - int num_selected = Solve(h_in, h_reference, num_items); - - printf("\nIterator %s cub::DeviceSelect::Unique %d items, %d selected (avg run length %.3f), %s %d-byte elements\n", - (BACKEND == CDP) ? "CDP CUB" : (BACKEND == THRUST) ? 
"Thrust" : "CUB", - num_items, num_selected, float(num_items) / num_selected, - typeid(T).name(), - (int) sizeof(T)); - fflush(stdout); - - // Run Test - Test(h_in, h_reference, num_selected, num_items); - - // Cleanup - if (h_reference) delete[] h_reference; -} - - -/** - * Test different gen modes - */ -template < - Backend BACKEND, - typename T> -void Test( - int num_items) -{ - for (int max_segment = 1; ((max_segment > 0) && (max_segment < num_items)); max_segment *= 11) - { - TestPointer(num_items, 0, max_segment); - TestPointer(num_items, 2, max_segment); - TestPointer(num_items, 7, max_segment); - } -} - - -/** - * Test different dispatch - */ -template < - typename T> -void TestOp( - int num_items) -{ - Test(num_items); -#ifdef CUB_CDP - Test(num_items); -#endif -} - - -/** - * Test different input sizes - */ -template -void Test( - int num_items) -{ - if (num_items < 0) - { - TestOp(0); - TestOp(1); - TestOp(100); - TestOp(10000); - TestOp(1000000); - } - else - { - TestOp(num_items); - } -} - - - -//--------------------------------------------------------------------- -// Main -//--------------------------------------------------------------------- - -/** - * Main - */ -int main(int argc, char** argv) -{ - int num_items = -1; - int entropy_reduction = 0; - int maxseg = 1000; - - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - args.GetCmdLineArgument("n", num_items); - args.GetCmdLineArgument("i", g_timing_iterations); - args.GetCmdLineArgument("repeat", g_repeat); - args.GetCmdLineArgument("maxseg", maxseg); - args.GetCmdLineArgument("entropy", entropy_reduction); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--n= " - "[--i= " - "[--device=] " - "[--maxseg=]" - "[--entropy=]" - "[--repeat=]" - "[--v] " - "[--cdp]" - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - g_device_giga_bandwidth = args.device_giga_bandwidth; - printf("\n"); - -#ifdef QUICKER_TEST - - // Compile/run basic CUB test - if (num_items < 0) num_items = 32000000; - TestPointer( num_items, entropy_reduction, maxseg); - -#elif defined(QUICK_TEST) - - // Get device ordinal - int device_ordinal; - CubDebugExit(cudaGetDevice(&device_ordinal)); - - // Get device SM version - int sm_version; - CubDebugExit(SmVersion(sm_version, device_ordinal)); - - // Compile/run quick tests - if (num_items < 0) num_items = 32000000; - - printf("-- Iterator ----------------------------\n"); - TestIterator( num_items, entropy_reduction, maxseg); - - printf("----------------------------\n"); - TestPointer( num_items * ((sm_version <= 130) ? 1 : 4), entropy_reduction, maxseg); - TestPointer( num_items * ((sm_version <= 130) ? 1 : 4), entropy_reduction, maxseg); - - printf("----------------------------\n"); - TestPointer( num_items * ((sm_version <= 130) ? 1 : 2), entropy_reduction, maxseg); - TestPointer( num_items * ((sm_version <= 130) ? 
1 : 2), entropy_reduction, maxseg); - - printf("----------------------------\n"); - TestPointer( num_items, entropy_reduction, maxseg); - TestPointer( num_items, entropy_reduction, maxseg); - - printf("----------------------------\n"); - TestPointer( num_items / 2, entropy_reduction, maxseg); - TestPointer(num_items / 2, entropy_reduction, maxseg); - - printf("----------------------------\n"); - TestPointer( num_items / 4, entropy_reduction, maxseg); - TestPointer( num_items / 4, entropy_reduction, maxseg); - -#else - - // Compile/run thorough tests - for (int i = 0; i <= g_repeat; ++i) - { - // Test different input types - Test(num_items); - Test(num_items); - Test(num_items); - Test(num_items); - - Test(num_items); - Test(num_items); - Test(num_items); - Test(num_items); - - Test(num_items); - Test(num_items); - Test(num_items); - Test(num_items); - - Test(num_items); - Test(num_items); - } - -#endif - - return 0; -} - - - diff --git a/ml-xgboost/cub/test/test_grid_barrier.cu b/ml-xgboost/cub/test/test_grid_barrier.cu deleted file mode 100644 index c47f98a..0000000 --- a/ml-xgboost/cub/test/test_grid_barrier.cu +++ /dev/null @@ -1,152 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
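The unique tests above drive cub::DeviceSelect::Unique, which keeps the first element of every run of consecutive equal values (hence the "avg run length" statistic printed by TestPointer). A minimal sketch of the call, assuming hypothetical int buffers:

#include <cub/device/device_select.cuh>

cudaError_t ExampleUnique(const int *d_in, int *d_out,
                          int *d_num_selected_out, int num_items)
{
    void *d_temp_storage = NULL;
    size_t temp_storage_bytes = 0;

    // First call sizes the scratch buffer; second call compacts the runs.
    cudaError_t error = cub::DeviceSelect::Unique(
        d_temp_storage, temp_storage_bytes,
        d_in, d_out, d_num_selected_out, num_items);
    if (error != cudaSuccess) return error;
    error = cudaMalloc(&d_temp_storage, temp_storage_bytes);
    if (error != cudaSuccess) return error;
    error = cub::DeviceSelect::Unique(
        d_temp_storage, temp_storage_bytes,
        d_in, d_out, d_num_selected_out, num_items);
    cudaFree(d_temp_storage);
    return error;
}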
- * - ******************************************************************************/ - -/****************************************************************************** - * Test evaluation for software global barrier throughput - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include - -#include - -#include "test_util.h" - -using namespace cub; - - -//--------------------------------------------------------------------- -// Test kernels -//--------------------------------------------------------------------- - -/** - * Kernel that iterates through the specified number of software global barriers - */ -__global__ void Kernel( - GridBarrier global_barrier, - int iterations) -{ - for (int i = 0; i < iterations; i++) - { - global_barrier.Sync(); - } -} - - -//--------------------------------------------------------------------- -// Main -//--------------------------------------------------------------------- - -/** - * Main - */ -int main(int argc, char** argv) -{ - cudaError_t retval = cudaSuccess; - - // Defaults - int iterations = 10000; - int block_size = 128; - int grid_size = -1; - - // Initialize command line - CommandLineArgs args(argc, argv); - - // Get args - args.GetCmdLineArgument("i", iterations); - args.GetCmdLineArgument("grid-size", grid_size); - args.GetCmdLineArgument("block-size", block_size); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--device=]" - "[--i=]" - "[--grid-size]" - "[--block-size]" - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - - // Get device ordinal - int device_ordinal; - CubDebugExit(cudaGetDevice(&device_ordinal)); - - // Get device SM version - int sm_version; - CubDebugExit(SmVersion(sm_version, device_ordinal)); - - // Get SM properties - int sm_count, max_block_threads, max_sm_occupancy; - CubDebugExit(cudaDeviceGetAttribute(&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal)); - CubDebugExit(cudaDeviceGetAttribute(&max_block_threads, cudaDevAttrMaxThreadsPerBlock, device_ordinal)); - CubDebugExit(MaxSmOccupancy(max_sm_occupancy, EmptyKernel, 32)); - - // Compute grid size and occupancy - int occupancy = CUB_MIN((max_block_threads / block_size), max_sm_occupancy); - - if (grid_size == -1) - { - grid_size = occupancy * sm_count; - } - else - { - occupancy = grid_size / sm_count; - } - - printf("Initializing software global barrier for Kernel<<<%d,%d>>> with %d occupancy\n", - grid_size, block_size, occupancy); - fflush(stdout); - - // Init global barrier - GridBarrierLifetime global_barrier; - global_barrier.Setup(grid_size); - - // Time kernel - GpuTimer gpu_timer; - gpu_timer.Start(); - Kernel<<>>(global_barrier, iterations); - gpu_timer.Stop(); - - retval = CubDebug(cudaThreadSynchronize()); - - // Output timing results - float avg_elapsed = gpu_timer.ElapsedMillis() / float(iterations); - printf("%d iterations, %f total elapsed millis, %f avg elapsed millis\n", - iterations, - gpu_timer.ElapsedMillis(), - avg_elapsed); - - return retval; -} diff --git a/ml-xgboost/cub/test/test_iterator.cu b/ml-xgboost/cub/test/test_iterator.cu deleted file mode 100644 index 2fe6063..0000000 --- a/ml-xgboost/cub/test/test_iterator.cu +++ /dev/null @@ -1,805 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. 
All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/****************************************************************************** - * Test of iterator utilities - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include - -#include "test_util.h" - -using namespace cub; - - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -bool g_verbose = false; -CachingDeviceAllocator g_allocator(true); - -// Dispatch types -enum Backend -{ - CUB, // CUB method - THRUST, // Thrust method - CDP, // GPU-based (dynamic parallelism) dispatch to CUB method -}; - - -template -struct TransformOp -{ - // Increment transform - __host__ __device__ __forceinline__ T operator()(T input) const - { - T addend; - InitValue(INTEGER_SEED, addend, 1); - return input + addend; - } -}; - -struct SelectOp -{ - template - __host__ __device__ __forceinline__ bool operator()(T input) - { - return true;; - } -}; - - -//--------------------------------------------------------------------- -// Test kernels -//--------------------------------------------------------------------- - -/** - * Test random access input iterator - */ -template < - typename InputIteratorT, - typename T> -__global__ void Kernel( - InputIteratorT d_in, - T *d_out, - InputIteratorT *d_itrs) -{ - d_out[0] = *d_in; // Value at offset 0 - d_out[1] = d_in[100]; // Value at offset 100 - d_out[2] = *(d_in + 1000); // Value at offset 1000 - d_out[3] = *(d_in + 10000); // Value at offset 10000 - - d_in++; - d_out[4] = d_in[0]; // Value at offset 1 - - d_in += 20; - d_out[5] = d_in[0]; // Value at offset 21 - d_itrs[0] = d_in; // Iterator at offset 21 - - d_in 
-= 10; - d_out[6] = d_in[0]; // Value at offset 11; - - d_in -= 11; - d_out[7] = d_in[0]; // Value at offset 0 - d_itrs[1] = d_in; // Iterator at offset 0 -} - - - -//--------------------------------------------------------------------- -// Host testing subroutines -//--------------------------------------------------------------------- - - -/** - * Run iterator test on device - */ -template < - typename InputIteratorT, - typename T, - int TEST_VALUES> -void Test( - InputIteratorT d_in, - T (&h_reference)[TEST_VALUES]) -{ - // Allocate device arrays - T *d_out = NULL; - InputIteratorT *d_itrs = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * TEST_VALUES)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_itrs, sizeof(InputIteratorT) * 2)); - - int compare; - - // Run unguarded kernel - Kernel<<<1, 1>>>(d_in, d_out, d_itrs); - - CubDebugExit(cudaPeekAtLastError()); - CubDebugExit(cudaDeviceSynchronize()); - - // Check results - compare = CompareDeviceResults(h_reference, d_out, TEST_VALUES, g_verbose, g_verbose); - printf("\tValues: %s\n", (compare) ? "FAIL" : "PASS"); - AssertEquals(0, compare); - - // Check iterator at offset 21 - InputIteratorT h_itr = d_in + 21; - compare = CompareDeviceResults(&h_itr, d_itrs, 1, g_verbose, g_verbose); - printf("\tIterators: %s\n", (compare) ? "FAIL" : "PASS"); - AssertEquals(0, compare); - - // Check iterator at offset 0 - compare = CompareDeviceResults(&d_in, d_itrs + 1, 1, g_verbose, g_verbose); - printf("\tIterators: %s\n", (compare) ? "FAIL" : "PASS"); - AssertEquals(0, compare); - - // Cleanup - if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); - if (d_itrs) CubDebugExit(g_allocator.DeviceFree(d_itrs)); -} - - -/** - * Test constant iterator - */ -template -void TestConstant(T base) -{ - printf("\nTesting constant iterator on type %s (base: %lld)\n", typeid(T).name(), (unsigned long long) (base)); fflush(stdout); - - // - // Test iterator manipulation in kernel - // - - T h_reference[8] = {base, base, base, base, base, base, base, base}; - ConstantInputIterator d_itr(base); - Test(d_itr, h_reference); - -#if (THRUST_VERSION >= 100700) // Thrust 1.7 or newer - - // - // Test with thrust::copy_if() - // - - int copy_items = 100; - T *h_copy = new T[copy_items]; - T *d_copy = NULL; - - for (int i = 0; i < copy_items; ++i) - h_copy[i] = d_itr[i]; - - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_copy, sizeof(T) * copy_items)); - thrust::device_ptr d_copy_wrapper(d_copy); - - thrust::copy_if(d_itr, d_itr + copy_items, d_copy_wrapper, SelectOp()); - - int compare = CompareDeviceResults(h_copy, d_copy, copy_items, g_verbose, g_verbose); - printf("\tthrust::copy_if(): %s\n", (compare) ? 
"FAIL" : "PASS"); - AssertEquals(0, compare); - - if (h_copy) delete[] h_copy; - if (d_copy) CubDebugExit(g_allocator.DeviceFree(d_copy)); - -#endif // THRUST_VERSION -} - - -/** - * Test counting iterator - */ -template -void TestCounting(T base) -{ - printf("\nTesting counting iterator on type %s (base: %d) \n", typeid(T).name(), int(base)); fflush(stdout); - - // - // Test iterator manipulation in kernel - // - - // Initialize reference data - T h_reference[8]; - h_reference[0] = base + 0; // Value at offset 0 - h_reference[1] = base + 100; // Value at offset 100 - h_reference[2] = base + 1000; // Value at offset 1000 - h_reference[3] = base + 10000; // Value at offset 10000 - h_reference[4] = base + 1; // Value at offset 1 - h_reference[5] = base + 21; // Value at offset 21 - h_reference[6] = base + 11; // Value at offset 11 - h_reference[7] = base + 0; // Value at offset 0; - - CountingInputIterator d_itr(base); - Test(d_itr, h_reference); - -#if (THRUST_VERSION >= 100700) // Thrust 1.7 or newer - - // - // Test with thrust::copy_if() - // - - unsigned long long max_items = ((1ull << ((sizeof(T) * 8) - 1)) - 1); - size_t copy_items = (size_t) CUB_MIN(max_items - base, 100); // potential issue with differencing overflows when T is a smaller type than can handle the offset - T *h_copy = new T[copy_items]; - T *d_copy = NULL; - - for (unsigned long long i = 0; i < copy_items; ++i) - h_copy[i] = d_itr[i]; - - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_copy, sizeof(T) * copy_items)); - thrust::device_ptr d_copy_wrapper(d_copy); - thrust::copy_if(d_itr, d_itr + copy_items, d_copy_wrapper, SelectOp()); - - int compare = CompareDeviceResults(h_copy, d_copy, copy_items, g_verbose, g_verbose); - printf("\tthrust::copy_if(): %s\n", (compare) ? "FAIL" : "PASS"); - AssertEquals(0, compare); - - if (h_copy) delete[] h_copy; - if (d_copy) CubDebugExit(g_allocator.DeviceFree(d_copy)); - -#endif // THRUST_VERSION -} - - -/** - * Test modified iterator - */ -template -void TestModified() -{ - printf("\nTesting cache-modified iterator on type %s\n", typeid(T).name()); fflush(stdout); - - // - // Test iterator manipulation in kernel - // - - const unsigned int TEST_VALUES = 11000; - - T *h_data = new T[TEST_VALUES]; - for (int i = 0; i < TEST_VALUES; ++i) - { - RandomBits(h_data[i]); - } - - // Allocate device arrays - T *d_data = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_data, sizeof(T) * TEST_VALUES)); - CubDebugExit(cudaMemcpy(d_data, h_data, sizeof(T) * TEST_VALUES, cudaMemcpyHostToDevice)); - - // Initialize reference data - T h_reference[8]; - h_reference[0] = h_data[0]; // Value at offset 0 - h_reference[1] = h_data[100]; // Value at offset 100 - h_reference[2] = h_data[1000]; // Value at offset 1000 - h_reference[3] = h_data[10000]; // Value at offset 10000 - h_reference[4] = h_data[1]; // Value at offset 1 - h_reference[5] = h_data[21]; // Value at offset 21 - h_reference[6] = h_data[11]; // Value at offset 11 - h_reference[7] = h_data[0]; // Value at offset 0; - - Test(CacheModifiedInputIterator((CastT*) d_data), h_reference); - Test(CacheModifiedInputIterator((CastT*) d_data), h_reference); - Test(CacheModifiedInputIterator((CastT*) d_data), h_reference); - Test(CacheModifiedInputIterator((CastT*) d_data), h_reference); - Test(CacheModifiedInputIterator((CastT*) d_data), h_reference); - Test(CacheModifiedInputIterator((CastT*) d_data), h_reference); - Test(CacheModifiedInputIterator((CastT*) d_data), h_reference); - -#if (THRUST_VERSION >= 100700) // Thrust 1.7 or 
newer - - // - // Test with thrust::copy_if() - // - - T *d_copy = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_copy, sizeof(T) * TEST_VALUES)); - - CacheModifiedInputIterator d_in_itr((CastT*) d_data); - CacheModifiedOutputIterator d_out_itr((CastT*) d_copy); - - thrust::copy_if(d_in_itr, d_in_itr + TEST_VALUES, d_out_itr, SelectOp()); - - int compare = CompareDeviceResults(h_data, d_copy, TEST_VALUES, g_verbose, g_verbose); - printf("\tthrust::copy_if(): %s\n", (compare) ? "FAIL" : "PASS"); - AssertEquals(0, compare); - - // Cleanup - if (d_copy) CubDebugExit(g_allocator.DeviceFree(d_copy)); - -#endif // THRUST_VERSION - - if (h_data) delete[] h_data; - if (d_data) CubDebugExit(g_allocator.DeviceFree(d_data)); -} - - -/** - * Test transform iterator - */ -template -void TestTransform() -{ - printf("\nTesting transform iterator on type %s\n", typeid(T).name()); fflush(stdout); - - // - // Test iterator manipulation in kernel - // - - const unsigned int TEST_VALUES = 11000; - - T *h_data = new T[TEST_VALUES]; - for (int i = 0; i < TEST_VALUES; ++i) - { - InitValue(INTEGER_SEED, h_data[i], i); - } - - // Allocate device arrays - T *d_data = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_data, sizeof(T) * TEST_VALUES)); - CubDebugExit(cudaMemcpy(d_data, h_data, sizeof(T) * TEST_VALUES, cudaMemcpyHostToDevice)); - - TransformOp op; - - // Initialize reference data - T h_reference[8]; - h_reference[0] = op(h_data[0]); // Value at offset 0 - h_reference[1] = op(h_data[100]); // Value at offset 100 - h_reference[2] = op(h_data[1000]); // Value at offset 1000 - h_reference[3] = op(h_data[10000]); // Value at offset 10000 - h_reference[4] = op(h_data[1]); // Value at offset 1 - h_reference[5] = op(h_data[21]); // Value at offset 21 - h_reference[6] = op(h_data[11]); // Value at offset 11 - h_reference[7] = op(h_data[0]); // Value at offset 0; - - TransformInputIterator, CastT*> d_itr((CastT*) d_data, op); - Test(d_itr, h_reference); - -#if (THRUST_VERSION >= 100700) // Thrust 1.7 or newer - - // - // Test with thrust::copy_if() - // - - T *h_copy = new T[TEST_VALUES]; - for (int i = 0; i < TEST_VALUES; ++i) - h_copy[i] = op(h_data[i]); - - T *d_copy = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_copy, sizeof(T) * TEST_VALUES)); - thrust::device_ptr d_copy_wrapper(d_copy); - - thrust::copy_if(d_itr, d_itr + TEST_VALUES, d_copy_wrapper, SelectOp()); - - int compare = CompareDeviceResults(h_copy, d_copy, TEST_VALUES, g_verbose, g_verbose); - printf("\tthrust::copy_if(): %s\n", (compare) ? 
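/* [Editor's note] The seven Test(...) calls in TestModified above exercise one
 * CacheModifiedInputIterator per PTX cache-load modifier (LOAD_DEFAULT, LOAD_CA, LOAD_CG,
 * LOAD_CS, LOAD_CV, LOAD_LDG, LOAD_VOLATILE): wrapping a pointer changes how each load is
 * issued, never the values read. A minimal sketch; the kernel is illustrative:
 *
 *   #include <cub/iterator/cache_modified_input_iterator.cuh>
 *
 *   template <cub::CacheLoadModifier MOD>
 *   __global__ void sum_first_n(int *d_in, int *d_out, int n)
 *   {
 *       cub::CacheModifiedInputIterator<MOD, int> it(d_in);
 *       int acc = 0;
 *       for (int i = 0; i < n; ++i)
 *           acc += it[i];                 // every load is issued with modifier MOD
 *       *d_out = acc;
 *   }
 *
 *   // usage: sum_first_n<cub::LOAD_LDG><<<1, 1>>>(d_in, d_out, 100);
 */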
"FAIL" : "PASS"); - AssertEquals(0, compare); - - // Cleanup - if (h_copy) delete[] h_copy; - if (d_copy) CubDebugExit(g_allocator.DeviceFree(d_copy)); - -#endif // THRUST_VERSION - - if (h_data) delete[] h_data; - if (d_data) CubDebugExit(g_allocator.DeviceFree(d_data)); -} - - -/** - * Test tex-obj texture iterator - */ -template -void TestTexObj() -{ - printf("\nTesting tex-obj iterator on type %s\n", typeid(T).name()); fflush(stdout); - - // - // Test iterator manipulation in kernel - // - - const unsigned int TEST_VALUES = 11000; - const unsigned int DUMMY_OFFSET = 500; - const unsigned int DUMMY_TEST_VALUES = TEST_VALUES - DUMMY_OFFSET; - - T *h_data = new T[TEST_VALUES]; - for (int i = 0; i < TEST_VALUES; ++i) - { - RandomBits(h_data[i]); - } - - // Allocate device arrays - T *d_data = NULL; - T *d_dummy = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_data, sizeof(T) * TEST_VALUES)); - CubDebugExit(cudaMemcpy(d_data, h_data, sizeof(T) * TEST_VALUES, cudaMemcpyHostToDevice)); - - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_dummy, sizeof(T) * DUMMY_TEST_VALUES)); - CubDebugExit(cudaMemcpy(d_dummy, h_data + DUMMY_OFFSET, sizeof(T) * DUMMY_TEST_VALUES, cudaMemcpyHostToDevice)); - - // Initialize reference data - T h_reference[8]; - h_reference[0] = h_data[0]; // Value at offset 0 - h_reference[1] = h_data[100]; // Value at offset 100 - h_reference[2] = h_data[1000]; // Value at offset 1000 - h_reference[3] = h_data[10000]; // Value at offset 10000 - h_reference[4] = h_data[1]; // Value at offset 1 - h_reference[5] = h_data[21]; // Value at offset 21 - h_reference[6] = h_data[11]; // Value at offset 11 - h_reference[7] = h_data[0]; // Value at offset 0; - - // Create and bind obj-based test iterator - TexObjInputIterator d_obj_itr; - CubDebugExit(d_obj_itr.BindTexture((CastT*) d_data, sizeof(T) * TEST_VALUES)); - - Test(d_obj_itr, h_reference); - -#if (THRUST_VERSION >= 100700) // Thrust 1.7 or newer - - // - // Test with thrust::copy_if() - // - - T *d_copy = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_copy, sizeof(T) * TEST_VALUES)); - thrust::device_ptr d_copy_wrapper(d_copy); - - CubDebugExit(cudaMemset(d_copy, 0, sizeof(T) * TEST_VALUES)); - thrust::copy_if(d_obj_itr, d_obj_itr + TEST_VALUES, d_copy_wrapper, SelectOp()); - - int compare = CompareDeviceResults(h_data, d_copy, TEST_VALUES, g_verbose, g_verbose); - printf("\tthrust::copy_if(): %s\n", (compare) ? 
"FAIL" : "PASS"); - AssertEquals(0, compare); - - // Cleanup - CubDebugExit(d_obj_itr.UnbindTexture()); - - if (d_copy) CubDebugExit(g_allocator.DeviceFree(d_copy)); - -#endif // THRUST_VERSION - - if (h_data) delete[] h_data; - if (d_data) CubDebugExit(g_allocator.DeviceFree(d_data)); - if (d_dummy) CubDebugExit(g_allocator.DeviceFree(d_dummy)); -} - - -#if CUDA_VERSION >= 5050 - -/** - * Test tex-ref texture iterator - */ -template -void TestTexRef() -{ - printf("\nTesting tex-ref iterator on type %s\n", typeid(T).name()); fflush(stdout); - - // - // Test iterator manipulation in kernel - // - - const unsigned int TEST_VALUES = 11000; - const unsigned int DUMMY_OFFSET = 500; - const unsigned int DUMMY_TEST_VALUES = TEST_VALUES - DUMMY_OFFSET; - - T *h_data = new T[TEST_VALUES]; - for (int i = 0; i < TEST_VALUES; ++i) - { - RandomBits(h_data[i]); - } - - // Allocate device arrays - T *d_data = NULL; - T *d_dummy = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_data, sizeof(T) * TEST_VALUES)); - CubDebugExit(cudaMemcpy(d_data, h_data, sizeof(T) * TEST_VALUES, cudaMemcpyHostToDevice)); - - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_dummy, sizeof(T) * DUMMY_TEST_VALUES)); - CubDebugExit(cudaMemcpy(d_dummy, h_data + DUMMY_OFFSET, sizeof(T) * DUMMY_TEST_VALUES, cudaMemcpyHostToDevice)); - - // Initialize reference data - T h_reference[8]; - h_reference[0] = h_data[0]; // Value at offset 0 - h_reference[1] = h_data[100]; // Value at offset 100 - h_reference[2] = h_data[1000]; // Value at offset 1000 - h_reference[3] = h_data[10000]; // Value at offset 10000 - h_reference[4] = h_data[1]; // Value at offset 1 - h_reference[5] = h_data[21]; // Value at offset 21 - h_reference[6] = h_data[11]; // Value at offset 11 - h_reference[7] = h_data[0]; // Value at offset 0; - - // Create and bind ref-based test iterator - TexRefInputIterator d_ref_itr; - CubDebugExit(d_ref_itr.BindTexture((CastT*) d_data, sizeof(T) * TEST_VALUES)); - - // Create and bind dummy iterator of same type to check with interferance - TexRefInputIterator d_ref_itr2; - CubDebugExit(d_ref_itr2.BindTexture((CastT*) d_dummy, sizeof(T) * DUMMY_TEST_VALUES)); - - Test(d_ref_itr, h_reference); - -#if (THRUST_VERSION >= 100700) // Thrust 1.7 or newer - - // - // Test with thrust::copy_if() - // - - T *d_copy = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_copy, sizeof(T) * TEST_VALUES)); - thrust::device_ptr d_copy_wrapper(d_copy); - - CubDebugExit(cudaMemset(d_copy, 0, sizeof(T) * TEST_VALUES)); - thrust::copy_if(d_ref_itr, d_ref_itr + TEST_VALUES, d_copy_wrapper, SelectOp()); - - int compare = CompareDeviceResults(h_data, d_copy, TEST_VALUES, g_verbose, g_verbose); - printf("\tthrust::copy_if(): %s\n", (compare) ? 
"FAIL" : "PASS"); - AssertEquals(0, compare); - - if (d_copy) CubDebugExit(g_allocator.DeviceFree(d_copy)); - -#endif // THRUST_VERSION - - CubDebugExit(d_ref_itr.UnbindTexture()); - CubDebugExit(d_ref_itr2.UnbindTexture()); - - if (h_data) delete[] h_data; - if (d_data) CubDebugExit(g_allocator.DeviceFree(d_data)); - if (d_dummy) CubDebugExit(g_allocator.DeviceFree(d_dummy)); -} - - -/** - * Test texture transform iterator - */ -template -void TestTexTransform() -{ - printf("\nTesting tex-transform iterator on type %s\n", typeid(T).name()); fflush(stdout); - - // - // Test iterator manipulation in kernel - // - - const unsigned int TEST_VALUES = 11000; - - T *h_data = new T[TEST_VALUES]; - for (int i = 0; i < TEST_VALUES; ++i) - { - InitValue(INTEGER_SEED, h_data[i], i); - } - - // Allocate device arrays - T *d_data = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_data, sizeof(T) * TEST_VALUES)); - CubDebugExit(cudaMemcpy(d_data, h_data, sizeof(T) * TEST_VALUES, cudaMemcpyHostToDevice)); - - TransformOp op; - - // Initialize reference data - T h_reference[8]; - h_reference[0] = op(h_data[0]); // Value at offset 0 - h_reference[1] = op(h_data[100]); // Value at offset 100 - h_reference[2] = op(h_data[1000]); // Value at offset 1000 - h_reference[3] = op(h_data[10000]); // Value at offset 10000 - h_reference[4] = op(h_data[1]); // Value at offset 1 - h_reference[5] = op(h_data[21]); // Value at offset 21 - h_reference[6] = op(h_data[11]); // Value at offset 11 - h_reference[7] = op(h_data[0]); // Value at offset 0; - - // Create and bind texture iterator - typedef TexRefInputIterator TextureIterator; - - TextureIterator d_tex_itr; - CubDebugExit(d_tex_itr.BindTexture((CastT*) d_data, sizeof(T) * TEST_VALUES)); - - // Create transform iterator - TransformInputIterator, TextureIterator> xform_itr(d_tex_itr, op); - - Test(xform_itr, h_reference); - -#if (THRUST_VERSION >= 100700) // Thrust 1.7 or newer - - // - // Test with thrust::copy_if() - // - - T *h_copy = new T[TEST_VALUES]; - for (int i = 0; i < TEST_VALUES; ++i) - h_copy[i] = op(h_data[i]); - - T *d_copy = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_copy, sizeof(T) * TEST_VALUES)); - thrust::device_ptr d_copy_wrapper(d_copy); - - thrust::copy_if(xform_itr, xform_itr + TEST_VALUES, d_copy_wrapper, SelectOp()); - - int compare = CompareDeviceResults(h_copy, d_copy, TEST_VALUES, g_verbose, g_verbose); - printf("\tthrust::copy_if(): %s\n", (compare) ? 
"FAIL" : "PASS"); - AssertEquals(0, compare); - - // Cleanup - if (h_copy) delete[] h_copy; - if (d_copy) CubDebugExit(g_allocator.DeviceFree(d_copy)); - -#endif // THRUST_VERSION - - CubDebugExit(d_tex_itr.UnbindTexture()); - if (h_data) delete[] h_data; - if (d_data) CubDebugExit(g_allocator.DeviceFree(d_data)); -} - -#endif // CUDA_VERSION - - - - -/** - * Run non-integer tests - */ -template -void Test(Int2Type is_integer) -{ - TestModified(); - TestTransform(); - -#if CUB_CDP - // Test tex-obj iterators if CUDA dynamic parallelism enabled - TestTexObj(type_string); -#endif // CUB_CDP - -#if CUDA_VERSION >= 5050 - // Test tex-ref iterators for CUDA 5.5 - TestTexRef(); - TestTexTransform(); -#endif // CUDA_VERSION -} - -/** - * Run integer tests - */ -template -void Test(Int2Type is_integer) -{ - TestConstant(0); - TestConstant(99); - - TestCounting(0); - TestCounting(99); - - // Run non-integer tests - Test(Int2Type()); -} - -/** - * Run tests - */ -template -void Test() -{ - enum { - IS_INTEGER = (Traits::CATEGORY == SIGNED_INTEGER) || (Traits::CATEGORY == UNSIGNED_INTEGER) - }; - - // Test non-const type - Test(Int2Type()); - - // Test non-const type - Test(Int2Type()); -} - - -/** - * Main - */ -int main(int argc, char** argv) -{ - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--device=] " - "[--v] " - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - - // Get ptx version - int ptx_version; - CubDebugExit(PtxVersion(ptx_version)); - - // Evaluate different data types - Test(); - Test(); - Test(); - Test(); - Test(); - Test(); - if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted - Test(); - - Test(); - Test(); - Test(); - Test(); - Test(); - Test(); - if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted - Test(); - - Test(); - Test(); - Test(); - Test(); - Test(); - Test(); - if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted - Test(); - - Test(); - Test(); - Test(); - Test(); - Test(); - Test(); - if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted - Test(); - - Test(); - Test(); - - printf("\nTest complete\n"); fflush(stdout); - - return 0; -} - - - diff --git a/ml-xgboost/cub/test/test_util.h b/ml-xgboost/cub/test/test_util.h deleted file mode 100644 index f29595f..0000000 --- a/ml-xgboost/cub/test/test_util.h +++ /dev/null @@ -1,1600 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - - -#pragma once - -#if defined(_WIN32) || defined(_WIN64) - #include - #undef small // Windows is terrible for polluting macro namespace -#else - #include -#endif - -#include - -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "mersenne.h" - -#include "cub/util_debug.cuh" -#include "cub/util_device.cuh" -#include "cub/util_type.cuh" -#include "cub/util_macro.cuh" - -/****************************************************************************** - * Assertion macros - ******************************************************************************/ - -/** - * Assert equals - */ -#define AssertEquals(a, b) if ((a) != (b)) { std::cerr << "\n(" << __FILE__ << ": " << __LINE__ << ")\n"; exit(1);} - - -/****************************************************************************** - * Command-line parsing functionality - ******************************************************************************/ - -/** - * Utility for parsing command line arguments - */ -struct CommandLineArgs -{ - - std::vector keys; - std::vector values; - std::vector args; - cudaDeviceProp deviceProp; - float device_giga_bandwidth; - size_t device_free_physmem; - size_t device_total_physmem; - - /** - * Constructor - */ - CommandLineArgs(int argc, char **argv) : - keys(10), - values(10) - { - using namespace std; - - // Initialize mersenne generator - unsigned int mersenne_init[4]= {0x123, 0x234, 0x345, 0x456}; - mersenne::init_by_array(mersenne_init, 4); - - for (int i = 1; i < argc; i++) - { - string arg = argv[i]; - - if ((arg[0] != '-') || (arg[1] != '-')) - { - args.push_back(arg); - continue; - } - - string::size_type pos; - string key, val; - if ((pos = arg.find('=')) == string::npos) { - key = string(arg, 2, arg.length() - 2); - val = ""; - } else { - key = string(arg, 2, pos - 2); - val = string(arg, pos + 1, arg.length() - 1); - } - - keys.push_back(key); - values.push_back(val); - } - } - - - /** - * Checks whether a flag "--" is present in the commandline - */ - bool CheckCmdLineFlag(const char* arg_name) - { - using namespace std; - - for (int i = 0; i < int(keys.size()); ++i) - { - if (keys[i] == string(arg_name)) - return true; - } - return false; - } - - - /** - * Returns number of naked (non-flag and non-key-value) commandline parameters - */ - template - int NumNakedArgs() - { - return args.size(); - } - - - /** - * Returns the commandline parameter for a given index (not including flags) - */ - template - 
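/* [Editor's example] CommandLineArgs above splits each "--key=value" token at the first '=',
 * and GetCmdLineArguments further splits a value on commas. A reduced standalone sketch of the
 * same splitting (names and the sample argument are illustrative):
 *
 *   #include <iostream>
 *   #include <sstream>
 *   #include <string>
 *   #include <vector>
 *
 *   int main()
 *   {
 *       std::string arg = "--ticks=1,2,3";
 *       std::string::size_type eq = arg.find('=');
 *       std::string key = arg.substr(2, eq - 2);            // "ticks"
 *       std::istringstream vals(arg.substr(eq + 1));        // "1,2,3"
 *       std::vector<int> out;
 *       for (std::string tok; std::getline(vals, tok, ','); )
 *           out.push_back(std::stoi(tok));
 *       std::cout << key << ": " << out.size() << " values\n";   // ticks: 3 values
 *       return 0;
 *   }
 */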
void GetCmdLineArgument(int index, T &val) - { - using namespace std; - if (index < args.size()) { - istringstream str_stream(args[index]); - str_stream >> val; - } - } - - /** - * Returns the value specified for a given commandline parameter --= - */ - template - void GetCmdLineArgument(const char *arg_name, T &val) - { - using namespace std; - - for (int i = 0; i < int(keys.size()); ++i) - { - if (keys[i] == string(arg_name)) - { - istringstream str_stream(values[i]); - str_stream >> val; - } - } - } - - - /** - * Returns the values specified for a given commandline parameter --=,* - */ - template - void GetCmdLineArguments(const char *arg_name, std::vector &vals) - { - using namespace std; - - if (CheckCmdLineFlag(arg_name)) - { - // Clear any default values - vals.clear(); - - // Recover from multi-value string - for (int i = 0; i < keys.size(); ++i) - { - if (keys[i] == string(arg_name)) - { - string val_string(values[i]); - istringstream str_stream(val_string); - string::size_type old_pos = 0; - string::size_type new_pos = 0; - - // Iterate comma-separated values - T val; - while ((new_pos = val_string.find(',', old_pos)) != string::npos) - { - if (new_pos != old_pos) - { - str_stream.width(new_pos - old_pos); - str_stream >> val; - vals.push_back(val); - } - - // skip over comma - str_stream.ignore(1); - old_pos = new_pos + 1; - } - - // Read last value - str_stream >> val; - vals.push_back(val); - } - } - } - } - - - /** - * The number of pairs parsed - */ - int ParsedArgc() - { - return (int) keys.size(); - } - - /** - * Initialize device - */ - cudaError_t DeviceInit(int dev = -1) - { - cudaError_t error = cudaSuccess; - - do - { - int deviceCount; - error = CubDebug(cudaGetDeviceCount(&deviceCount)); - if (error) break; - - if (deviceCount == 0) { - fprintf(stderr, "No devices supporting CUDA.\n"); - exit(1); - } - if (dev < 0) - { - GetCmdLineArgument("device", dev); - } - if ((dev > deviceCount - 1) || (dev < 0)) - { - dev = 0; - } - - error = CubDebug(cudaSetDevice(dev)); - if (error) break; - - CubDebugExit(cudaMemGetInfo(&device_free_physmem, &device_total_physmem)); - - int ptx_version; - error = CubDebug(cub::PtxVersion(ptx_version)); - if (error) break; - - error = CubDebug(cudaGetDeviceProperties(&deviceProp, dev)); - if (error) break; - - if (deviceProp.major < 1) { - fprintf(stderr, "Device does not support CUDA.\n"); - exit(1); - } - - device_giga_bandwidth = float(deviceProp.memoryBusWidth) * deviceProp.memoryClockRate * 2 / 8 / 1000 / 1000; - - if (!CheckCmdLineFlag("quiet")) - { - printf( - "Using device %d: %s (PTX version %d, SM%d, %d SMs, " - "%lld free / %lld total MB physmem, " - "%.3f GB/s @ %d kHz mem clock, ECC %s)\n", - dev, - deviceProp.name, - ptx_version, - deviceProp.major * 100 + deviceProp.minor * 10, - deviceProp.multiProcessorCount, - (unsigned long long) device_free_physmem / 1024 / 1024, - (unsigned long long) device_total_physmem / 1024 / 1024, - device_giga_bandwidth, - deviceProp.memoryClockRate, - (deviceProp.ECCEnabled) ? 
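/* [Editor's note] The device_giga_bandwidth figure computed in DeviceInit above is peak
 * theoretical DRAM bandwidth:
 *
 *     GB/s = memoryBusWidth(bits) x memoryClockRate(kHz) x 2 (DDR) / 8 (bits/byte) / 10^6
 *
 * For example, an (illustrative) 384-bit bus at a 3,004,000 kHz memory clock gives
 * 384 x 3004000 x 2 / 8 / 10^6 = 288.4 GB/s. Note that cudaDeviceProp::memoryClockRate is
 * reported in kHz.
 */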
"on" : "off"); - fflush(stdout); - } - - } while (0); - - return error; - } -}; - -/****************************************************************************** - * Random bits generator - ******************************************************************************/ - -int g_num_rand_samples = 0; - - -template -bool IsNaN(T val) { return false; } - -template<> -__noinline__ bool IsNaN(float val) -{ - volatile unsigned int bits = reinterpret_cast(val); - - return (((bits >= 0x7F800001) && (bits <= 0x7FFFFFFF)) || - ((bits >= 0xFF800001) && (bits <= 0xFFFFFFFF))); -} - -template<> -__noinline__ bool IsNaN(float1 val) -{ - return (IsNaN(val.x)); -} - -template<> -__noinline__ bool IsNaN(float2 val) -{ - return (IsNaN(val.y) || IsNaN(val.x)); -} - -template<> -__noinline__ bool IsNaN(float3 val) -{ - return (IsNaN(val.z) || IsNaN(val.y) || IsNaN(val.x)); -} - -template<> -__noinline__ bool IsNaN(float4 val) -{ - return (IsNaN(val.y) || IsNaN(val.x) || IsNaN(val.w) || IsNaN(val.z)); -} - -template<> -__noinline__ bool IsNaN(double val) -{ - volatile unsigned long long bits = *reinterpret_cast(&val); - - return (((bits >= 0x7FF0000000000001) && (bits <= 0x7FFFFFFFFFFFFFFF)) || - ((bits >= 0xFFF0000000000001) && (bits <= 0xFFFFFFFFFFFFFFFF))); -} - -template<> -__noinline__ bool IsNaN(double1 val) -{ - return (IsNaN(val.x)); -} - -template<> -__noinline__ bool IsNaN(double2 val) -{ - return (IsNaN(val.y) || IsNaN(val.x)); -} - -template<> -__noinline__ bool IsNaN(double3 val) -{ - return (IsNaN(val.z) || IsNaN(val.y) || IsNaN(val.x)); -} - -template<> -__noinline__ bool IsNaN(double4 val) -{ - return (IsNaN(val.y) || IsNaN(val.x) || IsNaN(val.w) || IsNaN(val.z)); -} - - -/** - * Generates random keys. - * - * We always take the second-order byte from rand() because the higher-order - * bits returned by rand() are commonly considered more uniformly distributed - * than the lower-order bits. - * - * We can decrease the entropy level of keys by adopting the technique - * of Thearling and Smith in which keys are computed from the bitwise AND of - * multiple random samples: - * - * entropy_reduction | Effectively-unique bits per key - * ----------------------------------------------------- - * -1 | 0 - * 0 | 32 - * 1 | 25.95 (81%) - * 2 | 17.41 (54%) - * 3 | 10.78 (34%) - * 4 | 6.42 (20%) - * ... | ... 
- * - */ -template -void RandomBits( - K &key, - int entropy_reduction = 0, - int begin_bit = 0, - int end_bit = sizeof(K) * 8) -{ - const int NUM_BYTES = sizeof(K); - const int WORD_BYTES = sizeof(unsigned int); - const int NUM_WORDS = (NUM_BYTES + WORD_BYTES - 1) / WORD_BYTES; - - unsigned int word_buff[NUM_WORDS]; - - if (entropy_reduction == -1) - { - memset((void *) &key, 0, sizeof(key)); - return; - } - - if (end_bit < 0) - end_bit = sizeof(K) * 8; - - while (true) - { - // Generate random word_buff - for (int j = 0; j < NUM_WORDS; j++) - { - int current_bit = j * WORD_BYTES * 8; - - unsigned int word = 0xffffffff; - word &= 0xffffffff << CUB_MAX(0, begin_bit - current_bit); - word &= 0xffffffff >> CUB_MAX(0, (current_bit + (WORD_BYTES * 8)) - end_bit); - - for (int i = 0; i <= entropy_reduction; i++) - { - // Grab some of the higher bits from rand (better entropy, supposedly) - word &= mersenne::genrand_int32(); - g_num_rand_samples++; - } - - word_buff[j] = word; - } - - memcpy(&key, word_buff, sizeof(K)); - - K copy = key; - if (!IsNaN(copy)) - break; // avoids NaNs when generating random floating point numbers - } -} - -/// Randomly select number between [0:max) -template -T RandomValue(T max) -{ - unsigned int bits; - unsigned int max_int = (unsigned int) -1; - do { - RandomBits(bits); - } while (bits == max_int); - - return (T) ((double(bits) / double(max_int)) * double(max)); -} - - -/****************************************************************************** - * Console printing utilities - ******************************************************************************/ - -/** - * Helper for casting character types to integers for cout printing - */ -template -T CoutCast(T val) { return val; } - -int CoutCast(char val) { return val; } - -int CoutCast(unsigned char val) { return val; } - -int CoutCast(signed char val) { return val; } - - - -/****************************************************************************** - * Test value initialization utilities - ******************************************************************************/ - -/** - * Test problem generation options - */ -enum GenMode -{ - UNIFORM, // Assign to '2', regardless of integer seed - INTEGER_SEED, // Assign to integer seed - RANDOM, // Assign to random, regardless of integer seed -}; - -/** - * Initialize value - */ -template -__host__ __device__ __forceinline__ void InitValue(GenMode gen_mode, T &value, int index = 0) -{ - switch (gen_mode) - { -#if (CUB_PTX_ARCH == 0) - case RANDOM: - RandomBits(value); - break; -#endif - case UNIFORM: - value = 2; - break; - case INTEGER_SEED: - default: - value = (T) index; - break; - } -} - - -/** - * Initialize value (bool) - */ -__host__ __device__ __forceinline__ void InitValue(GenMode gen_mode, bool &value, int index = 0) -{ - switch (gen_mode) - { -#if (CUB_PTX_ARCH == 0) - case RANDOM: - char c; - RandomBits(c, 0, 0, 1); - value = (c > 0); - break; -#endif - case UNIFORM: - value = true; - break; - case INTEGER_SEED: - default: - value = (index > 0); - break; - } -} - - -/** - * cub::NullType test initialization - */ -__host__ __device__ __forceinline__ void InitValue(GenMode gen_mode, cub::NullType &value, int index = 0) -{} - - -/** - * cub::KeyValuePairtest initialization - */ -template -__host__ __device__ __forceinline__ void InitValue( - GenMode gen_mode, - cub::KeyValuePair& value, - int index = 0) -{ - InitValue(gen_mode, value.value, index); - - // Assign corresponding flag with a likelihood of the last bit being set with entropy-reduction level 
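/* [Editor's note] The entropy-reduction table above follows from ANDing (r+1) independent
 * uniform words: each bit is then 1 with probability p = 2^-(r+1), and the effective entropy
 * per 32-bit key is 32 * H(p), where H is the binary entropy function. A standalone check that
 * reproduces the table (illustrative):
 *
 *   #include <cmath>
 *   #include <cstdio>
 *
 *   int main()
 *   {
 *       for (int r = 0; r <= 4; ++r)
 *       {
 *           double p = std::ldexp(1.0, -(r + 1));                       // 2^-(r+1)
 *           double H = -p * std::log2(p) - (1 - p) * std::log2(1 - p);  // binary entropy
 *           std::printf("%d | %.2f bits\n", r, 32 * H);                 // 32.00, 25.95, 17.41, 10.78, 6.42
 *       }
 *       return 0;
 *   }
 */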
3 - RandomBits(value.key, 3); - value.key = (value.key & 0x1); -} - - - -/****************************************************************************** - * Comparison and ostream operators - ******************************************************************************/ - -/** - * KeyValuePair ostream operator - */ -template -std::ostream& operator<<(std::ostream& os, const cub::KeyValuePair &val) -{ - os << '(' << CoutCast(val.key) << ',' << CoutCast(val.value) << ')'; - return os; -} - - -/****************************************************************************** - * Comparison and ostream operators for CUDA vector types - ******************************************************************************/ - -/** - * Vector1 overloads - */ -#define CUB_VEC_OVERLOAD_1(T, BaseT) \ - /* Ostream output */ \ - std::ostream& operator<<( \ - std::ostream& os, \ - const T& val) \ - { \ - os << '(' << CoutCast(val.x) << ')'; \ - return os; \ - } \ - /* Inequality */ \ - __host__ __device__ __forceinline__ bool operator!=( \ - const T &a, \ - const T &b) \ - { \ - return (a.x != b.x); \ - } \ - /* Equality */ \ - __host__ __device__ __forceinline__ bool operator==( \ - const T &a, \ - const T &b) \ - { \ - return (a.x == b.x); \ - } \ - /* Test initialization */ \ - __host__ __device__ __forceinline__ void InitValue(GenMode gen_mode, T &value, int index = 0) \ - { \ - InitValue(gen_mode, value.x, index); \ - } \ - /* Max */ \ - __host__ __device__ __forceinline__ bool operator>( \ - const T &a, \ - const T &b) \ - { \ - return (a.x > b.x); \ - } \ - /* Min */ \ - __host__ __device__ __forceinline__ bool operator<( \ - const T &a, \ - const T &b) \ - { \ - return (a.x < b.x); \ - } \ - /* Summation (non-reference addends for VS2003 -O3 warpscan workaround */ \ - __host__ __device__ __forceinline__ T operator+( \ - T a, \ - T b) \ - { \ - T retval = make_##T(a.x + b.x); \ - return retval; \ - } \ - namespace cub { \ - template<> \ - struct NumericTraits \ - { \ - static const Category CATEGORY = NOT_A_NUMBER; \ - enum { \ - PRIMITIVE = false, \ - NULL_TYPE = false, \ - }; \ - static T Max() \ - { \ - T retval = { \ - NumericTraits::Max()}; \ - return retval; \ - } \ - static T Lowest() \ - { \ - T retval = { \ - NumericTraits::Lowest()}; \ - return retval; \ - } \ - }; \ - } /* namespace std */ - - - -/** - * Vector2 overloads - */ -#define CUB_VEC_OVERLOAD_2(T, BaseT) \ - /* Ostream output */ \ - std::ostream& operator<<( \ - std::ostream& os, \ - const T& val) \ - { \ - os << '(' \ - << CoutCast(val.x) << ',' \ - << CoutCast(val.y) << ')'; \ - return os; \ - } \ - /* Inequality */ \ - __host__ __device__ __forceinline__ bool operator!=( \ - const T &a, \ - const T &b) \ - { \ - return (a.x != b.x) || \ - (a.y != b.y); \ - } \ - /* Equality */ \ - __host__ __device__ __forceinline__ bool operator==( \ - const T &a, \ - const T &b) \ - { \ - return (a.x == b.x) && \ - (a.y == b.y); \ - } \ - /* Test initialization */ \ - __host__ __device__ __forceinline__ void InitValue(GenMode gen_mode, T &value, int index = 0) \ - { \ - InitValue(gen_mode, value.x, index); \ - InitValue(gen_mode, value.y, index); \ - } \ - /* Max */ \ - __host__ __device__ __forceinline__ bool operator>( \ - const T &a, \ - const T &b) \ - { \ - if (a.x > b.x) return true; else if (b.x > a.x) return false; \ - return a.y > b.y; \ - } \ - /* Min */ \ - __host__ __device__ __forceinline__ bool operator<( \ - const T &a, \ - const T &b) \ - { \ - if (a.x < b.x) return true; else if (b.x < a.x) return false; \ - return a.y < b.y; \ - 
} \ - /* Summation (non-reference addends for VS2003 -O3 warpscan workaround */ \ - __host__ __device__ __forceinline__ T operator+( \ - T a, \ - T b) \ - { \ - T retval = make_##T( \ - a.x + b.x, \ - a.y + b.y); \ - return retval; \ - } \ - namespace cub { \ - template<> \ - struct NumericTraits \ - { \ - static const Category CATEGORY = NOT_A_NUMBER; \ - enum { \ - PRIMITIVE = false, \ - NULL_TYPE = false, \ - }; \ - static T Max() \ - { \ - T retval = { \ - NumericTraits::Max(), \ - NumericTraits::Max()}; \ - return retval; \ - } \ - static T Lowest() \ - { \ - T retval = { \ - NumericTraits::Lowest(), \ - NumericTraits::Lowest()}; \ - return retval; \ - } \ - }; \ - } /* namespace cub */ - - - -/** - * Vector3 overloads - */ -#define CUB_VEC_OVERLOAD_3(T, BaseT) \ - /* Ostream output */ \ - std::ostream& operator<<( \ - std::ostream& os, \ - const T& val) \ - { \ - os << '(' \ - << CoutCast(val.x) << ',' \ - << CoutCast(val.y) << ',' \ - << CoutCast(val.z) << ')'; \ - return os; \ - } \ - /* Inequality */ \ - __host__ __device__ __forceinline__ bool operator!=( \ - const T &a, \ - const T &b) \ - { \ - return (a.x != b.x) || \ - (a.y != b.y) || \ - (a.z != b.z); \ - } \ - /* Equality */ \ - __host__ __device__ __forceinline__ bool operator==( \ - const T &a, \ - const T &b) \ - { \ - return (a.x == b.x) && \ - (a.y == b.y) && \ - (a.z == b.z); \ - } \ - /* Test initialization */ \ - __host__ __device__ __forceinline__ void InitValue(GenMode gen_mode, T &value, int index = 0) \ - { \ - InitValue(gen_mode, value.x, index); \ - InitValue(gen_mode, value.y, index); \ - InitValue(gen_mode, value.z, index); \ - } \ - /* Max */ \ - __host__ __device__ __forceinline__ bool operator>( \ - const T &a, \ - const T &b) \ - { \ - if (a.x > b.x) return true; else if (b.x > a.x) return false; \ - if (a.y > b.y) return true; else if (b.y > a.y) return false; \ - return a.z > b.z; \ - } \ - /* Min */ \ - __host__ __device__ __forceinline__ bool operator<( \ - const T &a, \ - const T &b) \ - { \ - if (a.x < b.x) return true; else if (b.x < a.x) return false; \ - if (a.y < b.y) return true; else if (b.y < a.y) return false; \ - return a.z < b.z; \ - } \ - /* Summation (non-reference addends for VS2003 -O3 warpscan workaround */ \ - __host__ __device__ __forceinline__ T operator+( \ - T a, \ - T b) \ - { \ - T retval = make_##T( \ - a.x + b.x, \ - a.y + b.y, \ - a.z + b.z); \ - return retval; \ - } \ - namespace cub { \ - template<> \ - struct NumericTraits \ - { \ - static const Category CATEGORY = NOT_A_NUMBER; \ - enum { \ - PRIMITIVE = false, \ - NULL_TYPE = false, \ - }; \ - static T Max() \ - { \ - T retval = { \ - NumericTraits::Max(), \ - NumericTraits::Max(), \ - NumericTraits::Max()}; \ - return retval; \ - } \ - static T Lowest() \ - { \ - T retval = { \ - NumericTraits::Lowest(), \ - NumericTraits::Lowest(), \ - NumericTraits::Lowest()}; \ - return retval; \ - } \ - }; \ - } /* namespace cub */ - - -/** - * Vector4 overloads - */ -#define CUB_VEC_OVERLOAD_4(T, BaseT) \ - /* Ostream output */ \ - std::ostream& operator<<( \ - std::ostream& os, \ - const T& val) \ - { \ - os << '(' \ - << CoutCast(val.x) << ',' \ - << CoutCast(val.y) << ',' \ - << CoutCast(val.z) << ',' \ - << CoutCast(val.w) << ')'; \ - return os; \ - } \ - /* Inequality */ \ - __host__ __device__ __forceinline__ bool operator!=( \ - const T &a, \ - const T &b) \ - { \ - return (a.x != b.x) || \ - (a.y != b.y) || \ - (a.z != b.z) || \ - (a.w != b.w); \ - } \ - /* Equality */ \ - __host__ __device__ __forceinline__ bool 
operator==( \ - const T &a, \ - const T &b) \ - { \ - return (a.x == b.x) && \ - (a.y == b.y) && \ - (a.z == b.z) && \ - (a.w == b.w); \ - } \ - /* Test initialization */ \ - __host__ __device__ __forceinline__ void InitValue(GenMode gen_mode, T &value, int index = 0) \ - { \ - InitValue(gen_mode, value.x, index); \ - InitValue(gen_mode, value.y, index); \ - InitValue(gen_mode, value.z, index); \ - InitValue(gen_mode, value.w, index); \ - } \ - /* Max */ \ - __host__ __device__ __forceinline__ bool operator>( \ - const T &a, \ - const T &b) \ - { \ - if (a.x > b.x) return true; else if (b.x > a.x) return false; \ - if (a.y > b.y) return true; else if (b.y > a.y) return false; \ - if (a.z > b.z) return true; else if (b.z > a.z) return false; \ - return a.w > b.w; \ - } \ - /* Min */ \ - __host__ __device__ __forceinline__ bool operator<( \ - const T &a, \ - const T &b) \ - { \ - if (a.x < b.x) return true; else if (b.x < a.x) return false; \ - if (a.y < b.y) return true; else if (b.y < a.y) return false; \ - if (a.z < b.z) return true; else if (b.z < a.z) return false; \ - return a.w < b.w; \ - } \ - /* Summation (non-reference addends for VS2003 -O3 warpscan workaround */ \ - __host__ __device__ __forceinline__ T operator+( \ - T a, \ - T b) \ - { \ - T retval = make_##T( \ - a.x + b.x, \ - a.y + b.y, \ - a.z + b.z, \ - a.w + b.w); \ - return retval; \ - } \ - namespace cub { \ - template<> \ - struct NumericTraits \ - { \ - static const Category CATEGORY = NOT_A_NUMBER; \ - enum { \ - PRIMITIVE = false, \ - NULL_TYPE = false, \ - }; \ - static T Max() \ - { \ - T retval = { \ - NumericTraits::Max(), \ - NumericTraits::Max(), \ - NumericTraits::Max(), \ - NumericTraits::Max()}; \ - return retval; \ - } \ - static T Lowest() \ - { \ - T retval = { \ - NumericTraits::Lowest(), \ - NumericTraits::Lowest(), \ - NumericTraits::Lowest(), \ - NumericTraits::Lowest()}; \ - return retval; \ - } \ - }; \ - } /* namespace cub */ - -/** - * All vector overloads - */ -#define CUB_VEC_OVERLOAD(COMPONENT_T, BaseT) \ - CUB_VEC_OVERLOAD_1(COMPONENT_T##1, BaseT) \ - CUB_VEC_OVERLOAD_2(COMPONENT_T##2, BaseT) \ - CUB_VEC_OVERLOAD_3(COMPONENT_T##3, BaseT) \ - CUB_VEC_OVERLOAD_4(COMPONENT_T##4, BaseT) - -/** - * Define for types - */ -CUB_VEC_OVERLOAD(char, char) -CUB_VEC_OVERLOAD(short, short) -CUB_VEC_OVERLOAD(int, int) -CUB_VEC_OVERLOAD(long, long) -CUB_VEC_OVERLOAD(longlong, long long) -CUB_VEC_OVERLOAD(uchar, unsigned char) -CUB_VEC_OVERLOAD(ushort, unsigned short) -CUB_VEC_OVERLOAD(uint, unsigned int) -CUB_VEC_OVERLOAD(ulong, unsigned long) -CUB_VEC_OVERLOAD(ulonglong, unsigned long long) -CUB_VEC_OVERLOAD(float, float) -CUB_VEC_OVERLOAD(double, double) - - -//--------------------------------------------------------------------- -// Complex data type TestFoo -//--------------------------------------------------------------------- - -/** - * TestFoo complex data type - */ -struct TestFoo -{ - long long x; - int y; - short z; - char w; - - // Factory - static __host__ __device__ __forceinline__ TestFoo MakeTestFoo(long long x, int y, short z, char w) - { - TestFoo retval = {x, y, z, w}; - return retval; - } - - // Assignment from int operator - __host__ __device__ __forceinline__ TestFoo& operator =(int b) - { - x = b; - y = b; - z = b; - w = b; - return *this; - } - - // Summation operator - __host__ __device__ __forceinline__ TestFoo operator+(const TestFoo &b) const - { - return MakeTestFoo(x + b.x, y + b.y, z + b.z, w + b.w); - } - - // Inequality operator - __host__ __device__ __forceinline__ bool 
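/* [Editor's note] The CUB_VEC_OVERLOAD macros above rely on preprocessor token pasting: the ##
 * operator glues COMPONENT_T to the lane count, so CUB_VEC_OVERLOAD(int, int) instantiates
 * overloads for int1..int4 and calls the matching CUDA factory make_int1..make_int4. A reduced
 * illustration of the mechanism (macro name illustrative):
 *
 *   #define DEFINE_ADD(T)                                          \
 *       __host__ __device__ T##2 add(T##2 a, T##2 b)               \
 *       { return make_##T##2(a.x + b.x, a.y + b.y); }
 *
 *   DEFINE_ADD(int)     // defines: int2 add(int2, int2) via make_int2
 *   DEFINE_ADD(float)   // defines: float2 add(float2, float2) via make_float2
 */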
operator !=(const TestFoo &b) const - { - return (x != b.x) || (y != b.y) || (z != b.z) || (w != b.w); - } - - // Equality operator - __host__ __device__ __forceinline__ bool operator ==(const TestFoo &b) const - { - return (x == b.x) && (y == b.y) && (z == b.z) && (w == b.w); - } - - // Less than operator - __host__ __device__ __forceinline__ bool operator <(const TestFoo &b) const - { - if (x < b.x) return true; else if (b.x < x) return false; - if (y < b.y) return true; else if (b.y < y) return false; - if (z < b.z) return true; else if (b.z < z) return false; - return w < b.w; - } - - // Greater than operator - __host__ __device__ __forceinline__ bool operator >(const TestFoo &b) const - { - if (x > b.x) return true; else if (b.x > x) return false; - if (y > b.y) return true; else if (b.y > y) return false; - if (z > b.z) return true; else if (b.z > z) return false; - return w > b.w; - } - -}; - -/** - * TestFoo ostream operator - */ -std::ostream& operator<<(std::ostream& os, const TestFoo& val) -{ - os << '(' << val.x << ',' << val.y << ',' << val.z << ',' << CoutCast(val.w) << ')'; - return os; -} - -/** - * TestFoo test initialization - */ -__host__ __device__ __forceinline__ void InitValue(GenMode gen_mode, TestFoo &value, int index = 0) -{ - InitValue(gen_mode, value.x, index); - InitValue(gen_mode, value.y, index); - InitValue(gen_mode, value.z, index); - InitValue(gen_mode, value.w, index); -} - - -/// numeric_limits specialization -namespace cub { -template<> -struct NumericTraits -{ - static const Category CATEGORY = NOT_A_NUMBER; - enum { - PRIMITIVE = false, - NULL_TYPE = false, - }; - static TestFoo Max() - { - return TestFoo::MakeTestFoo( - NumericTraits::Max(), - NumericTraits::Max(), - NumericTraits::Max(), - NumericTraits::Max()); - } - - static TestFoo Lowest() - { - return TestFoo::MakeTestFoo( - NumericTraits::Lowest(), - NumericTraits::Lowest(), - NumericTraits::Lowest(), - NumericTraits::Lowest()); - } -}; -} // namespace cub - - -//--------------------------------------------------------------------- -// Complex data type TestBar (with optimizations for fence-free warp-synchrony) -//--------------------------------------------------------------------- - -/** - * TestBar complex data type - */ -struct TestBar -{ - long long x; - int y; - - // Constructor - __host__ __device__ __forceinline__ TestBar() : x(0), y(0) - {} - - // Constructor - __host__ __device__ __forceinline__ TestBar(int b) : x(b), y(b) - {} - - // Constructor - __host__ __device__ __forceinline__ TestBar(long long x, int y) : x(x), y(y) - {} - - // Assignment from int operator - __host__ __device__ __forceinline__ TestBar& operator =(int b) - { - x = b; - y = b; - return *this; - } - - // Summation operator - __host__ __device__ __forceinline__ TestBar operator+(const TestBar &b) const - { - return TestBar(x + b.x, y + b.y); - } - - // Inequality operator - __host__ __device__ __forceinline__ bool operator !=(const TestBar &b) const - { - return (x != b.x) || (y != b.y); - } - - // Equality operator - __host__ __device__ __forceinline__ bool operator ==(const TestBar &b) const - { - return (x == b.x) && (y == b.y); - } - - // Less than operator - __host__ __device__ __forceinline__ bool operator <(const TestBar &b) const - { - if (x < b.x) return true; else if (b.x < x) return false; - return y < b.y; - } - - // Greater than operator - __host__ __device__ __forceinline__ bool operator >(const TestBar &b) const - { - if (x > b.x) return true; else if (b.x > x) return false; - return y > b.y; - } - 
-}; - - -/** - * TestBar ostream operator - */ -std::ostream& operator<<(std::ostream& os, const TestBar& val) -{ - os << '(' << val.x << ',' << val.y << ')'; - return os; -} - -/** - * TestBar test initialization - */ -__host__ __device__ __forceinline__ void InitValue(GenMode gen_mode, TestBar &value, int index = 0) -{ - InitValue(gen_mode, value.x, index); - InitValue(gen_mode, value.y, index); -} - -/// numeric_limits specialization -namespace cub { -template<> -struct NumericTraits -{ - static const Category CATEGORY = NOT_A_NUMBER; - enum { - PRIMITIVE = false, - NULL_TYPE = false, - }; - static TestBar Max() - { - return TestBar( - NumericTraits::Max(), - NumericTraits::Max()); - } - - static TestBar Lowest() - { - return TestBar( - NumericTraits::Lowest(), - NumericTraits::Lowest()); - } -}; -} // namespace cub - - -/****************************************************************************** - * Helper routines for list comparison and display - ******************************************************************************/ - - -/** - * Compares the equivalence of two arrays - */ -template -int CompareResults(T* computed, S* reference, OffsetT len, bool verbose = true) -{ - for (OffsetT i = 0; i < len; i++) - { - if (computed[i] != reference[i]) - { - if (verbose) std::cout << "INCORRECT: [" << i << "]: " - << CoutCast(computed[i]) << " != " - << CoutCast(reference[i]); - return 1; - } - } - return 0; -} - - -/** - * Compares the equivalence of two arrays - */ -template -int CompareResults(float* computed, float* reference, OffsetT len, bool verbose = true) -{ - for (OffsetT i = 0; i < len; i++) - { - if (computed[i] != reference[i]) - { - float difference = std::abs(computed[i]-reference[i]); - float fraction = difference / std::abs(reference[i]); - - if (fraction > 0.0001) - { - if (verbose) std::cout << "INCORRECT: [" << i << "]: " - << "(computed) " << CoutCast(computed[i]) << " != " - << CoutCast(reference[i]) << " (difference:" << difference << ", fraction: " << fraction << ")"; - return 1; - } - } - } - return 0; -} - - -/** - * Compares the equivalence of two arrays - */ -template -int CompareResults(cub::NullType* computed, cub::NullType* reference, OffsetT len, bool verbose = true) -{ - return 0; -} - -/** - * Compares the equivalence of two arrays - */ -template -int CompareResults(double* computed, double* reference, OffsetT len, bool verbose = true) -{ - for (OffsetT i = 0; i < len; i++) - { - if (computed[i] != reference[i]) - { - double difference = std::abs(computed[i]-reference[i]); - double fraction = difference / std::abs(reference[i]); - - if (fraction > 0.0001) - { - if (verbose) std::cout << "INCORRECT: [" << i << "]: " - << CoutCast(computed[i]) << " != " - << CoutCast(reference[i]) << " (difference:" << difference << ", fraction: " << fraction << ")"; - return 1; - } - } - } - return 0; -} - - -/** - * Verify the contents of a device array match those - * of a host array - */ -int CompareDeviceResults( - cub::NullType *h_reference, - cub::NullType *d_data, - size_t num_items, - bool verbose = true, - bool display_data = false) -{ - return 0; -} - - -/** - * Verify the contents of a device array match those - * of a host array - */ -template -int CompareDeviceResults( - S *h_reference, - T *d_data, - size_t num_items, - bool verbose = true, - bool display_data = false) -{ - // Allocate array on host - T *h_data = (T*) malloc(num_items * sizeof(T)); - - // Copy data back - cudaMemcpy(h_data, d_data, sizeof(T) * num_items, cudaMemcpyDeviceToHost); - - // 
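/* [Editor's note] The float/double CompareResults overloads above accept a relative error of
 * 1e-4 rather than exact equality, since device-side computation may reassociate floating-point
 * operations. The core check, restated as a standalone helper (name illustrative):
 *
 *   #include <cmath>
 *
 *   inline bool NearlyEqual(double computed, double reference, double max_fraction = 1e-4)
 *   {
 *       if (computed == reference) return true;                   // covers exact matches and zeros
 *       double difference = std::abs(computed - reference);
 *       return difference / std::abs(reference) <= max_fraction;  // relative error test
 *   }
 */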
Display data - if (display_data) - { - printf("Reference:\n"); - for (int i = 0; i < int(num_items); i++) - { - std::cout << CoutCast(h_reference[i]) << ", "; - } - printf("\n\nComputed:\n"); - for (int i = 0; i < int(num_items); i++) - { - std::cout << CoutCast(h_data[i]) << ", "; - } - printf("\n\n"); - } - - // Check - int retval = CompareResults(h_data, h_reference, num_items, verbose); - - // Cleanup - if (h_data) free(h_data); - - return retval; -} - - -/** - * Verify the contents of a device array match those - * of a device array - */ -template -int CompareDeviceDeviceResults( - T *d_reference, - T *d_data, - size_t num_items, - bool verbose = true, - bool display_data = false) -{ - // Allocate array on host - T *h_reference = (T*) malloc(num_items * sizeof(T)); - T *h_data = (T*) malloc(num_items * sizeof(T)); - - // Copy data back - cudaMemcpy(h_reference, d_reference, sizeof(T) * num_items, cudaMemcpyDeviceToHost); - cudaMemcpy(h_data, d_data, sizeof(T) * num_items, cudaMemcpyDeviceToHost); - - // Display data - if (display_data) { - printf("Reference:\n"); - for (int i = 0; i < num_items; i++) - { - std::cout << CoutCast(h_reference[i]) << ", "; - } - printf("\n\nComputed:\n"); - for (int i = 0; i < num_items; i++) - { - std::cout << CoutCast(h_data[i]) << ", "; - } - printf("\n\n"); - } - - // Check - int retval = CompareResults(h_data, h_reference, num_items, verbose); - - // Cleanup - if (h_reference) free(h_reference); - if (h_data) free(h_data); - - return retval; -} - - -/** - * Print the contents of a host array - */ -void DisplayResults( - cub::NullType *h_data, - size_t num_items) -{} - - -/** - * Print the contents of a host array - */ -template -void DisplayResults( - InputIteratorT h_data, - size_t num_items) -{ - // Display data - for (int i = 0; i < int(num_items); i++) - { - std::cout << CoutCast(h_data[i]) << ", "; - } - printf("\n"); -} - - -/** - * Print the contents of a device array - */ -template -void DisplayDeviceResults( - T *d_data, - size_t num_items) -{ - // Allocate array on host - T *h_data = (T*) malloc(num_items * sizeof(T)); - - // Copy data back - cudaMemcpy(h_data, d_data, sizeof(T) * num_items, cudaMemcpyDeviceToHost); - - DisplayResults(h_data, num_items); - - // Cleanup - if (h_data) free(h_data); -} - - -/****************************************************************************** - * Segment descriptor generation - ******************************************************************************/ - -/** - * Initialize segments - */ -void InitializeSegments( - int num_items, - int num_segments, - int *h_segment_offsets, - bool verbose = false) -{ - if (num_segments <= 0) - return; - - unsigned int expected_segment_length = (num_items + num_segments - 1) / num_segments; - int offset = 0; - for (int i = 0; i < num_segments; ++i) - { - h_segment_offsets[i] = offset; - - unsigned int segment_length = RandomValue((expected_segment_length * 2) + 1); - offset += segment_length; - offset = CUB_MIN(offset, num_items); - } - h_segment_offsets[num_segments] = num_items; - - if (verbose) - { - printf("Segment offsets: "); - DisplayResults(h_segment_offsets, num_segments + 1); - } -} - - -/****************************************************************************** - * Timing - ******************************************************************************/ - - -struct CpuTimer -{ -#if defined(_WIN32) || defined(_WIN64) - - LARGE_INTEGER ll_freq; - LARGE_INTEGER ll_start; - LARGE_INTEGER ll_stop; - - CpuTimer() - { - QueryPerformanceFrequency(&ll_freq); 
- } - - void Start() - { - QueryPerformanceCounter(&ll_start); - } - - void Stop() - { - QueryPerformanceCounter(&ll_stop); - } - - float ElapsedMillis() - { - double start = double(ll_start.QuadPart) / double(ll_freq.QuadPart); - double stop = double(ll_stop.QuadPart) / double(ll_freq.QuadPart); - - return float((stop - start) * 1000); - } - -#else - - rusage start; - rusage stop; - - void Start() - { - getrusage(RUSAGE_SELF, &start); - } - - void Stop() - { - getrusage(RUSAGE_SELF, &stop); - } - - float ElapsedMillis() - { - float sec = stop.ru_utime.tv_sec - start.ru_utime.tv_sec; - float usec = stop.ru_utime.tv_usec - start.ru_utime.tv_usec; - - return (sec * 1000) + (usec / 1000); - } - -#endif -}; - -struct GpuTimer -{ - cudaEvent_t start; - cudaEvent_t stop; - - GpuTimer() - { - cudaEventCreate(&start); - cudaEventCreate(&stop); - } - - ~GpuTimer() - { - cudaEventDestroy(start); - cudaEventDestroy(stop); - } - - void Start() - { - cudaEventRecord(start, 0); - } - - void Stop() - { - cudaEventRecord(stop, 0); - } - - float ElapsedMillis() - { - float elapsed; - cudaEventSynchronize(stop); - cudaEventElapsedTime(&elapsed, start, stop); - return elapsed; - } -}; diff --git a/ml-xgboost/cub/test/test_warp_reduce.cu b/ml-xgboost/cub/test/test_warp_reduce.cu deleted file mode 100644 index 1af4c7f..0000000 --- a/ml-xgboost/cub/test/test_warp_reduce.cu +++ /dev/null @@ -1,837 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
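/* [Editor's example] GpuTimer above wraps the standard CUDA event-timing pattern; the elapsed
 * time is only defined once the stop event has been synchronized, which ElapsedMillis() does.
 * Typical usage, assuming the GpuTimer defined in this header (the kernel is illustrative):
 *
 *   #include <cstdio>
 *
 *   __global__ void busy(int *x) { atomicAdd(x, 1); }
 *
 *   int main()
 *   {
 *       int *d = NULL;
 *       cudaMalloc((void**)&d, sizeof(int));
 *       cudaMemset(d, 0, sizeof(int));
 *       GpuTimer timer;
 *       timer.Start();                                    // cudaEventRecord(start, 0)
 *       busy<<<256, 256>>>(d);                            // work being timed, on stream 0
 *       timer.Stop();                                     // cudaEventRecord(stop, 0)
 *       std::printf("%.3f ms\n", timer.ElapsedMillis());  // syncs on stop, then elapsed time
 *       cudaFree(d);
 *       return 0;
 *   }
 */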
- * - ******************************************************************************/ - -/****************************************************************************** - * Test of WarpReduce utilities - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include -#include - -#include -#include - -#include "test_util.h" - -using namespace cub; - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -bool g_verbose = false; -int g_repeat = 0; -CachingDeviceAllocator g_allocator(true); - - -/** - * \brief WrapperFunctor (for precluding test-specialized dispatch to *Sum variants) - */ -template< - typename OpT, - int LOGICAL_WARP_THREADS> -struct WrapperFunctor -{ - OpT op; - int num_valid; - - inline __host__ __device__ WrapperFunctor(OpT op, int num_valid) : op(op), num_valid(num_valid) {} - - template - inline __host__ __device__ T operator()(const T &a, const T &b) const - { -#if CUB_PTX_ARCH != 0 - if ((cub::LaneId() % LOGICAL_WARP_THREADS) >= num_valid) - cub::ThreadTrap(); -#endif - - return op(a, b); - } - -}; - - -//--------------------------------------------------------------------- -// Test kernels -//--------------------------------------------------------------------- - -/** - * Generic reduction - */ -template < - typename T, - typename ReductionOp, - typename WarpReduce, - bool PRIMITIVE = Traits::PRIMITIVE> -struct DeviceTest -{ - static __device__ __forceinline__ T Reduce( - typename WarpReduce::TempStorage &temp_storage, - T &data, - ReductionOp &reduction_op) - { - return WarpReduce(temp_storage).Reduce(data, reduction_op); - } - - static __device__ __forceinline__ T Reduce( - typename WarpReduce::TempStorage &temp_storage, - T &data, - ReductionOp &reduction_op, - const int &valid_warp_threads) - { - return WarpReduce(temp_storage).Reduce(data, reduction_op, valid_warp_threads); - } - - template - static __device__ __forceinline__ T HeadSegmentedReduce( - typename WarpReduce::TempStorage &temp_storage, - T &data, - FlagT &flag, - ReductionOp &reduction_op) - { - return WarpReduce(temp_storage).HeadSegmentedReduce(data, flag, reduction_op); - } - - template - static __device__ __forceinline__ T TailSegmentedReduce( - typename WarpReduce::TempStorage &temp_storage, - T &data, - FlagT &flag, - ReductionOp &reduction_op) - { - return WarpReduce(temp_storage).TailSegmentedReduce(data, flag, reduction_op); - } - -}; - - -/** - * Summation - */ -template < - typename T, - typename WarpReduce> -struct DeviceTest -{ - static __device__ __forceinline__ T Reduce( - typename WarpReduce::TempStorage &temp_storage, - T &data, - Sum &reduction_op) - { - return WarpReduce(temp_storage).Sum(data); - } - - static __device__ __forceinline__ T Reduce( - typename WarpReduce::TempStorage &temp_storage, - T &data, - Sum &reduction_op, - const int &valid_warp_threads) - { - return WarpReduce(temp_storage).Sum(data, valid_warp_threads); - } - - template - static __device__ __forceinline__ T HeadSegmentedReduce( - typename WarpReduce::TempStorage &temp_storage, - T &data, - FlagT &flag, - Sum &reduction_op) - { - return WarpReduce(temp_storage).HeadSegmentedSum(data, flag); - } - - template - static __device__ __forceinline__ T TailSegmentedReduce( - typename WarpReduce::TempStorage &temp_storage, - T &data, - FlagT &flag, - Sum &reduction_op) - { - return 
WarpReduce(temp_storage).TailSegmentedSum(data, flag); - } - -}; - - -/** - * Full-tile warp reduction kernel - */ -template < - int WARPS, - int LOGICAL_WARP_THREADS, - typename T, - typename ReductionOp> -__global__ void FullWarpReduceKernel( - T *d_in, - T *d_out, - ReductionOp reduction_op, - clock_t *d_elapsed) -{ - // Cooperative warp-reduce utility type (1 warp) - typedef WarpReduce WarpReduce; - - // Allocate temp storage in shared memory - __shared__ typename WarpReduce::TempStorage temp_storage[WARPS]; - - // Per-thread tile data - T input = d_in[threadIdx.x]; - - // Record elapsed clocks - __threadfence_block(); // workaround to prevent clock hoisting - clock_t start = clock(); - __threadfence_block(); // workaround to prevent clock hoisting - - // Test warp reduce - int warp_id = threadIdx.x / LOGICAL_WARP_THREADS; - - T output = DeviceTest::Reduce( - temp_storage[warp_id], input, reduction_op); - - // Record elapsed clocks - __threadfence_block(); // workaround to prevent clock hoisting - clock_t stop = clock(); - __threadfence_block(); // workaround to prevent clock hoisting - - *d_elapsed = stop - start; - - // Store aggregate - d_out[threadIdx.x] = (threadIdx.x % LOGICAL_WARP_THREADS == 0) ? - output : - input; -} - -/** - * Partially-full warp reduction kernel - */ -template < - int WARPS, - int LOGICAL_WARP_THREADS, - typename T, - typename ReductionOp> -__global__ void PartialWarpReduceKernel( - T *d_in, - T *d_out, - ReductionOp reduction_op, - clock_t *d_elapsed, - int valid_warp_threads) -{ - // Cooperative warp-reduce utility type - typedef WarpReduce WarpReduce; - - // Allocate temp storage in shared memory - __shared__ typename WarpReduce::TempStorage temp_storage[WARPS]; - - // Per-thread tile data - T input = d_in[threadIdx.x]; - - // Record elapsed clocks - __threadfence_block(); // workaround to prevent clock hoisting - clock_t start = clock(); - __threadfence_block(); // workaround to prevent clock hoisting - - // Test partial-warp reduce - int warp_id = threadIdx.x / LOGICAL_WARP_THREADS; - T output = DeviceTest::Reduce( - temp_storage[warp_id], input, reduction_op, valid_warp_threads); - - // Record elapsed clocks - __threadfence_block(); // workaround to prevent clock hoisting - clock_t stop = clock(); - __threadfence_block(); // workaround to prevent clock hoisting - - *d_elapsed = stop - start; - - // Store aggregate - d_out[threadIdx.x] = (threadIdx.x % LOGICAL_WARP_THREADS == 0) ? 
- output : - input; -} - - -/** - * Head-based segmented warp reduction test kernel - */ -template < - int WARPS, - int LOGICAL_WARP_THREADS, - typename T, - typename FlagT, - typename ReductionOp> -__global__ void WarpHeadSegmentedReduceKernel( - T *d_in, - FlagT *d_head_flags, - T *d_out, - ReductionOp reduction_op, - clock_t *d_elapsed) -{ - // Cooperative warp-reduce utility type - typedef WarpReduce WarpReduce; - - // Allocate temp storage in shared memory - __shared__ typename WarpReduce::TempStorage temp_storage[WARPS]; - - // Per-thread tile data - T input = d_in[threadIdx.x]; - FlagT head_flag = d_head_flags[threadIdx.x]; - - // Record elapsed clocks - __threadfence_block(); // workaround to prevent clock hoisting - clock_t start = clock(); - __threadfence_block(); // workaround to prevent clock hoisting - - // Test segmented warp reduce - int warp_id = threadIdx.x / LOGICAL_WARP_THREADS; - T output = DeviceTest::HeadSegmentedReduce( - temp_storage[warp_id], input, head_flag, reduction_op); - - // Record elapsed clocks - __threadfence_block(); // workaround to prevent clock hoisting - clock_t stop = clock(); - __threadfence_block(); // workaround to prevent clock hoisting - - *d_elapsed = stop - start; - - // Store aggregate - d_out[threadIdx.x] = ((threadIdx.x % LOGICAL_WARP_THREADS == 0) || head_flag) ? - output : - input; -} - - -/** - * Tail-based segmented warp reduction test kernel - */ -template < - int WARPS, - int LOGICAL_WARP_THREADS, - typename T, - typename FlagT, - typename ReductionOp> -__global__ void WarpTailSegmentedReduceKernel( - T *d_in, - FlagT *d_tail_flags, - T *d_out, - ReductionOp reduction_op, - clock_t *d_elapsed) -{ - // Cooperative warp-reduce utility type - typedef WarpReduce WarpReduce; - - // Allocate temp storage in shared memory - __shared__ typename WarpReduce::TempStorage temp_storage[WARPS]; - - // Per-thread tile data - T input = d_in[threadIdx.x]; - FlagT tail_flag = d_tail_flags[threadIdx.x]; - FlagT head_flag = (threadIdx.x == 0) ? - 0 : - d_tail_flags[threadIdx.x - 1]; - - // Record elapsed clocks - __threadfence_block(); // workaround to prevent clock hoisting - clock_t start = clock(); - __threadfence_block(); // workaround to prevent clock hoisting - - // Test segmented warp reduce - int warp_id = threadIdx.x / LOGICAL_WARP_THREADS; - T output = DeviceTest::TailSegmentedReduce( - temp_storage[warp_id], input, tail_flag, reduction_op); - - // Record elapsed clocks - __threadfence_block(); // workaround to prevent clock hoisting - clock_t stop = clock(); - __threadfence_block(); // workaround to prevent clock hoisting - - *d_elapsed = stop - start; - - // Store aggregate - d_out[threadIdx.x] = ((threadIdx.x % LOGICAL_WARP_THREADS == 0) || head_flag) ? 
- output : - input; -} - - -//--------------------------------------------------------------------- -// Host utility subroutines -//--------------------------------------------------------------------- - -/** - * Initialize reduction problem (and solution) - */ -template < - typename T, - typename ReductionOp> -void Initialize( - GenMode gen_mode, - int flag_entropy, - T *h_in, - int *h_flags, - int warps, - int warp_threads, - int valid_warp_threads, - ReductionOp reduction_op, - T *h_head_out, - T *h_tail_out) -{ - for (int i = 0; i < warps * warp_threads; ++i) - { - // Sample a value for this item - InitValue(gen_mode, h_in[i], i); - h_head_out[i] = h_in[i]; - h_tail_out[i] = h_in[i]; - - // Sample whether or not this item will be a segment head - char bits; - RandomBits(bits, flag_entropy); - h_flags[i] = bits & 0x1; - } - - // Accumulate segments (lane 0 of each warp is implicitly a segment head) - for (int warp = 0; warp < warps; ++warp) - { - int warp_offset = warp * warp_threads; - int item_offset = warp_offset + valid_warp_threads - 1; - - // Last item in warp - T head_aggregate = h_in[item_offset]; - T tail_aggregate = h_in[item_offset]; - - if (h_flags[item_offset]) - h_head_out[item_offset] = head_aggregate; - item_offset--; - - // Work backwards - while (item_offset >= warp_offset) - { - if (h_flags[item_offset + 1]) - { - head_aggregate = h_in[item_offset]; - } - else - { - head_aggregate = reduction_op(head_aggregate, h_in[item_offset]); - } - - if (h_flags[item_offset]) - { - h_head_out[item_offset] = head_aggregate; - h_tail_out[item_offset + 1] = tail_aggregate; - tail_aggregate = h_in[item_offset]; - } - else - { - tail_aggregate = reduction_op(tail_aggregate, h_in[item_offset]); - } - - item_offset--; - } - - // Record last segment head_aggregate to head offset - h_head_out[warp_offset] = head_aggregate; - h_tail_out[warp_offset] = tail_aggregate; - } -} - - -/** - * Test warp reduction - */ -template < - int WARPS, - int LOGICAL_WARP_THREADS, - typename T, - typename ReductionOp> -void TestReduce( - GenMode gen_mode, - ReductionOp reduction_op, - int valid_warp_threads = LOGICAL_WARP_THREADS) -{ - const int BLOCK_THREADS = LOGICAL_WARP_THREADS * WARPS; - - // Allocate host arrays - T *h_in = new T[BLOCK_THREADS]; - int *h_flags = new int[BLOCK_THREADS]; - T *h_out = new T[BLOCK_THREADS]; - T *h_tail_out = new T[BLOCK_THREADS]; - - // Initialize problem - Initialize(gen_mode, -1, h_in, h_flags, WARPS, LOGICAL_WARP_THREADS, valid_warp_threads, reduction_op, h_out, h_tail_out); - - // Initialize/clear device arrays - T *d_in = NULL; - T *d_out = NULL; - clock_t *d_elapsed = NULL; - - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * BLOCK_THREADS)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * BLOCK_THREADS)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(clock_t))); - CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * BLOCK_THREADS, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemset(d_out, 0, sizeof(T) * BLOCK_THREADS)); - - if (g_verbose) - { - printf("Data:\n"); - for (int i = 0; i < WARPS; ++i) - DisplayResults(h_in + (i * LOGICAL_WARP_THREADS), valid_warp_threads); - } - - // Run kernel - printf("\nGen-mode %d, %d warps, %d warp threads, %d valid lanes, %s (%d bytes) elements:\n", - gen_mode, - WARPS, - LOGICAL_WARP_THREADS, - valid_warp_threads, - typeid(T).name(), - (int) sizeof(T)); - fflush(stdout); - - if (valid_warp_threads == LOGICAL_WARP_THREADS) - { - // Run full-warp kernel - 
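        // Illustrative note on the launch geometry below: BLOCK_THREADS is defined
        // above as LOGICAL_WARP_THREADS * WARPS, so the single launched block spans
        // every logical warp under test. Assuming, for example, WARPS = 2 and
        // LOGICAL_WARP_THREADS = 16, one block of 32 threads is launched and
        // threadIdx.x = 19 falls in logical warp 19 / 16 = 1.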
FullWarpReduceKernel<<<1, BLOCK_THREADS>>>( - d_in, - d_out, - reduction_op, - d_elapsed); - } - else - { - // Run partial-warp kernel - PartialWarpReduceKernel<<<1, BLOCK_THREADS>>>( - d_in, - d_out, - reduction_op, - d_elapsed, - valid_warp_threads); - } - - CubDebugExit(cudaPeekAtLastError()); - CubDebugExit(cudaDeviceSynchronize()); - - // Copy out and display results - printf("\tReduction results: "); - int compare = CompareDeviceResults(h_out, d_out, BLOCK_THREADS, g_verbose, g_verbose); - printf("%s\n", compare ? "FAIL" : "PASS"); - AssertEquals(0, compare); - printf("\tElapsed clocks: "); - DisplayDeviceResults(d_elapsed, 1); - - // Cleanup - if (h_in) delete[] h_in; - if (h_flags) delete[] h_flags; - if (h_out) delete[] h_out; - if (h_tail_out) delete[] h_tail_out; - if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); - if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); - if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed)); -} - - -/** - * Test warp segmented reduction - */ -template < - int WARPS, - int LOGICAL_WARP_THREADS, - typename T, - typename ReductionOp> -void TestSegmentedReduce( - GenMode gen_mode, - int flag_entropy, - ReductionOp reduction_op) -{ - const int BLOCK_THREADS = LOGICAL_WARP_THREADS * WARPS; - - // Allocate host arrays - int compare; - T *h_in = new T[BLOCK_THREADS]; - int *h_flags = new int[BLOCK_THREADS]; - T *h_head_out = new T[BLOCK_THREADS]; - T *h_tail_out = new T[BLOCK_THREADS]; - - // Initialize problem - Initialize(gen_mode, flag_entropy, h_in, h_flags, WARPS, LOGICAL_WARP_THREADS, LOGICAL_WARP_THREADS, reduction_op, h_head_out, h_tail_out); - - // Initialize/clear device arrays - T *d_in = NULL; - int *d_flags = NULL; - T *d_head_out = NULL; - T *d_tail_out = NULL; - clock_t *d_elapsed = NULL; - - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * BLOCK_THREADS)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_flags, sizeof(int) * BLOCK_THREADS)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_head_out, sizeof(T) * BLOCK_THREADS)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_tail_out, sizeof(T) * BLOCK_THREADS)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(clock_t))); - CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * BLOCK_THREADS, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemcpy(d_flags, h_flags, sizeof(int) * BLOCK_THREADS, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemset(d_head_out, 0, sizeof(T) * BLOCK_THREADS)); - CubDebugExit(cudaMemset(d_tail_out, 0, sizeof(T) * BLOCK_THREADS)); - - if (g_verbose) - { - printf("Data:\n"); - for (int i = 0; i < WARPS; ++i) - DisplayResults(h_in + (i * LOGICAL_WARP_THREADS), LOGICAL_WARP_THREADS); - - printf("\nFlags:\n"); - for (int i = 0; i < WARPS; ++i) - DisplayResults(h_flags + (i * LOGICAL_WARP_THREADS), LOGICAL_WARP_THREADS); - } - - printf("\nGen-mode %d, head flag entropy reduction %d, %d warps, %d warp threads, %s (%d bytes) elements:\n", - gen_mode, - flag_entropy, - WARPS, - LOGICAL_WARP_THREADS, - typeid(T).name(), - (int) sizeof(T)); - fflush(stdout); - - // Run head-based kernel - WarpHeadSegmentedReduceKernel<<<1, BLOCK_THREADS>>>( - d_in, - d_flags, - d_head_out, - reduction_op, - d_elapsed); - - CubDebugExit(cudaPeekAtLastError()); - CubDebugExit(cudaDeviceSynchronize()); - - // Copy out and display results - printf("\tHead-based segmented reduction results: "); - compare = CompareDeviceResults(h_head_out, d_head_out, BLOCK_THREADS, g_verbose, g_verbose); - printf("%s\n", compare ? 
"FAIL" : "PASS"); - AssertEquals(0, compare); - printf("\tElapsed clocks: "); - DisplayDeviceResults(d_elapsed, 1); - - // Run tail-based kernel - WarpTailSegmentedReduceKernel<<<1, BLOCK_THREADS>>>( - d_in, - d_flags, - d_tail_out, - reduction_op, - d_elapsed); - - CubDebugExit(cudaPeekAtLastError()); - CubDebugExit(cudaDeviceSynchronize()); - - // Copy out and display results - printf("\tTail-based segmented reduction results: "); - compare = CompareDeviceResults(h_tail_out, d_tail_out, BLOCK_THREADS, g_verbose, g_verbose); - printf("%s\n", compare ? "FAIL" : "PASS"); - AssertEquals(0, compare); - printf("\tElapsed clocks: "); - DisplayDeviceResults(d_elapsed, 1); - - // Cleanup - if (h_in) delete[] h_in; - if (h_flags) delete[] h_flags; - if (h_head_out) delete[] h_head_out; - if (h_tail_out) delete[] h_tail_out; - if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); - if (d_flags) CubDebugExit(g_allocator.DeviceFree(d_flags)); - if (d_head_out) CubDebugExit(g_allocator.DeviceFree(d_head_out)); - if (d_tail_out) CubDebugExit(g_allocator.DeviceFree(d_tail_out)); - if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed)); -} - - -/** - * Run battery of tests for different full and partial tile sizes - */ -template < - int WARPS, - int LOGICAL_WARP_THREADS, - typename T, - typename ReductionOp> -void Test( - GenMode gen_mode, - ReductionOp reduction_op) -{ - // Partial tiles - for ( - int valid_warp_threads = 1; - valid_warp_threads < LOGICAL_WARP_THREADS; - valid_warp_threads += CUB_MAX(1, LOGICAL_WARP_THREADS / 5)) - { - // Without wrapper (to test non-excepting PTX POD-op specializations) - TestReduce(gen_mode, reduction_op, valid_warp_threads); - - // With wrapper to ensure no ops called on OOB lanes - WrapperFunctor wrapped_op(reduction_op, valid_warp_threads); - TestReduce(gen_mode, wrapped_op, valid_warp_threads); - } - - // Full tile - TestReduce(gen_mode, reduction_op, LOGICAL_WARP_THREADS); - - // Segmented reduction with different head flags - for (int flag_entropy = 0; flag_entropy < 10; ++flag_entropy) - { - TestSegmentedReduce(gen_mode, flag_entropy, reduction_op); - } -} - - -/** - * Run battery of tests for different data types and reduce ops - */ -template < - int WARPS, - int LOGICAL_WARP_THREADS> -void Test(GenMode gen_mode) -{ - // primitive - Test( gen_mode, Sum()); - Test( gen_mode, Sum()); - Test( gen_mode, Sum()); - Test( gen_mode, Sum()); - - Test( gen_mode, Sum()); - Test( gen_mode, Sum()); - Test( gen_mode, Sum()); - Test( gen_mode, Sum()); - - if (gen_mode != RANDOM) - { - Test( gen_mode, Sum()); - Test( gen_mode, Sum()); - } - - // primitive (alternative reduce op) - Test( gen_mode, Max()); - Test( gen_mode, Max()); - Test( gen_mode, Max()); - Test( gen_mode, Max()); - - // vec-1 - Test( gen_mode, Sum()); - - // vec-2 - Test( gen_mode, Sum()); - Test( gen_mode, Sum()); - Test( gen_mode, Sum()); - Test( gen_mode, Sum()); - - // vec-4 - Test( gen_mode, Sum()); - Test( gen_mode, Sum()); - Test( gen_mode, Sum()); - Test( gen_mode, Sum()); - - // complex - Test( gen_mode, Sum()); - Test( gen_mode, Sum()); -} - - -/** - * Run battery of tests for different problem generation options - */ -template < - int WARPS, - int LOGICAL_WARP_THREADS> -void Test() -{ - Test(UNIFORM); - Test(INTEGER_SEED); - Test(RANDOM); -} - - -/** - * Run battery of tests for different number of active warps - */ -template -void Test() -{ - Test<1, LOGICAL_WARP_THREADS>(); - Test<2, LOGICAL_WARP_THREADS>(); -} - - -/** - * Main - */ -int main(int argc, char** argv) -{ - // Initialize 
command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - args.GetCmdLineArgument("repeat", g_repeat); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--device=] " - "[--repeat=]" - "[--v] " - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - -#ifdef QUICK_TEST - - // Compile/run quick tests - TestReduce<1, 32, int>(UNIFORM, Sum()); - - TestReduce<1, 32, double>(UNIFORM, Sum()); - TestReduce<2, 16, TestBar>(UNIFORM, Sum()); - TestSegmentedReduce<1, 32, int>(UNIFORM, 1, Sum()); - -#else - - // Compile/run thorough tests - for (int i = 0; i <= g_repeat; ++i) - { - // Test logical warp sizes - Test<32>(); - Test<16>(); - Test<9>(); - Test<7>(); - } - -#endif - - return 0; -} - - - - diff --git a/ml-xgboost/cub/test/test_warp_scan.cu b/ml-xgboost/cub/test/test_warp_scan.cu deleted file mode 100644 index 48ea768..0000000 --- a/ml-xgboost/cub/test/test_warp_scan.cu +++ /dev/null @@ -1,630 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - ******************************************************************************/ - -/****************************************************************************** - * Test of WarpScan utilities - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include -#include - -#include -#include - -#include "test_util.h" - -using namespace cub; - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -bool g_verbose = false; -int g_repeat = 0; -CachingDeviceAllocator g_allocator(true); - - -/** - * Primitive variant to test - */ -enum TestMode -{ - BASIC, - AGGREGATE, -}; - - - -/** - * \brief WrapperFunctor (for precluding test-specialized dispatch to *Sum variants) - */ -template -struct WrapperFunctor -{ - OpT op; - - WrapperFunctor(OpT op) : op(op) {} - - template - __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const - { - return op(a, b); - } -}; - -//--------------------------------------------------------------------- -// Test kernels -//--------------------------------------------------------------------- - -/// Exclusive scan basic -template -__device__ __forceinline__ void DeviceTest( - WarpScanT &warp_scan, - T &data, - T &initial_value, - ScanOpT &scan_op, - T &aggregate, - Int2Type test_mode, - IsPrimitiveT is_primitive) -{ - // Test basic warp scan - warp_scan.ExclusiveScan(data, data, initial_value, scan_op); -} - -/// Exclusive scan aggregate -template < - typename WarpScanT, - typename T, - typename ScanOpT, - typename IsPrimitiveT> -__device__ __forceinline__ void DeviceTest( - WarpScanT &warp_scan, - T &data, - T &initial_value, - ScanOpT &scan_op, - T &aggregate, - Int2Type test_mode, - IsPrimitiveT is_primitive) -{ - // Test with cumulative aggregate - warp_scan.ExclusiveScan(data, data, initial_value, scan_op, aggregate); -} - - -/// Exclusive sum basic -template < - typename WarpScanT, - typename T> -__device__ __forceinline__ void DeviceTest( - WarpScanT &warp_scan, - T &data, - T &initial_value, - Sum &scan_op, - T &aggregate, - Int2Type test_mode, - Int2Type is_primitive) -{ - // Test basic warp scan - warp_scan.ExclusiveSum(data, data); -} - - -/// Exclusive sum aggregate -template < - typename WarpScanT, - typename T> -__device__ __forceinline__ void DeviceTest( - WarpScanT &warp_scan, - T &data, - T &initial_value, - Sum &scan_op, - T &aggregate, - Int2Type test_mode, - Int2Type is_primitive) -{ - // Test with cumulative aggregate - warp_scan.ExclusiveSum(data, data, aggregate); -} - - -/// Inclusive scan basic -template < - typename WarpScanT, - typename T, - typename ScanOpT, - typename IsPrimitiveT> -__device__ __forceinline__ void DeviceTest( - WarpScanT &warp_scan, - T &data, - NullType &initial_value, - ScanOpT &scan_op, - T &aggregate, - Int2Type test_mode, - IsPrimitiveT is_primitive) -{ - // Test basic warp scan - warp_scan.InclusiveScan(data, data, scan_op); -} - -/// Inclusive scan aggregate -template < - typename WarpScanT, - typename T, - typename ScanOpT, - typename IsPrimitiveT> -__device__ __forceinline__ void DeviceTest( - WarpScanT &warp_scan, - T &data, - NullType &initial_value, - ScanOpT &scan_op, - T &aggregate, - Int2Type test_mode, - IsPrimitiveT is_primitive) -{ - // Test with cumulative aggregate - warp_scan.InclusiveScan(data, data, scan_op, aggregate); -} - -/// Inclusive sum 
basic -template < - typename WarpScanT, - typename T, - typename InitialValueT> -__device__ __forceinline__ void DeviceTest( - WarpScanT &warp_scan, - T &data, - NullType &initial_value, - Sum &scan_op, - T &aggregate, - Int2Type test_mode, - Int2Type is_primitive) -{ - // Test basic warp scan - warp_scan.InclusiveSum(data, data); -} - -/// Inclusive sum aggregate -template < - typename WarpScanT, - typename T, - typename InitialValueT> -__device__ __forceinline__ void DeviceTest( - WarpScanT &warp_scan, - T &data, - NullType &initial_value, - Sum &scan_op, - T &aggregate, - Int2Type test_mode, - Int2Type is_primitive) -{ - // Test with cumulative aggregate - warp_scan.InclusiveSum(data, data, aggregate); -} - - -/** - * WarpScan test kernel - */ -template < - int LOGICAL_WARP_THREADS, - TestMode TEST_MODE, - typename T, - typename ScanOpT, - typename InitialValueT> -__global__ void WarpScanKernel( - T *d_in, - T *d_out, - T *d_aggregate, - ScanOpT scan_op, - InitialValueT initial_value, - clock_t *d_elapsed) -{ - // Cooperative warp-scan utility type (1 warp) - typedef WarpScan WarpScanT; - - // Allocate temp storage in shared memory - __shared__ typename WarpScanT::TempStorage temp_storage; - - // Per-thread tile data - T data = d_in[threadIdx.x]; - - // Start cycle timer - __threadfence_block(); // workaround to prevent clock hoisting - clock_t start = clock(); - __threadfence_block(); // workaround to prevent clock hoisting - - T aggregate; - - // Test scan - WarpScanT warp_scan(temp_storage); - DeviceTest( - warp_scan, - data, - initial_value, - scan_op, - aggregate, - Int2Type(), - Int2Type::PRIMITIVE>()); - - // Stop cycle timer - __threadfence_block(); // workaround to prevent clock hoisting - clock_t stop = clock(); - __threadfence_block(); // workaround to prevent clock hoisting - - // Store data - d_out[threadIdx.x] = data; - - if (TEST_MODE != BASIC) - { - // Store aggregate - d_aggregate[threadIdx.x] = aggregate; - } - - // Store time - if (threadIdx.x == 0) - { - *d_elapsed = (start > stop) ? 
start - stop : stop - start; - } -} - - -//--------------------------------------------------------------------- -// Host utility subroutines -//--------------------------------------------------------------------- - -/** - * Initialize exclusive-scan problem (and solution) - */ -template < - typename T, - typename ScanOpT> -T Initialize( - GenMode gen_mode, - T *h_in, - T *h_reference, - int num_items, - ScanOpT scan_op, - T initial_value) -{ - InitValue(gen_mode, h_in[0], 0); - - T block_aggregate = h_in[0]; - h_reference[0] = initial_value; - T inclusive = scan_op(initial_value, h_in[0]); - - for (int i = 1; i < num_items; ++i) - { - InitValue(gen_mode, h_in[i], i); - h_reference[i] = inclusive; - inclusive = scan_op(inclusive, h_in[i]); - block_aggregate = scan_op(block_aggregate, h_in[i]); - } - - return block_aggregate; -} - - -/** - * Initialize inclusive-scan problem (and solution) - */ -template < - typename T, - typename ScanOpT> -T Initialize( - GenMode gen_mode, - T *h_in, - T *h_reference, - int num_items, - ScanOpT scan_op, - NullType) -{ - InitValue(gen_mode, h_in[0], 0); - - T block_aggregate = h_in[0]; - T inclusive = h_in[0]; - h_reference[0] = inclusive; - - for (int i = 1; i < num_items; ++i) - { - InitValue(gen_mode, h_in[i], i); - inclusive = scan_op(inclusive, h_in[i]); - block_aggregate = scan_op(block_aggregate, h_in[i]); - h_reference[i] = inclusive; - } - - return block_aggregate; -} - - -/** - * Test warp scan - */ -template < - int LOGICAL_WARP_THREADS, - TestMode TEST_MODE, - typename T, - typename ScanOpT, - typename InitialValueT> // NullType implies inclusive-scan, otherwise inclusive scan -void Test( - GenMode gen_mode, - ScanOpT scan_op, - InitialValueT initial_value) -{ - // Allocate host arrays - T *h_in = new T[LOGICAL_WARP_THREADS]; - T *h_reference = new T[LOGICAL_WARP_THREADS]; - T *h_aggregate = new T[LOGICAL_WARP_THREADS]; - - // Initialize problem - T aggregate = Initialize( - gen_mode, - h_in, - h_reference, - LOGICAL_WARP_THREADS, - scan_op, - initial_value); - - if (g_verbose) - { - printf("Input: \n"); - DisplayResults(h_in, LOGICAL_WARP_THREADS); - printf("\n"); - } - - for (int i = 0; i < LOGICAL_WARP_THREADS; ++i) - { - h_aggregate[i] = aggregate; - } - - // Initialize/clear device arrays - T *d_in = NULL; - T *d_out = NULL; - T *d_aggregate = NULL; - clock_t *d_elapsed = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * LOGICAL_WARP_THREADS)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * (LOGICAL_WARP_THREADS + 1))); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_aggregate, sizeof(T) * LOGICAL_WARP_THREADS)); - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(clock_t))); - CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * LOGICAL_WARP_THREADS, cudaMemcpyHostToDevice)); - CubDebugExit(cudaMemset(d_out, 0, sizeof(T) * (LOGICAL_WARP_THREADS + 1))); - CubDebugExit(cudaMemset(d_aggregate, 0, sizeof(T) * LOGICAL_WARP_THREADS)); - - // Run kernel - printf("Test-mode %d (%s), gen-mode %d (%s), %s warpscan, %d warp threads, %s (%d bytes) elements:\n", - TEST_MODE, typeid(TEST_MODE).name(), - gen_mode, typeid(gen_mode).name(), - (Equals::VALUE) ? 
"Inclusive" : "Exclusive", - LOGICAL_WARP_THREADS, - typeid(T).name(), - (int) sizeof(T)); - fflush(stdout); - - // Run aggregate/prefix kernel - WarpScanKernel<<<1, LOGICAL_WARP_THREADS>>>( - d_in, - d_out, - d_aggregate, - scan_op, - initial_value, - d_elapsed); - - printf("\tElapsed clocks: "); - DisplayDeviceResults(d_elapsed, 1); - - CubDebugExit(cudaPeekAtLastError()); - CubDebugExit(cudaDeviceSynchronize()); - - // Copy out and display results - printf("\tScan results: "); - int compare = CompareDeviceResults(h_reference, d_out, LOGICAL_WARP_THREADS, g_verbose, g_verbose); - printf("%s\n", compare ? "FAIL" : "PASS"); - AssertEquals(0, compare); - - // Copy out and display aggregate - if (TEST_MODE == AGGREGATE) - { - printf("\tScan aggregate: "); - compare = CompareDeviceResults(h_aggregate, d_aggregate, LOGICAL_WARP_THREADS, g_verbose, g_verbose); - printf("%s\n", compare ? "FAIL" : "PASS"); - AssertEquals(0, compare); - } - - // Cleanup - if (h_in) delete[] h_in; - if (h_reference) delete[] h_reference; - if (h_aggregate) delete[] h_aggregate; - if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); - if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); - if (d_aggregate) CubDebugExit(g_allocator.DeviceFree(d_aggregate)); - if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed)); -} - - -/** - * Run battery of tests for different primitive variants - */ -template < - int LOGICAL_WARP_THREADS, - typename ScanOpT, - typename T> -void Test( - GenMode gen_mode, - ScanOpT scan_op, - T initial_value) -{ - // Exclusive - Test(gen_mode, scan_op, T()); - Test(gen_mode, scan_op, T()); - - // Exclusive (non-specialized, so we can use initial-value) - Test(gen_mode, WrapperFunctor(scan_op), initial_value); - Test(gen_mode, WrapperFunctor(scan_op), initial_value); - - // Inclusive - Test(gen_mode, scan_op, NullType()); - Test(gen_mode, scan_op, NullType()); -} - - -/** - * Run battery of tests for different data types and scan ops - */ -template -void Test(GenMode gen_mode) -{ - // Get device ordinal - int device_ordinal; - CubDebugExit(cudaGetDevice(&device_ordinal)); - - // Get ptx version - int ptx_version; - CubDebugExit(PtxVersion(ptx_version)); - - // primitive - Test(gen_mode, Sum(), (char) 99); - Test(gen_mode, Sum(), (short) 99); - Test(gen_mode, Sum(), (int) 99); - Test(gen_mode, Sum(), (long) 99); - Test(gen_mode, Sum(), (long long) 99); - if (gen_mode != RANDOM) { - // Only test numerically stable inputs - Test(gen_mode, Sum(), (float) 99); - if (ptx_version > 100) - Test(gen_mode, Sum(), (double) 99); - } - - // primitive (alternative scan op) - Test(gen_mode, Max(), (unsigned char) 99); - Test(gen_mode, Max(), (unsigned short) 99); - Test(gen_mode, Max(), (unsigned int) 99); - Test(gen_mode, Max(), (unsigned long long) 99); - - // vec-2 - Test(gen_mode, Sum(), make_uchar2(17, 21)); - Test(gen_mode, Sum(), make_ushort2(17, 21)); - Test(gen_mode, Sum(), make_uint2(17, 21)); - Test(gen_mode, Sum(), make_ulong2(17, 21)); - Test(gen_mode, Sum(), make_ulonglong2(17, 21)); - if (gen_mode != RANDOM) { - // Only test numerically stable inputs - Test(gen_mode, Sum(), make_float2(17, 21)); - if (ptx_version > 100) - Test(gen_mode, Sum(), make_double2(17, 21)); - } - - // vec-4 - Test(gen_mode, Sum(), make_char4(17, 21, 32, 85)); - Test(gen_mode, Sum(), make_short4(17, 21, 32, 85)); - Test(gen_mode, Sum(), make_int4(17, 21, 32, 85)); - Test(gen_mode, Sum(), make_long4(17, 21, 32, 85)); - Test(gen_mode, Sum(), make_longlong4(17, 21, 32, 85)); - if (gen_mode != RANDOM) { - // Only 
test numerically stable inputs - Test(gen_mode, Sum(), make_float4(17, 21, 32, 85)); - if (ptx_version > 100) - Test(gen_mode, Sum(), make_double4(17, 21, 32, 85)); - } - - // complex - Test(gen_mode, Sum(), TestFoo::MakeTestFoo(17, 21, 32, 85)); - Test(gen_mode, Sum(), TestBar(17, 21)); - -} - - -/** - * Run battery of tests for different problem generation options - */ -template -void Test() -{ - Test(UNIFORM); - Test(INTEGER_SEED); - Test(RANDOM); -} - - -/** - * Main - */ -int main(int argc, char** argv) -{ - // Initialize command line - CommandLineArgs args(argc, argv); - g_verbose = args.CheckCmdLineFlag("v"); - args.GetCmdLineArgument("repeat", g_repeat); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--device=] " - "[--repeat=]" - "[--v] " - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - -#ifdef QUICK_TEST - - // Compile/run quick tests - Test<32, AGGREGATE, int>(UNIFORM, Sum(), (int) 0); - Test<32, AGGREGATE, float>(UNIFORM, Sum(), (float) 0); - Test<32, AGGREGATE, long long>(UNIFORM, Sum(), (long long) 0); - Test<32, AGGREGATE, double>(UNIFORM, Sum(), (double) 0); - - typedef KeyValuePair T; - cub::Sum sum_op; - Test<32, AGGREGATE, T>(UNIFORM, ReduceBySegmentOp(sum_op), T()); - -#else - - // Compile/run thorough tests - for (int i = 0; i <= g_repeat; ++i) - { - // Test logical warp sizes - Test<32>(); - Test<16>(); - Test<9>(); - Test<7>(); - } - -#endif - - return 0; -} - - - - diff --git a/ml-xgboost/cub/tune/.gitignore b/ml-xgboost/cub/tune/.gitignore deleted file mode 100644 index 5e56e04..0000000 --- a/ml-xgboost/cub/tune/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/bin diff --git a/ml-xgboost/cub/tune/Makefile b/ml-xgboost/cub/tune/Makefile deleted file mode 100644 index 331a356..0000000 --- a/ml-xgboost/cub/tune/Makefile +++ /dev/null @@ -1,192 +0,0 @@ -#/****************************************************************************** -# * Copyright (c) 2011, Duane Merrill. All rights reserved. -# * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. -# * -# * Redistribution and use in source and binary forms, with or without -# * modification, are permitted provided that the following conditions are met: -# * * Redistributions of source code must retain the above copyright -# * notice, this list of conditions and the following disclaimer. -# * * Redistributions in binary form must reproduce the above copyright -# * notice, this list of conditions and the following disclaimer in the -# * documentation and/or other materials provided with the distribution. -# * * Neither the name of the NVIDIA CORPORATION nor the -# * names of its contributors may be used to endorse or promote products -# * derived from this software without specific prior written permission. -# * -# * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY -# * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -# * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# * -#******************************************************************************/ - -#------------------------------------------------------------------------------- -# Build script for project -#------------------------------------------------------------------------------- - -NVCC = "$(shell which nvcc)" -NVCC_VERSION = $(strip $(shell nvcc --version | grep release | sed 's/.*release //' | sed 's/,.*//')) - -# detect OS -OSUPPER = $(shell uname -s 2>/dev/null | tr [:lower:] [:upper:]) - -#------------------------------------------------------------------------------- -# Libs -#------------------------------------------------------------------------------- - - -#------------------------------------------------------------------------------- -# Includes -#------------------------------------------------------------------------------- - -INC = -I. -I.. -I../test - -#------------------------------------------------------------------------------- -# Libs -#------------------------------------------------------------------------------- - -LIBS += -lcudart - -#------------------------------------------------------------------------------- -# Defines -#------------------------------------------------------------------------------- - -DEFINES = - -#------------------------------------------------------------------------------- -# SM Arch -#------------------------------------------------------------------------------- - -ifdef sm - SM_ARCH = $(sm) -else - SM_ARCH = 200 -endif - -# Only one arch per tuning binary -ifeq (350, $(findstring 350, $(SM_ARCH))) - SM_TARGETS = -arch=sm_35 - SM_ARCH = 350 -endif -ifeq (300, $(findstring 300, $(SM_ARCH))) - SM_TARGETS = -arch=sm_30 - SM_ARCH = 300 -endif -ifeq (200, $(findstring 200, $(SM_ARCH))) - SM_TARGETS = -arch=sm_20 - SM_ARCH = 200 -endif -ifeq (130, $(findstring 130, $(SM_ARCH))) - SM_TARGETS = -arch=sm_13 - SM_ARCH = 130 -endif -ifeq (110, $(findstring 110, $(SM_ARCH))) - SM_TARGETS = -arch=sm_11 - SM_ARCH = 110 -endif -ifeq (100, $(findstring 100, $(SM_ARCH))) - SM_TARGETS = -arch=sm_10 - SM_ARCH = 100 -endif - - -#------------------------------------------------------------------------------- -# Compiler Flags -#------------------------------------------------------------------------------- - -NVCCFLAGS = -Xptxas -v -Xcudafe -\# - -# Help the compiler/linker work with huge numbers of kernels on Windows -ifeq (WIN_NT, $(findstring WIN_NT, $(OSUPPER))) - NVCCFLAGS += -Xcompiler /bigobj -Xcompiler /Zm500 -endif - -# 32/64-bit (32-bit device pointers by default) -ifeq ($(force32), 1) - CPU_ARCH = -m32 - CPU_ARCH_SUFFIX = i386 -else - CPU_ARCH = -m64 - CPU_ARCH_SUFFIX = x86_64 -endif - -# CUDA ABI enable/disable (enabled by default) -ifneq ($(abi), 0) - ABI_SUFFIX = abi -else - NVCCFLAGS += -Xptxas -abi=no - ABI_SUFFIX = noabi -endif - -# NVVM/Open64 middle-end compiler (nvvm by default) -ifeq ($(open64), 1) - NVCCFLAGS += -open64 - PTX_SUFFIX = open64 -else - PTX_SUFFIX = nvvm -endif - -# Verbose toolchain output from nvcc 
-ifeq ($(verbose), 1) - NVCCFLAGS += -v -endif - -# Keep intermediate compilation artifacts -ifeq ($(keep), 1) - NVCCFLAGS += -keep -endif - -# Data type size to compile a schmoo binary for -ifdef tunesize - TUNE_SIZE = $(tunesize) -else - TUNE_SIZE = 4 -endif - - -SUFFIX = $(TUNE_SIZE)B_sm$(SM_ARCH)_$(PTX_SUFFIX)_$(NVCC_VERSION)_$(ABI_SUFFIX)_$(CPU_ARCH_SUFFIX) - -#------------------------------------------------------------------------------- -# Dependency Lists -#------------------------------------------------------------------------------- - -rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d)) - -DEPS = ./Makefile \ - ../test/test_util.h \ - $(call rwildcard,../cub/,*.cuh) - - -#------------------------------------------------------------------------------- -# make default -#------------------------------------------------------------------------------- - -default: - - -#------------------------------------------------------------------------------- -# make clean -#------------------------------------------------------------------------------- - -clean : - rm -f bin/*$(CPU_ARCH_SUFFIX)* - rm -f *.i* *.cubin *.cu.c *.cudafe* *.fatbin.c *.ptx *.hash *.cu.cpp *.o - - - -#------------------------------------------------------------------------------- -# make tune_device_reduce -#------------------------------------------------------------------------------- - -tune_device_reduce: bin/tune_device_reduce_$(SUFFIX) - -bin/tune_device_reduce_$(SUFFIX) : tune_device_reduce.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/tune_device_reduce_$(SUFFIX) tune_device_reduce.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 -DTUNE_ARCH=$(SM_ARCH) -DTUNE_SIZE=$(TUNE_SIZE) - diff --git a/ml-xgboost/cub/tune/tune_device_reduce.cu b/ml-xgboost/cub/tune/tune_device_reduce.cu deleted file mode 100644 index da20d00..0000000 --- a/ml-xgboost/cub/tune/tune_device_reduce.cu +++ /dev/null @@ -1,763 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2011, Duane Merrill. All rights reserved. - * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -/****************************************************************************** - * Evaluates different tuning configurations of DeviceReduce. - * - * The best way to use this program: - * (1) Find the best all-around single-block tune for a given arch. - * For example, 100 samples [1 ..512], 100 timing iterations per config per sample: - * ./bin/tune_device_reduce_sm200_nvvm_5.0_abi_i386 --i=100 --s=100 --n=512 --single --device=0 - * (2) Update the single tune in device_reduce.cuh - * (3) Find the best all-around multi-block tune for a given arch. - * For example, 100 samples [single-block tile-size .. 50,331,648], 100 timing iterations per config per sample: - * ./bin/tune_device_reduce_sm200_nvvm_5.0_abi_i386 --i=100 --s=100 --device=0 - * (4) Update the multi-block tune in device_reduce.cuh - * - ******************************************************************************/ - -// Ensure printing of CUDA runtime errors to console -#define CUB_STDERR - -#include -#include -#include -#include -#include "../test/test_util.h" - -using namespace cub; -using namespace std; - - -//--------------------------------------------------------------------- -// Globals, constants and typedefs -//--------------------------------------------------------------------- - -#ifndef TUNE_ARCH -#define TUNE_ARCH 100 -#endif - -int g_max_items = 48 * 1024 * 1024; -int g_samples = 100; -int g_timing_iterations = 2; -bool g_verbose = false; -bool g_single = false; -bool g_verify = true; -CachingDeviceAllocator g_allocator; - - -//--------------------------------------------------------------------- -// Host utility subroutines -//--------------------------------------------------------------------- - -/** - * Initialize problem - */ -template -void Initialize( - GenMode gen_mode, - T *h_in, - int num_items) -{ - for (int i = 0; i < num_items; ++i) - { - InitValue(gen_mode, h_in[i], i); - } -} - -/** - * Sequential reduction - */ -template -T Reduce( - T *h_in, - ReductionOp reduction_op, - int num_items) -{ - T retval = h_in[0]; - for (int i = 1; i < num_items; ++i) - retval = reduction_op(retval, h_in[i]); - - return retval; -} - - - -//--------------------------------------------------------------------- -// Full tile test generation -//--------------------------------------------------------------------- - - - -/** - * Wrapper structure for generating and running different tuning configurations - */ -template < - typename T, - typename OffsetT, - typename ReductionOp> -struct Schmoo -{ - //--------------------------------------------------------------------- - // Types - //--------------------------------------------------------------------- - - /// Pairing of kernel function pointer and corresponding dispatch params - template - struct DispatchTuple - { - KernelPtr kernel_ptr; - DeviceReduce::KernelDispachParams params; - - float avg_throughput; - float best_avg_throughput; - OffsetT best_size; - float 
hmean_speedup; - - - DispatchTuple() : - kernel_ptr(0), - params(DeviceReduce::KernelDispachParams()), - avg_throughput(0.0), - best_avg_throughput(0.0), - hmean_speedup(0.0), - best_size(0) - {} - }; - - /** - * Comparison operator for DispatchTuple.avg_throughput - */ - template - static bool MinSpeedup(const Tuple &a, const Tuple &b) - { - float delta = a.hmean_speedup - b.hmean_speedup; - - return ((delta < 0.02) && (delta > -0.02)) ? - (a.best_avg_throughput < b.best_avg_throughput) : // Negligible average performance differences: defer to best performance - (a.hmean_speedup < b.hmean_speedup); - } - - - - /// Multi-block reduction kernel type and dispatch tuple type - typedef void (*MultiBlockDeviceReduceKernelPtr)(T*, T*, OffsetT, GridEvenShare, GridQueue, ReductionOp); - typedef DispatchTuple MultiDispatchTuple; - - /// Single-block reduction kernel type and dispatch tuple type - typedef void (*SingleBlockDeviceReduceKernelPtr)(T*, T*, OffsetT, ReductionOp); - typedef DispatchTuple SingleDispatchTuple; - - - //--------------------------------------------------------------------- - // Fields - //--------------------------------------------------------------------- - - vector multi_kernels; // List of generated multi-block kernels - vector single_kernels; // List of generated single-block kernels - - - //--------------------------------------------------------------------- - // Kernel enumeration methods - //--------------------------------------------------------------------- - - /** - * Must have smem that fits in the SM - * Must have vector load length that divides items per thread - */ - template - struct SmemSize - { - enum - { - BYTES = sizeof(typename BlockReduceTiles::TempStorage), - IS_OK = ((BYTES < ArchProps::SMEM_BYTES) && - (TilesReducePolicy::ITEMS_PER_THREAD % TilesReducePolicy::VECTOR_LOAD_LENGTH == 0)) - }; - }; - - - /** - * Specialization that allows kernel generation with the specified TilesReducePolicy - */ - template < - typename TilesReducePolicy, - bool IsOk = SmemSize::IS_OK> - struct Ok - { - /// Enumerate multi-block kernel and add to the list - template - static void GenerateMulti( - KernelsVector &multi_kernels, - int subscription_factor) - { - MultiDispatchTuple tuple; - tuple.params.template Init(subscription_factor); - tuple.kernel_ptr = ReducePrivatizedKernel; - multi_kernels.push_back(tuple); - } - - - /// Enumerate single-block kernel and add to the list - template - static void GenerateSingle(KernelsVector &single_kernels) - { - SingleDispatchTuple tuple; - tuple.params.template Init(); - tuple.kernel_ptr = ReduceSingleKernel; - single_kernels.push_back(tuple); - } - }; - - /** - * Specialization that rejects kernel generation with the specified TilesReducePolicy - */ - template - struct Ok - { - template - static void GenerateMulti(KernelsVector &multi_kernels, int subscription_factor) {} - - template - static void GenerateSingle(KernelsVector &single_kernels) {} - }; - - - /// Enumerate block-scheduling variations - template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - int VECTOR_LOAD_LENGTH, - BlockReduceAlgorithm BLOCK_ALGORITHM, - CacheLoadModifier LOAD_MODIFIER> - void Enumerate() - { - // Multi-block kernels - Ok >::GenerateMulti(multi_kernels, 1); - Ok >::GenerateMulti(multi_kernels, 2); - Ok >::GenerateMulti(multi_kernels, 4); - Ok >::GenerateMulti(multi_kernels, 8); -#if TUNE_ARCH >= 200 - Ok >::GenerateMulti(multi_kernels, 1); -#endif - - // Single-block kernels - Ok >::GenerateSingle(single_kernels); - } - - - /// Enumerate load 
modifier variations - template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - int VECTOR_LOAD_LENGTH, - BlockReduceAlgorithm BLOCK_ALGORITHM> - void Enumerate() - { - Enumerate(); -#if TUNE_ARCH >= 350 - Enumerate(); -#endif - } - - - /// Enumerate block algorithms - template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD, - int VECTOR_LOAD_LENGTH> - void Enumerate() - { - Enumerate(); - Enumerate(); - } - - - /// Enumerate vectorization variations - template < - int BLOCK_THREADS, - int ITEMS_PER_THREAD> - void Enumerate() - { - Enumerate(); - Enumerate(); - Enumerate(); - } - - - /// Enumerate thread-granularity variations - template - void Enumerate() - { - Enumerate(); - Enumerate(); - Enumerate(); - - Enumerate(); - Enumerate(); - Enumerate(); - - Enumerate(); - Enumerate(); - Enumerate(); - - Enumerate(); - Enumerate(); - Enumerate(); - - Enumerate(); - Enumerate(); - Enumerate(); - } - - - /// Enumerate block size variations - void Enumerate() - { - printf("\nEnumerating kernels\n"); fflush(stdout); - - Enumerate<32>(); - Enumerate<64>(); - Enumerate<96>(); - Enumerate<128>(); - Enumerate<160>(); - Enumerate<192>(); - Enumerate<256>(); - Enumerate<512>(); - } - - - //--------------------------------------------------------------------- - // Test methods - //--------------------------------------------------------------------- - - /** - * Test a configuration - */ - void TestConfiguration( - MultiDispatchTuple &multi_dispatch, - SingleDispatchTuple &single_dispatch, - T* d_in, - T* d_out, - T* h_reference, - OffsetT num_items, - ReductionOp reduction_op) - { - // Clear output - if (g_verify) CubDebugExit(cudaMemset(d_out, 0, sizeof(T))); - - // Allocate temporary storage - void *d_temp_storage = NULL; - size_t temp_storage_bytes = 0; - CubDebugExit(DeviceReduce::Dispatch( - d_temp_storage, - temp_storage_bytes, - multi_dispatch.kernel_ptr, - single_dispatch.kernel_ptr, - FillAndResetDrainKernel, - multi_dispatch.params, - single_dispatch.params, - d_in, - d_out, - num_items, - reduction_op)); - CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); - - // Warmup/correctness iteration - CubDebugExit(DeviceReduce::Dispatch( - d_temp_storage, - temp_storage_bytes, - multi_dispatch.kernel_ptr, - single_dispatch.kernel_ptr, - FillAndResetDrainKernel, - multi_dispatch.params, - single_dispatch.params, - d_in, - d_out, - num_items, - reduction_op)); - - if (g_verify) CubDebugExit(cudaDeviceSynchronize()); - - // Copy out and display results - int compare = (g_verify) ? 
- CompareDeviceResults(h_reference, d_out, 1, true, false) : - 0; - - // Performance - GpuTimer gpu_timer; - float elapsed_millis = 0.0; - for (int i = 0; i < g_timing_iterations; i++) - { - gpu_timer.Start(); - - CubDebugExit(DeviceReduce::Dispatch( - d_temp_storage, - temp_storage_bytes, - multi_dispatch.kernel_ptr, - single_dispatch.kernel_ptr, - FillAndResetDrainKernel, - multi_dispatch.params, - single_dispatch.params, - d_in, - d_out, - num_items, - reduction_op)); - - gpu_timer.Stop(); - elapsed_millis += gpu_timer.ElapsedMillis(); - } - - // Mooch - CubDebugExit(cudaDeviceSynchronize()); - - float avg_elapsed = elapsed_millis / g_timing_iterations; - float avg_throughput = float(num_items) / avg_elapsed / 1000.0 / 1000.0; - float avg_bandwidth = avg_throughput * sizeof(T); - - multi_dispatch.avg_throughput = CUB_MAX(avg_throughput, multi_dispatch.avg_throughput); - if (avg_throughput > multi_dispatch.best_avg_throughput) - { - multi_dispatch.best_avg_throughput = avg_throughput; - multi_dispatch.best_size = num_items; - } - - single_dispatch.avg_throughput = CUB_MAX(avg_throughput, single_dispatch.avg_throughput); - if (avg_throughput > single_dispatch.best_avg_throughput) - { - single_dispatch.best_avg_throughput = avg_throughput; - single_dispatch.best_size = num_items; - } - - if (g_verbose) - { - printf("\t%.2f GB/s, multi_dispatch( ", avg_bandwidth); - multi_dispatch.params.Print(); - printf(" ), single_dispatch( "); - single_dispatch.params.Print(); - printf(" )\n"); - fflush(stdout); - } - - AssertEquals(0, compare); - - // Cleanup temporaries - if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); - } - - - /** - * Evaluate multi-block configurations - */ - void TestMulti( - T* h_in, - T* d_in, - T* d_out, - ReductionOp reduction_op) - { - // Simple single kernel tuple for use with multi kernel sweep - typedef typename DeviceReduce::TunedPolicies::SinglePolicy SimpleSinglePolicy; - SingleDispatchTuple simple_single_tuple; - simple_single_tuple.params.template Init(); - simple_single_tuple.kernel_ptr = ReduceSingleKernel; - - double max_exponent = log2(double(g_max_items)); - double min_exponent = log2(double(simple_single_tuple.params.tile_size)); - unsigned int max_int = (unsigned int) -1; - - for (int sample = 0; sample < g_samples; ++sample) - { - printf("\nMulti-block sample %d, ", sample); - - int num_items; - if (sample == 0) - { - // First sample: use max items - num_items = g_max_items; - printf("num_items: %d", num_items); fflush(stdout); - } - else - { - // Sample a problem size from [2^g_min_exponent, g_max_items]. First 2/3 of the samples are log-distributed, the other 1/3 are uniformly-distributed. 
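            // Illustrative arithmetic for the sampling comment above, assuming the
            // default g_max_items = 48 * 1024 * 1024 (max_exponent ~= 25.58) and a
            // single-block tile size of 512 (min_exponent = 9): a draw with
            // scale = 0.5 takes the log-biased branch to
            // exponent = ((25.58 - 9) * 0.5) + 9 = 17.29, i.e. num_items = 2^17.29
            // ~= 160K, whereas the uniform branch would give
            // scale * g_max_items ~= 24M items for the same draw.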
- unsigned int bits; - RandomBits(bits); - double scale = double(bits) / max_int; - - if (sample < g_samples / 2) - { - // log bias - double exponent = ((max_exponent - min_exponent) * scale) + min_exponent; - num_items = pow(2.0, exponent); - num_items = CUB_MIN(num_items, g_max_items); - printf("num_items: %d (2^%.2f)", num_items, exponent); fflush(stdout); - } - else - { - // uniform bias - num_items = CUB_MAX(pow(2.0, min_exponent), scale * g_max_items); - num_items = CUB_MIN(num_items, g_max_items); - printf("num_items: %d (%.2f * %d)", num_items, scale, g_max_items); fflush(stdout); - } - } - if (g_verbose) - printf("\n"); - else - printf(", "); - - // Compute reference - T h_reference = Reduce(h_in, reduction_op, num_items); - - // Run test on each multi-kernel configuration - float best_avg_throughput = 0.0; - for (int j = 0; j < multi_kernels.size(); ++j) - { - multi_kernels[j].avg_throughput = 0.0; - - TestConfiguration(multi_kernels[j], simple_single_tuple, d_in, d_out, &h_reference, num_items, reduction_op); - - best_avg_throughput = CUB_MAX(best_avg_throughput, multi_kernels[j].avg_throughput); - } - - // Print best throughput for this problem size - printf("Best: %.2fe9 items/s (%.2f GB/s)\n", best_avg_throughput, best_avg_throughput * sizeof(T)); - - // Accumulate speedup (inverse for harmonic mean) - for (int j = 0; j < multi_kernels.size(); ++j) - multi_kernels[j].hmean_speedup += best_avg_throughput / multi_kernels[j].avg_throughput; - } - - // Find max overall throughput and compute hmean speedups - float overall_max_throughput = 0.0; - for (int j = 0; j < multi_kernels.size(); ++j) - { - overall_max_throughput = CUB_MAX(overall_max_throughput, multi_kernels[j].best_avg_throughput); - multi_kernels[j].hmean_speedup = float(g_samples) / multi_kernels[j].hmean_speedup; - } - - // Sort by cumulative speedup - sort(multi_kernels.begin(), multi_kernels.end(), MinSpeedup); - - // Print ranked multi configurations - printf("\nRanked multi_kernels:\n"); - for (int j = 0; j < multi_kernels.size(); ++j) - { - printf("\t (%d) params( ", multi_kernels.size() - j); - multi_kernels[j].params.Print(); - printf(" ) hmean speedup: %.3f, best throughput %.2f @ %d elements (%.2f GB/s, %.2f%%)\n", - multi_kernels[j].hmean_speedup, - multi_kernels[j].best_avg_throughput, - (int) multi_kernels[j].best_size, - multi_kernels[j].best_avg_throughput * sizeof(T), - multi_kernels[j].best_avg_throughput / overall_max_throughput); - } - - printf("\nMax multi-block throughput %.2f (%.2f GB/s)\n", overall_max_throughput, overall_max_throughput * sizeof(T)); - } - - - /** - * Evaluate single-block configurations - */ - void TestSingle( - T* h_in, - T* d_in, - T* d_out, - ReductionOp reduction_op) - { - // Construct a NULL-ptr multi-kernel tuple that forces a single-kernel pass - MultiDispatchTuple multi_tuple; - - double max_exponent = log2(double(g_max_items)); - unsigned int max_int = (unsigned int) -1; - - for (int sample = 0; sample < g_samples; ++sample) - { - printf("\nSingle-block sample %d, ", sample); - - int num_items; - if (sample == 0) - { - // First sample: use max items - num_items = g_max_items; - printf("num_items: %d", num_items); fflush(stdout); - } - else - { - // Sample a problem size from [2, g_max_items], log-distributed - unsigned int bits; - RandomBits(bits); - double scale = double(bits) / max_int; - double exponent = ((max_exponent - 1) * scale) + 1; - num_items = pow(2.0, exponent); - printf("num_items: %d (2^%.2f)", num_items, exponent); fflush(stdout); - } - - if (g_verbose) 
- printf("\n"); - else - printf(", "); - - // Compute reference - T h_reference = Reduce(h_in, reduction_op, num_items); - - // Run test on each single-kernel configuration (pick first multi-config to use, which shouldn't be - float best_avg_throughput = 0.0; - for (int j = 0; j < single_kernels.size(); ++j) - { - single_kernels[j].avg_throughput = 0.0; - - TestConfiguration(multi_tuple, single_kernels[j], d_in, d_out, &h_reference, num_items, reduction_op); - - best_avg_throughput = CUB_MAX(best_avg_throughput, single_kernels[j].avg_throughput); - } - - // Print best throughput for this problem size - printf("Best: %.2fe9 items/s (%.2f GB/s)\n", best_avg_throughput, best_avg_throughput * sizeof(T)); - - // Accumulate speedup (inverse for harmonic mean) - for (int j = 0; j < single_kernels.size(); ++j) - single_kernels[j].hmean_speedup += best_avg_throughput / single_kernels[j].avg_throughput; - } - - // Find max overall throughput and compute hmean speedups - float overall_max_throughput = 0.0; - for (int j = 0; j < single_kernels.size(); ++j) - { - overall_max_throughput = CUB_MAX(overall_max_throughput, single_kernels[j].best_avg_throughput); - single_kernels[j].hmean_speedup = float(g_samples) / single_kernels[j].hmean_speedup; - } - - // Sort by cumulative speedup - sort(single_kernels.begin(), single_kernels.end(), MinSpeedup); - - // Print ranked single configurations - printf("\nRanked single_kernels:\n"); - for (int j = 0; j < single_kernels.size(); ++j) - { - printf("\t (%d) params( ", single_kernels.size() - j); - single_kernels[j].params.Print(); - printf(" ) hmean speedup: %.3f, best throughput %.2f @ %d elements (%.2f GB/s, %.2f%%)\n", - single_kernels[j].hmean_speedup, - single_kernels[j].best_avg_throughput, - (int) single_kernels[j].best_size, - single_kernels[j].best_avg_throughput * sizeof(T), - single_kernels[j].best_avg_throughput / overall_max_throughput); - } - - printf("\nMax single-block throughput %.2f (%.2f GB/s)\n", overall_max_throughput, overall_max_throughput * sizeof(T)); - } - -}; - - - -//--------------------------------------------------------------------- -// Main -//--------------------------------------------------------------------- - -/** - * Main - */ -int main(int argc, char** argv) -{ - // Initialize command line - CommandLineArgs args(argc, argv); - args.GetCmdLineArgument("n", g_max_items); - args.GetCmdLineArgument("s", g_samples); - args.GetCmdLineArgument("i", g_timing_iterations); - g_verbose = args.CheckCmdLineFlag("v"); - g_single = args.CheckCmdLineFlag("single"); - g_verify = !args.CheckCmdLineFlag("noverify"); - - // Print usage - if (args.CheckCmdLineFlag("help")) - { - printf("%s " - "[--device=] " - "[--n=]" - "[--s=]" - "[--i=]" - "[--single]" - "[--v]" - "[--noverify]" - "\n", argv[0]); - exit(0); - } - - // Initialize device - CubDebugExit(args.DeviceInit()); - -#if (TUNE_SIZE == 1) - typedef unsigned char T; -#elif (TUNE_SIZE == 2) - typedef unsigned short T; -#elif (TUNE_SIZE == 4) - typedef unsigned int T; -#elif (TUNE_SIZE == 8) - typedef unsigned long long T; -#else - // Default - typedef unsigned int T; -#endif - - typedef unsigned int OffsetT; - Sum reduction_op; - - // Enumerate kernels - Schmoo schmoo; - schmoo.Enumerate(); - - // Allocate host arrays - T *h_in = new T[g_max_items]; - - // Initialize problem - Initialize(UNIFORM, h_in, g_max_items); - - // Initialize device arrays - T *d_in = NULL; - T *d_out = NULL; - CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * g_max_items)); - 
-    CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * 1));
-    CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * g_max_items, cudaMemcpyHostToDevice));
-
-    // Test kernels
-    if (g_single)
-        schmoo.TestSingle(h_in, d_in, d_out, reduction_op);
-    else
-        schmoo.TestMulti(h_in, d_in, d_out, reduction_op);
-
-    // Cleanup
-    if (h_in) delete[] h_in;
-    if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
-    if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
-
-    return 0;
-}
-
-
-
diff --git a/ml-xgboost/demo/.gitignore b/ml-xgboost/demo/.gitignore
deleted file mode 100644
index ee79c70..0000000
--- a/ml-xgboost/demo/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-*.libsvm
-*.pkl
diff --git a/ml-xgboost/demo/README.md b/ml-xgboost/demo/README.md
deleted file mode 100644
index e11ef57..0000000
--- a/ml-xgboost/demo/README.md
+++ /dev/null
@@ -1,147 +0,0 @@
-Awesome XGBoost
-===============
-This page contains a curated list of examples, tutorials, and blog posts about XGBoost use cases.
-It is inspired by [awesome-MXNet](https://github.com/dmlc/mxnet/blob/master/example/README.md),
-[awesome-php](https://github.com/ziadoz/awesome-php) and [awesome-machine-learning](https://github.com/josephmisiti/awesome-machine-learning).
-
-Please send a pull request if you find things that belong here.
-
-Contents
---------
-- [Code Examples](#code-examples)
-  - [Features Walkthrough](#features-walkthrough)
-  - [Basic Examples by Tasks](#basic-examples-by-tasks)
-  - [Benchmarks](#benchmarks)
-- [Machine Learning Challenge Winning Solutions](#machine-learning-challenge-winning-solutions)
-- [Talks](#talks)
-- [Tutorials](#tutorials)
-- [Usecases](#usecases)
-- [Tools using XGBoost](#tools-using-xgboost)
-- [Integrations with 3rd party software](#integrations-with-3rd-party-software)
-- [Awards](#awards)
-- [Windows Binaries](#windows-binaries)
-
-Code Examples
--------------
-### Features Walkthrough
-
-This is a list of short code examples introducing the different functionalities of the xgboost packages.
-
-* Basic walkthrough of packages
-  [python](guide-python/basic_walkthrough.py)
-  [R](../R-package/demo/basic_walkthrough.R)
-  [Julia](https://github.com/antinucleon/XGBoost.jl/blob/master/demo/basic_walkthrough.jl)
-  [PHP](https://github.com/bpachev/xgboost-php/blob/master/demo/titanic_demo.php)
-* Customize loss function and evaluation metric
-  [python](guide-python/custom_objective.py)
-  [R](../R-package/demo/custom_objective.R)
-  [Julia](https://github.com/antinucleon/XGBoost.jl/blob/master/demo/custom_objective.jl)
-* Boosting from existing prediction
-  [python](guide-python/boost_from_prediction.py)
-  [R](../R-package/demo/boost_from_prediction.R)
-  [Julia](https://github.com/antinucleon/XGBoost.jl/blob/master/demo/boost_from_prediction.jl)
-* Predicting using the first n trees
-  [python](guide-python/predict_first_ntree.py)
-  [R](../R-package/demo/predict_first_ntree.R)
-  [Julia](https://github.com/antinucleon/XGBoost.jl/blob/master/demo/predict_first_ntree.jl)
-* Generalized Linear Model
-  [python](guide-python/generalized_linear_model.py)
-  [R](../R-package/demo/generalized_linear_model.R)
-  [Julia](https://github.com/antinucleon/XGBoost.jl/blob/master/demo/generalized_linear_model.jl)
-* Cross validation
-  [python](guide-python/cross_validation.py)
-  [R](../R-package/demo/cross_validation.R)
-  [Julia](https://github.com/antinucleon/XGBoost.jl/blob/master/demo/cross_validation.jl)
-* Predicting leaf indices
-  [python](guide-python/predict_leaf_indices.py)
-  [R](../R-package/demo/predict_leaf_indices.R)
-
-### Basic Examples by Tasks
-
-Most of the examples in this section are based on the CLI or Python version;
-however, the parameter settings apply to all versions.
-
-- [Binary classification](binary_classification)
-- [Multiclass classification](multiclass_classification)
-- [Regression](regression)
-- [Learning to Rank](rank)
-
-### Benchmarks
-
-- [Starter script for Kaggle Higgs Boson](kaggle-higgs)
-- [Kaggle Tradeshift winning solution by daxiongshu](https://github.com/daxiongshu/kaggle-tradeshift-winning-solution)
-- [Benchmarking the most commonly used open source tools for binary classification](https://github.com/szilard/benchm-ml#boosting-gradient-boosted-treesgradient-boosting-machines)
-
-
-## Machine Learning Challenge Winning Solutions
-
-XGBoost is extensively used by machine learning practitioners to create state-of-the-art data science solutions.
-This is a list of winning machine learning competition solutions that used XGBoost.
-Please send a pull request if you find any that are missing.
-
-- Maksims Volkovs, Guangwei Yu and Tomi Poutanen, 1st place of the [2017 ACM RecSys challenge](http://2017.recsyschallenge.com/). Link to [paper](http://www.cs.toronto.edu/~mvolkovs/recsys2017_challenge.pdf).
-- Vlad Sandulescu, Mihai Chiru, 1st place of the [KDD Cup 2016 competition](https://kddcup2016.azurewebsites.net). Link to [the arxiv paper](http://arxiv.org/abs/1609.02728).
-- Marios Michailidis, Mathias Müller and HJ van Veen, 1st place of the [Dato Truely Native? competition](https://www.kaggle.com/c/dato-native). Link to [the Kaggle interview](http://blog.kaggle.com/2015/12/03/dato-winners-interview-1st-place-mad-professors/).
-- Vlad Mironov, Alexander Guschin, 1st place of the [CERN LHCb experiment Flavour of Physics competition](https://www.kaggle.com/c/flavours-of-physics). Link to [the Kaggle interview](http://blog.kaggle.com/2015/11/30/flavour-of-physics-technical-write-up-1st-place-go-polar-bears/).
-- Josef Slavicek, 3rd place of the [CERN LHCb experiment Flavour of Physics competition](https://www.kaggle.com/c/flavours-of-physics). Link to [the Kaggle interview](http://blog.kaggle.com/2015/11/23/flavour-of-physics-winners-interview-3rd-place-josef-slavicek/).
-- Mario Filho, Josef Feigl, Lucas, Gilberto, 1st place of the [Caterpillar Tube Pricing competition](https://www.kaggle.com/c/caterpillar-tube-pricing). Link to [the Kaggle interview](http://blog.kaggle.com/2015/09/22/caterpillar-winners-interview-1st-place-gilberto-josef-leustagos-mario/).
-- Qingchen Wang, 1st place of the [Liberty Mutual Property Inspection](https://www.kaggle.com/c/liberty-mutual-group-property-inspection-prediction). Link to [the Kaggle interview](http://blog.kaggle.com/2015/09/28/liberty-mutual-property-inspection-winners-interview-qingchen-wang/).
-- Chenglong Chen, 1st place of the [Crowdflower Search Results Relevance](https://www.kaggle.com/c/crowdflower-search-relevance). Link to [the winning solution](https://www.kaggle.com/c/crowdflower-search-relevance/forums/t/15186/1st-place-winner-solution-chenglong-chen/).
-- Alexandre Barachant (“Cat”) and Rafał Cycoń (“Dog”), 1st place of the [Grasp-and-Lift EEG Detection](https://www.kaggle.com/c/grasp-and-lift-eeg-detection). Link to [the Kaggle interview](http://blog.kaggle.com/2015/10/12/grasp-and-lift-eeg-winners-interview-1st-place-cat-dog/).
-- Halla Yang, 2nd place of the [Recruit Coupon Purchase Prediction Challenge](https://www.kaggle.com/c/coupon-purchase-prediction). Link to [the Kaggle interview](http://blog.kaggle.com/2015/10/21/recruit-coupon-purchase-winners-interview-2nd-place-halla-yang/).
-- Owen Zhang, 1st place of the [Avito Context Ad Clicks competition](https://www.kaggle.com/c/avito-context-ad-clicks). Link to [the Kaggle interview](http://blog.kaggle.com/2015/08/26/avito-winners-interview-1st-place-owen-zhang/).
-- Keiichi Kuroyanagi, 2nd place of the [Airbnb New User Bookings](https://www.kaggle.com/c/airbnb-recruiting-new-user-bookings). Link to [the Kaggle interview](http://blog.kaggle.com/2016/03/17/airbnb-new-user-bookings-winners-interview-2nd-place-keiichi-kuroyanagi-keiku/).
-- Marios Michailidis, Mathias Müller and Ning Situ, 1st place of the [Homesite Quote Conversion](https://www.kaggle.com/c/homesite-quote-conversion). Link to [the Kaggle interview](http://blog.kaggle.com/2016/04/08/homesite-quote-conversion-winners-write-up-1st-place-kazanova-faron-clobber/).
-
-## Talks
-- [XGBoost: A Scalable Tree Boosting System](http://datascience.la/xgboost-workshop-and-meetup-talk-with-tianqi-chen/) (video+slides) by Tianqi Chen at the Los Angeles Data Science meetup
-
-## Tutorials
-
-- [Machine Learning with XGBoost on Qubole Spark Cluster](https://www.qubole.com/blog/machine-learning-xgboost-qubole-spark-cluster/)
-- [XGBoost Official RMarkdown Tutorials](https://xgboost.readthedocs.org/en/latest/R-package/index.html#tutorials)
-- [An Introduction to XGBoost R Package](http://dmlc.ml/rstats/2016/03/10/xgboost.html) by Tong He
-- [Open Source Tools & Data Science Competitions](http://www.slideshare.net/odsc/owen-zhangopen-sourcetoolsanddscompetitions1) by Owen Zhang - XGBoost parameter tuning tips
-- [Feature Importance Analysis with XGBoost in Tax audit](http://fr.slideshare.net/MichaelBENESTY/feature-importance-analysis-with-xgboost-in-tax-audit)
-- [Winning solution of Kaggle Higgs competition: what a single model can do](http://no2147483647.wordpress.com/2014/09/17/winning-solution-of-kaggle-higgs-competition-what-a-single-model-can-do/)
-- [XGBoost - eXtreme Gradient Boosting](http://www.slideshare.net/ShangxuanZhang/xgboost) by Tong He
-- [How to use XGBoost algorithm in R in easy steps](http://www.analyticsvidhya.com/blog/2016/01/xgboost-algorithm-easy-steps/) by Tavish Srivastava ([Chinese Translation 中文翻译](https://segmentfault.com/a/1190000004421821) by [HarryZhu](https://segmentfault.com/u/harryprince))
-- [Kaggle Solution: What’s Cooking ? (Text Mining Competition)](http://www.analyticsvidhya.com/blog/2015/12/kaggle-solution-cooking-text-mining-competition/) by Manish Saraswat
-- Better Optimization with Repeated Cross Validation and the XGBoost model - Machine Learning with R, by Manuel Amunategui ([Youtube Link](https://www.youtube.com/watch?v=Og7CGAfSr_Y)) ([Github Link](https://github.com/amunategui/BetterCrossValidation))
-- [XGBoost Rossman Parameter Tuning](https://www.kaggle.com/khozzy/rossmann-store-sales/xgboost-parameter-tuning-template/run/90168/notebook) by [Norbert Kozlowski](https://www.kaggle.com/khozzy)
-- [Featurizing log data before XGBoost](http://www.slideshare.net/DataRobot/featurizing-log-data-before-xgboost) by Xavier Conort, Owen Zhang et al.
-- [West Nile Virus Competition Benchmarks & Tutorials](http://blog.kaggle.com/2015/07/21/west-nile-virus-competition-benchmarks-tutorials/) by [Anna Montoya](http://blog.kaggle.com/author/annamontoya/)
-- [Ensemble Decision Tree with XGBoost](https://www.kaggle.com/binghsu/predict-west-nile-virus/xgboost-starter-code-python-0-69) by [Bing Xu](https://www.kaggle.com/binghsu)
-- [Notes on eXtreme Gradient Boosting](http://startup.ml/blog/xgboost) by Arshak Navruzyan ([iPython Notebook](https://github.com/startupml/koan/blob/master/eXtreme%20Gradient%20Boosting.ipynb))
-- [Complete Guide to Parameter Tuning in XGBoost](http://www.analyticsvidhya.com/blog/2016/03/complete-guide-parameter-tuning-xgboost-with-codes-python/) by Aarshay Jain
-- [Practical XGBoost in Python online course](http://education.parrotprediction.teachable.com/courses/practical-xgboost-in-python) by Parrot Prediction
-- [Spark and XGBoost using Scala](http://www.elenacuoco.com/2016/10/10/scala-spark-xgboost-classification/) by Elena Cuoco
-
-## Usecases
-If you have a particular use case of XGBoost that you would like to highlight,
-send a PR to add a one-sentence description. :)
-
-- XGBoost is used in [Kaggle Script](https://www.kaggle.com/scripts) to solve data science challenges.
-- Distribute XGBoost as a REST API server from a Jupyter notebook with [BentoML](https://github.com/bentoml/bentoml). [Link to notebook](https://github.com/bentoml/BentoML/blob/master/examples/xgboost-predict-titanic-survival/XGBoost-titanic-survival-prediction.ipynb)
-- [Seldon predictive service powered by XGBoost](http://docs.seldon.io/iris-demo.html)
-- XGBoost Distributed is used in [ODPS Cloud Service by Alibaba](https://yq.aliyun.com/articles/6355) (in Chinese)
-- XGBoost is incorporated as part of [Graphlab Create](https://dato.com/products/create/) for scalable machine learning.
-- [Hanjing Su](https://www.52cs.org) from Tencent data platform team: "We use distributed XGBoost for click-through prediction in WeChat shopping and lookalikes. The problems involve hundreds of millions of users and thousands of features. XGBoost is cleanly designed and can be easily integrated into our production environment, reducing our development cost."
-- [CNevd](https://github.com/CNevd) from autohome.com ad platform team: "Distributed XGBoost is used for click-through rate prediction in our display advertising. XGBoost is highly efficient and flexible and can be easily used on our distributed platform. Our CTR improved greatly with hundreds of millions of samples and millions of features thanks to this awesome XGBoost."
-
-## Tools using XGBoost
-
-- [BayesBoost](https://github.com/mpearmain/BayesBoost) - Bayesian Optimization using xgboost and sklearn API
-- [gp_xgboost_gridsearch](https://github.com/vatsan/gp_xgboost_gridsearch) - In-database parallel grid-search for XGBoost on [Greenplum](https://github.com/greenplum-db/gpdb) using PL/Python
-- [tpot](https://github.com/rhiever/tpot) - A Python tool that automatically creates and optimizes machine learning pipelines using genetic programming.
-
-## Integrations with 3rd party software
-Open source integrations with XGBoost:
-* [Neptune.ai](http://neptune.ai/) - Experiment management and collaboration tool for ML/DL/RL specialists. The integration takes the form of an [XGBoost callback](https://docs.neptune.ai/integrations/xgboost.html) that automatically logs training and evaluation metrics, as well as the saved model (booster), a feature importance chart and visualized trees.
-* [Optuna](https://optuna.org/) - An open source hyperparameter optimization framework to automate hyperparameter search. Optuna integrates with XGBoost in the [XGBoostPruningCallback](https://optuna.readthedocs.io/en/stable/reference/integration.html#optuna.integration.XGBoostPruningCallback) that lets users easily prune unpromising trials; a minimal usage sketch is shown below.
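-
-A minimal sketch of that integration (the objective and search space here are illustrative; `dtrain` and `dvalid` are assumed to be existing `DMatrix` objects, and the metric name passed to the callback must match one reported under `evals`):
-
-```python
-import optuna
-import xgboost as xgb
-
-def objective(trial):
-    # Hypothetical search space; 'eval_metric' makes xgboost report 'valid-logloss'
-    params = {'objective': 'binary:logistic', 'eval_metric': 'logloss',
-              'max_depth': trial.suggest_int('max_depth', 3, 8)}
-    # Prunes this trial early when 'valid-logloss' looks unpromising
-    pruning_callback = optuna.integration.XGBoostPruningCallback(trial, 'valid-logloss')
-    bst = xgb.train(params, dtrain, num_boost_round=1000,
-                    evals=[(dvalid, 'valid')], early_stopping_rounds=25,
-                    verbose_eval=False, callbacks=[pruning_callback])
-    return bst.best_score
-
-study = optuna.create_study(direction='minimize')
-study.optimize(objective, n_trials=50)
-```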
- -## Awards -- [John Chambers Award](http://stat-computing.org/awards/jmc/winners.html) - 2016 Winner: XGBoost R Package, by Tong He (Simon Fraser University) and Tianqi Chen (University of Washington) -- [InfoWorld’s 2019 Technology of the Year Award](https://www.infoworld.com/article/3336072/application-development/infoworlds-2019-technology-of-the-year-award-winners.html) - -## Windows Binaries -Unofficial windows binaries and instructions on how to use them are hosted on [Guido Tapia's blog](http://www.picnet.com.au/blogs/guido/post/2016/09/22/xgboost-windows-x64-binaries-for-download/) diff --git a/ml-xgboost/demo/aft_survival/aft_survival_demo.py b/ml-xgboost/demo/aft_survival/aft_survival_demo.py deleted file mode 100644 index 3cdccc1..0000000 --- a/ml-xgboost/demo/aft_survival/aft_survival_demo.py +++ /dev/null @@ -1,54 +0,0 @@ -""" -Demo for survival analysis (regression) using Accelerated Failure Time (AFT) model -""" -from sklearn.model_selection import ShuffleSplit -import pandas as pd -import numpy as np -import xgboost as xgb - -# The Veterans' Administration Lung Cancer Trial -# The Statistical Analysis of Failure Time Data by Kalbfleisch J. and Prentice R (1980) -df = pd.read_csv('../data/veterans_lung_cancer.csv') -print('Training data:') -print(df) - -# Split features and labels -y_lower_bound = df['Survival_label_lower_bound'] -y_upper_bound = df['Survival_label_upper_bound'] -X = df.drop(['Survival_label_lower_bound', 'Survival_label_upper_bound'], axis=1) - -# Split data into training and validation sets -rs = ShuffleSplit(n_splits=2, test_size=.7, random_state=0) -train_index, valid_index = next(rs.split(X)) -dtrain = xgb.DMatrix(X.values[train_index, :]) -dtrain.set_float_info('label_lower_bound', y_lower_bound[train_index]) -dtrain.set_float_info('label_upper_bound', y_upper_bound[train_index]) -dvalid = xgb.DMatrix(X.values[valid_index, :]) -dvalid.set_float_info('label_lower_bound', y_lower_bound[valid_index]) -dvalid.set_float_info('label_upper_bound', y_upper_bound[valid_index]) - -# Train gradient boosted trees using AFT loss and metric -params = {'verbosity': 0, - 'objective': 'survival:aft', - 'eval_metric': 'aft-nloglik', - 'tree_method': 'hist', - 'learning_rate': 0.05, - 'aft_loss_distribution': 'normal', - 'aft_loss_distribution_scale': 1.20, - 'max_depth': 6, - 'lambda': 0.01, - 'alpha': 0.02} -bst = xgb.train(params, dtrain, num_boost_round=10000, - evals=[(dtrain, 'train'), (dvalid, 'valid')], - early_stopping_rounds=50) - -# Run prediction on the validation set -df = pd.DataFrame({'Label (lower bound)': y_lower_bound[valid_index], - 'Label (upper bound)': y_upper_bound[valid_index], - 'Predicted label': bst.predict(dvalid)}) -print(df) -# Show only data points with right-censored labels -print(df[np.isinf(df['Label (upper bound)'])]) - -# Save trained model -bst.save_model('aft_model.json') diff --git a/ml-xgboost/demo/aft_survival/aft_survival_demo_with_optuna.py b/ml-xgboost/demo/aft_survival/aft_survival_demo_with_optuna.py deleted file mode 100644 index 117be8b..0000000 --- a/ml-xgboost/demo/aft_survival/aft_survival_demo_with_optuna.py +++ /dev/null @@ -1,78 +0,0 @@ -""" -Demo for survival analysis (regression) using Accelerated Failure Time (AFT) model, using Optuna -to tune hyperparameters -""" -from sklearn.model_selection import ShuffleSplit -import pandas as pd -import numpy as np -import xgboost as xgb -import optuna - -# The Veterans' Administration Lung Cancer Trial -# The Statistical Analysis of Failure Time Data by Kalbfleisch J. 
and Prentice R (1980) -df = pd.read_csv('../data/veterans_lung_cancer.csv') -print('Training data:') -print(df) - -# Split features and labels -y_lower_bound = df['Survival_label_lower_bound'] -y_upper_bound = df['Survival_label_upper_bound'] -X = df.drop(['Survival_label_lower_bound', 'Survival_label_upper_bound'], axis=1) - -# Split data into training and validation sets -rs = ShuffleSplit(n_splits=2, test_size=.7, random_state=0) -train_index, valid_index = next(rs.split(X)) -dtrain = xgb.DMatrix(X.values[train_index, :]) -dtrain.set_float_info('label_lower_bound', y_lower_bound[train_index]) -dtrain.set_float_info('label_upper_bound', y_upper_bound[train_index]) -dvalid = xgb.DMatrix(X.values[valid_index, :]) -dvalid.set_float_info('label_lower_bound', y_lower_bound[valid_index]) -dvalid.set_float_info('label_upper_bound', y_upper_bound[valid_index]) - -# Define hyperparameter search space -base_params = {'verbosity': 0, - 'objective': 'survival:aft', - 'eval_metric': 'aft-nloglik', - 'tree_method': 'hist'} # Hyperparameters common to all trials -def objective(trial): - params = {'learning_rate': trial.suggest_loguniform('learning_rate', 0.01, 1.0), - 'aft_loss_distribution': trial.suggest_categorical('aft_loss_distribution', - ['normal', 'logistic', 'extreme']), - 'aft_loss_distribution_scale': trial.suggest_loguniform('aft_loss_distribution_scale', 0.1, 10.0), - 'max_depth': trial.suggest_int('max_depth', 3, 8), - 'lambda': trial.suggest_loguniform('lambda', 1e-8, 1.0), - 'alpha': trial.suggest_loguniform('alpha', 1e-8, 1.0)} # Search space - params.update(base_params) - pruning_callback = optuna.integration.XGBoostPruningCallback(trial, 'valid-aft-nloglik') - bst = xgb.train(params, dtrain, num_boost_round=10000, - evals=[(dtrain, 'train'), (dvalid, 'valid')], - early_stopping_rounds=50, verbose_eval=False, callbacks=[pruning_callback]) - if bst.best_iteration >= 25: - return bst.best_score - else: - return np.inf # Reject models with < 25 trees - -# Run hyperparameter search -study = optuna.create_study(direction='minimize') -study.optimize(objective, n_trials=200) -print('Completed hyperparameter tuning with best aft-nloglik = {}.'.format(study.best_trial.value)) -params = {} -params.update(base_params) -params.update(study.best_trial.params) - -# Re-run training with the best hyperparameter combination -print('Re-running the best trial... params = {}'.format(params)) -bst = xgb.train(params, dtrain, num_boost_round=10000, - evals=[(dtrain, 'train'), (dvalid, 'valid')], - early_stopping_rounds=50) - -# Run prediction on the validation set -df = pd.DataFrame({'Label (lower bound)': y_lower_bound[valid_index], - 'Label (upper bound)': y_upper_bound[valid_index], - 'Predicted label': bst.predict(dvalid)}) -print(df) -# Show only data points with right-censored labels -print(df[np.isinf(df['Label (upper bound)'])]) - -# Save trained model -bst.save_model('aft_best_model.json') diff --git a/ml-xgboost/demo/aft_survival/aft_survival_viz_demo.py b/ml-xgboost/demo/aft_survival/aft_survival_viz_demo.py deleted file mode 100644 index fe622f9..0000000 --- a/ml-xgboost/demo/aft_survival/aft_survival_viz_demo.py +++ /dev/null @@ -1,97 +0,0 @@ -""" -Visual demo for survival analysis (regression) with Accelerated Failure Time (AFT) model. - -This demo uses 1D toy data and visualizes how XGBoost fits a tree ensemble. The ensemble model -starts out as a flat line and evolves into a step function in order to account for all ranged -labels. 
-""" -import numpy as np -import xgboost as xgb -import matplotlib.pyplot as plt - -plt.rcParams.update({'font.size': 13}) - -# Function to visualize censored labels -def plot_censored_labels(X, y_lower, y_upper): - def replace_inf(x, target_value): - x[np.isinf(x)] = target_value - return x - plt.plot(X, y_lower, 'o', label='y_lower', color='blue') - plt.plot(X, y_upper, 'o', label='y_upper', color='fuchsia') - plt.vlines(X, ymin=replace_inf(y_lower, 0.01), ymax=replace_inf(y_upper, 1000), - label='Range for y', color='gray') - -# Toy data -X = np.array([1, 2, 3, 4, 5]).reshape((-1, 1)) -INF = np.inf -y_lower = np.array([ 10, 15, -INF, 30, 100]) -y_upper = np.array([INF, INF, 20, 50, INF]) - -# Visualize toy data -plt.figure(figsize=(5, 4)) -plot_censored_labels(X, y_lower, y_upper) -plt.ylim((6, 200)) -plt.legend(loc='lower right') -plt.title('Toy data') -plt.xlabel('Input feature') -plt.ylabel('Label') -plt.yscale('log') -plt.tight_layout() -plt.show(block=True) - -# Will be used to visualize XGBoost model -grid_pts = np.linspace(0.8, 5.2, 1000).reshape((-1, 1)) - -# Train AFT model using XGBoost -dmat = xgb.DMatrix(X) -dmat.set_float_info('label_lower_bound', y_lower) -dmat.set_float_info('label_upper_bound', y_upper) -params = {'max_depth': 3, 'objective':'survival:aft', 'min_child_weight': 0} - -accuracy_history = [] -def plot_intermediate_model_callback(env): - """Custom callback to plot intermediate models""" - # Compute y_pred = prediction using the intermediate model, at current boosting iteration - y_pred = env.model.predict(dmat) - # "Accuracy" = the number of data points whose ranged label (y_lower, y_upper) includes - # the corresponding predicted label (y_pred) - acc = np.sum(np.logical_and(y_pred >= y_lower, y_pred <= y_upper)/len(X) * 100) - accuracy_history.append(acc) - - # Plot ranged labels as well as predictions by the model - plt.subplot(5, 3, env.iteration + 1) - plot_censored_labels(X, y_lower, y_upper) - y_pred_grid_pts = env.model.predict(xgb.DMatrix(grid_pts)) - plt.plot(grid_pts, y_pred_grid_pts, 'r-', label='XGBoost AFT model', linewidth=4) - plt.title('Iteration {}'.format(env.iteration), x=0.5, y=0.8) - plt.xlim((0.8, 5.2)) - plt.ylim((1 if np.min(y_pred) < 6 else 6, 200)) - plt.yscale('log') - -res = {} -plt.figure(figsize=(12,13)) -bst = xgb.train(params, dmat, 15, [(dmat, 'train')], evals_result=res, - callbacks=[plot_intermediate_model_callback]) -plt.tight_layout() -plt.legend(loc='lower center', ncol=4, - bbox_to_anchor=(0.5, 0), - bbox_transform=plt.gcf().transFigure) -plt.tight_layout() - -# Plot negative log likelihood over boosting iterations -plt.figure(figsize=(8,3)) -plt.subplot(1, 2, 1) -plt.plot(res['train']['aft-nloglik'], 'b-o', label='aft-nloglik') -plt.xlabel('# Boosting Iterations') -plt.legend(loc='best') - -# Plot "accuracy" over boosting iterations -# "Accuracy" = the number of data points whose ranged label (y_lower, y_upper) includes -# the corresponding predicted label (y_pred) -plt.subplot(1, 2, 2) -plt.plot(accuracy_history, 'r-o', label='Accuracy (%)') -plt.xlabel('# Boosting Iterations') -plt.legend(loc='best') -plt.tight_layout() - -plt.show() diff --git a/ml-xgboost/demo/binary_classification/README.md b/ml-xgboost/demo/binary_classification/README.md deleted file mode 100644 index 8947adf..0000000 --- a/ml-xgboost/demo/binary_classification/README.md +++ /dev/null @@ -1,164 +0,0 @@ -Binary Classification -===================== -This is the quick start tutorial for xgboost CLI version. 
-Here we demonstrate how to use XGBoost for a binary classification task. Before getting started, make sure you compile xgboost in the root directory of the project by typing ```make```.
-The script 'runexp.sh' can be used to run the demo. Here we use the [mushroom dataset](https://archive.ics.uci.edu/ml/datasets/Mushroom) from the UCI Machine Learning Repository.
-
-### Tutorial
-#### Generate Input Data
-XGBoost takes LibSVM format. An example of synthetic input data is shown below:
-```
-1 101:1.2 102:0.03
-0 1:2.1 10001:300 10002:400
-...
-```
-Each line represents a single instance. In the first line, '1' is the instance label, '101' and '102' are feature indices, and '1.2' and '0.03' are the corresponding feature values. In the binary classification case, '1' is used to indicate positive samples and '0' to indicate negative samples. We also support probability values in [0,1] as labels, to indicate the probability of the instance being positive.
-
-
-First we will transform the dataset into the classic LibSVM format and split the data into a training set and a test set by running:
-```
-python mapfeat.py
-python mknfold.py agaricus.txt 1
-```
-The two files, 'agaricus.txt.train' and 'agaricus.txt.test', will be used as the training set and test set.
-
-#### Training
-Then we can run the training process:
-```
-../../xgboost mushroom.conf
-```
-
-mushroom.conf is the configuration for both training and testing. Each line contains an [attribute]=[value] pair:
-
-```conf
-# General Parameters, see comment for each definition
-# can be gbtree or gblinear
-booster = gbtree
-# choose logistic regression loss function for binary classification
-objective = binary:logistic
-
-# Tree Booster Parameters
-# step size shrinkage
-eta = 1.0
-# minimum loss reduction required to make a further partition
-gamma = 1.0
-# minimum sum of instance weight (hessian) needed in a child
-min_child_weight = 1
-# maximum depth of a tree
-max_depth = 3
-
-# Task Parameters
-# the number of rounds to do boosting
-num_round = 2
-# 0 means do not save any model except the final round model
-save_period = 0
-# The path of training data
-data = "agaricus.txt.train"
-# The path of validation data, used to monitor training process, here [test] sets name of the validation set
-eval[test] = "agaricus.txt.test"
-# The path of test data
-test:data = "agaricus.txt.test"
-```
-We use the tree booster and the logistic regression objective in our setting. This means we accomplish our task using classic gradient boosted regression trees (GBRT), a well-proven method for binary classification.
-
-The example shows the most common parameters needed to use xgboost.
-If you are interested in more parameter settings, the complete list with detailed descriptions is [here](../../doc/parameter.rst). Besides putting the parameters in the configuration file, we can set them by passing them as command line arguments:
-
-```
-../../xgboost mushroom.conf max_depth=6
-```
-This sets the parameter max_depth to 6 rather than the 3 given in the conf file. When using the command line, make sure max_depth=6 is passed as a single argument, i.e. it must not contain spaces. When a parameter is set both on the command line and in the config file, the command line setting overrides the one in the config file.
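-
-For comparison, here is a minimal sketch of the same training run using the xgboost Python package (this is not part of the CLI demo; it assumes the agaricus.txt.train/agaricus.txt.test files generated above and mirrors the mushroom.conf settings):
-
-```python
-import xgboost as xgb
-
-# Load the LibSVM-format files produced by mapfeat.py / mknfold.py
-dtrain = xgb.DMatrix('agaricus.txt.train')
-dtest = xgb.DMatrix('agaricus.txt.test')
-
-# Same settings as mushroom.conf
-params = {'booster': 'gbtree', 'objective': 'binary:logistic',
-          'eta': 1.0, 'gamma': 1.0, 'min_child_weight': 1, 'max_depth': 3}
-bst = xgb.train(params, dtrain, num_boost_round=2, evals=[(dtest, 'test')])
-preds = bst.predict(dtest)  # probabilities in [0,1] that each sample is positive
-```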
-
-In this example, we use the tree booster for gradient boosting. If you would like to use the linear booster for regression instead, you can keep all the parameters except booster and the tree booster parameters, as below:
-```conf
-# General Parameters
-# choose the linear booster
-booster = gblinear
-...
-
-# Change Tree Booster Parameters into Linear Booster Parameters
-# L2 regularization term on weights, default 0
-lambda = 0.01
-# L1 regularization term on weights, default 0
-alpha = 0.01
-# L2 regularization term on bias, default 0
-lambda_bias = 0.01
-
-# Regression Parameters
-...
-```
-
-#### Get Predictions
-After training, we can use the output model to get predictions on the test data:
-```
-../../xgboost mushroom.conf task=pred model_in=0002.model
-```
-For binary classification, the output predictions are probability confidence scores in [0,1], corresponding to the probability that the label is positive.
-
-#### Dump Model
-This is a preliminary feature, so only tree models support text dump. XGBoost can dump tree models to text or JSON files, which makes the model easy to inspect:
-```
-../../xgboost mushroom.conf task=dump model_in=0002.model name_dump=dump.raw.txt
-../../xgboost mushroom.conf task=dump model_in=0002.model fmap=featmap.txt name_dump=dump.nice.txt
-```
-
-In this demo, the tree boosters obtained will be printed in dump.raw.txt and dump.nice.txt; the latter is easier to understand because it uses the feature map featmap.txt.
-
-Each line of ```featmap.txt``` has the format ```<featureid> <featurename> <q or i or int>```:
-  - Feature ids must run from 0 to the number of features, in sorted order.
-  - i means the feature is a binary indicator feature
-  - q means the feature is a quantitative value, such as age or time, and can be missing
-  - int means the feature is an integer value (when int is hinted, the decision boundary will be an integer)
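-
-For illustration, the first few lines of such a featmap.txt might look like the following (the feature names here are hypothetical; in this demo the actual file is generated by mapfeat.py):
-```
-0 cap-shape=bell i
-1 cap-shape=conical i
-2 cap-surface=fibrous i
-```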
-
-#### Monitoring Progress
-When you run training, you will see messages displayed on screen:
-```
-tree train end, 1 roots, 12 extra nodes, 0 pruned nodes ,max_depth=3
-[0] test-error:0.016139
-boosting round 1, 0 sec elapsed
-
-tree train end, 1 roots, 10 extra nodes, 0 pruned nodes ,max_depth=3
-[1] test-error:0.000000
-```
-The evaluation messages are printed to stderr, so if you only want to log the evaluation progress, simply type
-```
-../../xgboost mushroom.conf 2>log.txt
-```
-Then you can find the following content in log.txt
-```
-[0] test-error:0.016139
-[1] test-error:0.000000
-```
-We can also monitor both training and test statistics by adding the following lines to the configuration
-```conf
-eval[test] = "agaricus.txt.test"
-eval[trainname] = "agaricus.txt.train"
-```
-Run the command again, and the log file becomes
-```
-[0] test-error:0.016139 trainname-error:0.014433
-[1] test-error:0.000000 trainname-error:0.001228
-```
-The rule is eval[name-printed-in-log] = filename; the file will then be added to the monitoring process and evaluated each round.
-
-xgboost also supports monitoring multiple metrics. Suppose we also want to monitor the average log-likelihood of each prediction during training; simply add ```eval_metric=logloss``` to the configuration. Run again, and the log file becomes
-```
-[0] test-error:0.016139 test-negllik:0.029795 trainname-error:0.014433 trainname-negllik:0.027023
-[1] test-error:0.000000 test-negllik:0.000000 trainname-error:0.001228 trainname-negllik:0.002457
-```
-#### Saving Progress Models
-If you want to save the model every two rounds, simply set save_period=2. You will find 0002.model in the current folder. If you want to change the output folder for models, add model_dir=foldername. By default, xgboost saves only the model of the last round.
-
-#### Continue from Existing Model
-If you want to continue boosting from an existing model, say 0002.model, use
-```
-../../xgboost mushroom.conf model_in=0002.model num_round=2 model_out=continue.model
-```
-xgboost will load from 0002.model, continue boosting for 2 more rounds, and save the output to continue.model. Beware that the training and evaluation data specified in mushroom.conf must not change when you use this function.
-
-#### Use Multi-Threading
-When you are working with a large dataset, you may want to take advantage of parallelism. If your compiler supports OpenMP, xgboost is naturally multi-threaded; to set the number of parallel threads, add the ```nthread``` parameter to your configuration, e.g. ```nthread=10```.
-
-Set nthread to the number of physical CPU cores (on Unix, this can be found using ```lscpu```).
-Some systems report ```Thread(s) per core = 2```; for example, a 4-core CPU may expose 8 threads. In such cases, set ```nthread=4```, not 8.
-
diff --git a/ml-xgboost/demo/binary_classification/agaricus-lepiota.data b/ml-xgboost/demo/binary_classification/agaricus-lepiota.data
deleted file mode 100644
index 14fe8bb..0000000
--- a/ml-xgboost/demo/binary_classification/agaricus-lepiota.data
+++ /dev/null
@@ -1,8124 +0,0 @@
-p,x,s,n,t,p,f,c,n,k,e,e,s,s,w,w,p,w,o,p,k,s,u
-e,x,s,y,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,n,g
-e,b,s,w,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,n,m
-p,x,y,w,t,p,f,c,n,n,e,e,s,s,w,w,p,w,o,p,k,s,u
-e,x,s,g,f,n,f,w,b,k,t,e,s,s,w,w,p,w,o,e,n,a,g
-e,x,y,y,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,n,g
-e,b,s,w,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,n,m
-e,b,y,w,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,s,m
-p,x,y,w,t,p,f,c,n,p,e,e,s,s,w,w,p,w,o,p,k,v,g
-e,b,s,y,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,s,m
-e,x,y,y,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,n,g
-e,x,y,y,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,s,m
-e,b,s,y,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,s,g
-p,x,y,w,t,p,f,c,n,k,e,e,s,s,w,w,p,w,o,p,n,v,u
-e,x,f,n,f,n,f,w,b,n,t,e,s,f,w,w,p,w,o,e,k,a,g
-e,s,f,g,f,n,f,c,n,k,e,e,s,s,w,w,p,w,o,p,n,y,u
-e,f,f,w,f,n,f,w,b,k,t,e,s,s,w,w,p,w,o,e,n,a,g
-p,x,s,n,t,p,f,c,n,n,e,e,s,s,w,w,p,w,o,p,k,s,g
-p,x,y,w,t,p,f,c,n,n,e,e,s,s,w,w,p,w,o,p,n,s,u
-p,x,s,n,t,p,f,c,n,k,e,e,s,s,w,w,p,w,o,p,n,s,u
-e,b,s,y,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,s,m
-p,x,y,n,t,p,f,c,n,n,e,e,s,s,w,w,p,w,o,p,n,v,g
-e,b,y,y,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,s,m
-e,b,y,w,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,n,m
-e,b,s,w,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,s,m
-p,f,s,w,t,p,f,c,n,n,e,e,s,s,w,w,p,w,o,p,n,v,g
-e,x,y,y,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,n,m
-e,x,y,w,t,l,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,n,m
-e,f,f,n,f,n,f,c,n,k,e,e,s,s,w,w,p,w,o,p,k,y,u
-e,x,s,y,t,a,f,w,n,n,t,b,s,s,w,w,p,w,o,p,n,v,d
-e,b,s,y,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,n,m
-p,x,y,w,t,p,f,c,n,k,e,e,s,s,w,w,p,w,o,p,n,s,u
-e,x,y,y,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,n,m
-e,x,y,n,t,l,f,c,b,p,e,r,s,y,w,w,p,w,o,p,n,y,p
-e,b,y,y,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,s,m
-e,x,f,y,t,l,f,w,n,w,t,b,s,s,w,w,p,w,o,p,n,v,d
-e,s,f,g,f,n,f,c,n,k,e,e,s,s,w,w,p,w,o,p,k,v,u
-p,x,y,n,t,p,f,c,n,w,e,e,s,s,w,w,p,w,o,p,n,s,u
-e,x,f,y,t,a,f,w,n,p,t,b,s,s,w,w,p,w,o,p,n,v,d
-e,b,s,y,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,k,s,m
-e,b,y,y,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,s,g
-e,x,y,y,t,l,f,c,b,n,e,r,s,y,w,w,p,w,o,p,k,y,p
-e,x,f,n,f,n,f,c,n,g,e,e,s,s,w,w,p,w,o,p,k,y,u
-p,x,y,w,t,p,f,c,n,p,e,e,s,s,w,w,p,w,o,p,n,v,g
-e,x,s,y,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,k,n,m
-e,x,y,w,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,n,g
-e,x,y,y,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,k,s,m
-e,x,s,w,t,l,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,n,m -e,x,y,y,t,l,f,c,b,n,e,r,s,y,w,w,p,w,o,p,n,s,p -e,f,y,y,t,l,f,c,b,w,e,r,s,y,w,w,p,w,o,p,k,s,p -e,x,y,n,t,a,f,c,b,w,e,r,s,y,w,w,p,w,o,p,k,s,g -e,x,s,w,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,k,s,g -e,b,s,w,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,n,m -p,x,y,n,t,p,f,c,n,k,e,e,s,s,w,w,p,w,o,p,n,v,u -p,x,s,w,t,p,f,c,n,k,e,e,s,s,w,w,p,w,o,p,k,v,u -e,b,y,y,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,k,s,m -e,f,f,g,f,n,f,w,b,n,t,e,s,s,w,w,p,w,o,e,n,a,g -e,b,s,w,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,n,g -e,x,s,y,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,k,n,g -e,x,y,n,t,a,f,c,b,p,e,r,s,y,w,w,p,w,o,p,k,y,p -e,s,f,g,f,n,f,c,n,k,e,e,s,s,w,w,p,w,o,p,n,v,u -e,b,y,y,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,s,m -e,b,s,y,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,s,m -e,b,y,y,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,n,m -e,b,y,w,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,s,g -e,f,s,n,f,n,f,w,b,k,t,e,s,s,w,w,p,w,o,e,k,a,g -e,x,s,w,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,s,g -e,f,y,y,t,a,f,c,b,w,e,r,s,y,w,w,p,w,o,p,n,s,g -e,x,y,y,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,k,n,g -e,x,f,g,f,n,f,c,n,p,e,e,s,s,w,w,p,w,o,p,n,v,u -e,f,f,y,t,l,f,w,n,p,t,b,s,s,w,w,p,w,o,p,n,v,d -e,b,y,w,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,s,m -e,f,f,y,t,l,f,w,n,w,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,y,n,t,a,f,c,b,p,e,r,s,y,w,w,p,w,o,p,k,s,p -e,b,s,y,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,k,s,g -e,f,s,y,t,l,f,w,n,p,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,s,w,t,l,f,w,n,n,t,b,s,s,w,w,p,w,o,p,u,v,d -e,f,y,n,t,l,f,c,b,p,e,r,s,y,w,w,p,w,o,p,n,y,p -p,x,y,n,t,p,f,c,n,w,e,e,s,s,w,w,p,w,o,p,n,v,u -e,f,y,n,t,a,f,c,b,n,e,r,s,y,w,w,p,w,o,p,n,y,g -e,x,s,n,f,n,f,w,b,k,t,e,f,s,w,w,p,w,o,e,n,s,g -p,x,y,w,t,p,f,c,n,w,e,e,s,s,w,w,p,w,o,p,k,s,g -e,f,f,g,f,n,f,c,n,n,e,e,s,s,w,w,p,w,o,p,n,y,u -e,x,f,g,f,n,f,w,b,n,t,e,s,s,w,w,p,w,o,e,n,s,g -e,x,y,y,t,l,f,c,b,w,e,r,s,y,w,w,p,w,o,p,k,s,g -e,x,s,n,f,n,f,w,b,k,t,e,s,s,w,w,p,w,o,e,k,s,g -e,b,s,w,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,k,s,g -e,x,s,w,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,s,g -e,f,y,n,t,l,f,c,b,w,e,r,s,y,w,w,p,w,o,p,k,y,g -e,s,f,n,f,n,f,c,n,n,e,e,s,s,w,w,p,w,o,p,n,v,u -e,x,f,n,f,n,f,c,n,n,e,e,s,s,w,w,p,w,o,p,n,y,u -e,b,s,w,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,k,s,g -e,x,y,y,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,s,g -e,x,y,y,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,n,m -e,x,s,n,f,n,f,w,b,n,t,e,s,s,w,w,p,w,o,e,n,a,g -e,x,s,w,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,s,g -e,f,y,n,t,l,f,c,b,p,e,r,s,y,w,w,p,w,o,p,n,s,g -e,x,s,y,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,n,g -e,b,s,w,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,s,g -e,x,y,w,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,s,g -e,x,f,n,f,n,f,w,b,p,t,e,f,s,w,w,p,w,o,e,k,s,g -e,b,s,y,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,n,g -e,f,y,y,t,l,f,c,b,w,e,r,s,y,w,w,p,w,o,p,n,s,g -e,x,y,y,t,a,f,c,b,n,e,r,s,y,w,w,p,w,o,p,k,y,p -e,b,y,w,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,n,g -e,x,y,y,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,n,m -e,x,y,y,t,a,f,c,b,w,e,r,s,y,w,w,p,w,o,p,n,y,g -e,b,y,w,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,s,m -e,b,y,w,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,s,m -e,x,s,y,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,k,n,m -e,x,s,y,t,l,f,c,b,w,e,c,s,s,w,w,p,w,o,p,k,n,g -e,s,f,g,f,n,f,c,n,g,e,e,s,s,w,w,p,w,o,p,k,y,u -e,x,f,w,t,a,f,w,n,w,t,b,s,s,w,w,p,w,o,p,u,v,d -e,x,s,y,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,n,m -p,x,y,w,t,p,f,c,n,n,e,e,s,s,w,w,p,w,o,p,n,v,u -e,x,y,y,t,l,f,c,b,p,e,r,s,y,w,w,p,w,o,p,n,s,g -e,s,f,g,f,n,f,c,n,p,e,e,s,s,w,w,p,w,o,p,n,y,u -e,x,y,y,t,l,f,c,b,w,e,r,s,y,w,w,p,w,o,p,k,y,g -e,x,s,y,t,l,f,w,n,p,t,b,s,s,w,w,p,w,o,p,u,v,d -e,s,f,n,f,n,f,c,n,k,e,e,s,s,w,w,p,w,o,p,n,y,u -p,x,s,w,t,p,f,c,n,k,e,e,s,s,w,w,p,w,o,p,k,v,g -e,x,y,w,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,n,m 
-p,f,y,n,t,p,f,c,n,p,e,e,s,s,w,w,p,w,o,p,k,v,g -e,f,s,g,f,n,f,w,b,k,t,e,s,s,w,w,p,w,o,e,n,a,g -e,x,s,y,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,s,m -e,x,s,w,f,n,f,w,b,n,t,e,s,f,w,w,p,w,o,e,k,s,g -e,b,s,y,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,n,g -e,f,f,g,f,n,f,w,b,h,t,e,s,s,w,w,p,w,o,e,n,a,g -e,x,s,w,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,n,g -e,b,s,w,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,s,m -e,b,s,w,t,l,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,s,g -e,b,y,w,t,l,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,s,m -e,f,s,w,t,l,f,w,n,w,t,b,s,s,w,w,p,w,o,p,u,v,d -e,x,y,y,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,s,m -e,f,s,w,t,a,f,w,n,p,t,b,s,s,w,w,p,w,o,p,n,v,d -p,x,y,w,t,p,f,c,n,w,e,e,s,s,w,w,p,w,o,p,n,v,u -e,f,f,w,t,l,f,w,n,w,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,y,y,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,s,g -p,x,s,n,t,p,f,c,n,p,e,e,s,s,w,w,p,w,o,p,n,v,g -e,b,s,y,t,l,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,n,g -e,x,y,n,t,a,f,c,b,w,e,r,s,y,w,w,p,w,o,p,k,y,p -e,b,y,y,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,n,m -e,s,f,n,f,n,f,c,n,k,e,e,s,s,w,w,p,w,o,p,n,v,u -e,f,y,n,t,a,f,c,b,w,e,r,s,y,w,w,p,w,o,p,k,y,p -e,x,y,y,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,k,n,g -e,x,f,g,f,n,f,w,b,k,t,e,f,f,w,w,p,w,o,e,k,s,g -e,f,f,w,f,n,f,w,b,k,t,e,s,f,w,w,p,w,o,e,n,a,g -e,x,y,y,t,l,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,n,m -e,b,s,y,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,k,n,g -e,b,y,w,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,n,m -e,x,y,w,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,s,g -e,x,s,n,f,n,f,w,b,p,t,e,f,s,w,w,p,w,o,e,n,a,g -e,x,y,w,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,s,g -e,s,f,n,f,n,f,c,n,k,e,e,s,s,w,w,p,w,o,p,k,v,u -e,x,s,w,t,a,f,w,n,w,t,b,s,s,w,w,p,w,o,p,u,v,d -e,x,y,n,t,l,f,c,b,w,e,r,s,y,w,w,p,w,o,p,k,s,g -e,b,y,y,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,n,g -e,x,y,w,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,n,g -e,b,y,w,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,s,m -e,b,s,y,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,s,g -e,b,s,y,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,s,m -e,b,y,y,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,n,g -e,x,f,n,f,n,f,c,n,k,e,e,s,s,w,w,p,w,o,p,n,y,u -e,f,y,n,t,l,f,c,b,n,e,r,s,y,w,w,p,w,o,p,n,y,g -e,x,y,w,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,s,g -e,f,y,y,t,l,f,c,b,w,e,r,s,y,w,w,p,w,o,p,n,y,p -e,b,s,w,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,s,g -e,b,s,w,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,k,s,m -e,x,y,n,t,l,f,c,b,w,e,r,s,y,w,w,p,w,o,p,k,y,g -e,b,s,w,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,k,s,g -e,x,f,g,f,n,f,c,n,g,e,e,s,s,w,w,p,w,o,p,n,y,u -e,b,s,y,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,s,g -e,x,f,y,t,l,f,w,n,n,t,b,s,s,w,w,p,w,o,p,u,v,d -e,b,y,y,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,k,s,g -e,f,y,y,t,l,f,c,b,p,e,r,s,y,w,w,p,w,o,p,n,s,g -e,b,y,w,t,l,f,c,b,w,e,c,s,s,w,w,p,w,o,p,k,n,m -e,b,y,w,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,k,n,m -e,b,y,y,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,s,g -e,x,y,y,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,n,m -e,b,s,y,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,n,g -p,x,y,w,t,p,f,c,n,p,e,e,s,s,w,w,p,w,o,p,n,v,u -e,s,f,n,f,n,f,c,n,g,e,e,s,s,w,w,p,w,o,p,n,y,u -e,f,f,n,f,n,f,c,n,g,e,e,s,s,w,w,p,w,o,p,k,v,u -e,x,s,y,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,s,m -e,f,y,n,t,a,f,c,b,p,e,r,s,y,w,w,p,w,o,p,k,s,p -p,x,y,w,t,p,f,c,n,k,e,e,s,s,w,w,p,w,o,p,k,s,g -e,b,s,w,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,s,m -e,f,f,g,f,n,f,c,n,p,e,e,s,s,w,w,p,w,o,p,k,v,u -e,b,y,y,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,n,g -e,x,y,n,t,a,f,c,b,w,e,r,s,y,w,w,p,w,o,p,n,y,p -e,x,f,w,f,n,f,w,b,p,t,e,s,f,w,w,p,w,o,e,k,s,g -e,x,s,w,t,l,f,w,n,w,t,b,s,s,w,w,p,w,o,p,n,v,d -e,b,s,w,t,l,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,s,m -e,f,s,y,t,a,f,w,n,w,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,s,y,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,n,m -e,f,f,g,f,n,f,c,n,g,e,e,s,s,w,w,p,w,o,p,n,y,u -e,b,s,y,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,s,g 
-e,x,s,w,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,n,m -e,x,y,w,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,s,m -e,f,s,w,t,a,f,w,n,n,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,y,y,t,l,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,s,g -e,b,s,w,t,l,f,c,b,w,e,c,s,s,w,w,p,w,o,p,k,s,g -e,x,s,w,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,n,g -e,x,f,w,f,n,f,w,b,h,t,e,f,s,w,w,p,w,o,e,k,s,g -e,f,y,n,t,l,f,c,b,n,e,r,s,y,w,w,p,w,o,p,k,y,p -p,x,s,w,t,p,f,c,n,k,e,e,s,s,w,w,p,w,o,p,n,v,u -e,b,s,w,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,n,g -e,b,s,w,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,k,s,m -e,b,y,w,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,n,g -e,b,y,w,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,k,n,g -e,x,s,y,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,k,s,g -e,b,s,w,t,l,f,c,b,w,e,c,s,s,w,w,p,w,o,p,k,n,m -e,x,f,y,t,a,f,w,n,n,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,f,g,f,n,f,c,n,n,e,e,s,s,w,w,p,w,o,p,n,y,u -e,f,y,y,t,a,f,c,b,n,e,r,s,y,w,w,p,w,o,p,n,s,g -e,b,s,w,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,n,m -e,x,s,y,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,s,g -e,x,y,y,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,k,s,g -e,x,y,w,t,l,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,s,g -e,s,f,g,f,n,f,c,n,p,e,e,s,s,w,w,p,w,o,p,n,v,u -e,x,s,w,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,n,m -p,x,s,w,t,p,f,c,n,k,e,e,s,s,w,w,p,w,o,p,k,s,g -e,x,y,y,t,a,f,c,b,n,e,r,s,y,w,w,p,w,o,p,n,s,g -e,f,f,w,t,a,f,w,n,p,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,y,w,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,n,m -e,b,y,w,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,s,g -e,x,s,w,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,n,g -e,x,s,y,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,s,g -p,x,y,n,t,p,f,c,n,p,e,e,s,s,w,w,p,w,o,p,k,v,u -e,b,s,y,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,n,g -e,x,f,g,f,n,f,c,n,n,e,e,s,s,w,w,p,w,o,p,k,y,u -p,x,y,w,t,p,f,c,n,k,e,e,s,s,w,w,p,w,o,p,k,s,u -e,x,y,y,t,l,f,c,b,n,e,r,s,y,w,w,p,w,o,p,n,y,p -e,f,f,n,f,n,f,c,n,k,e,e,s,s,w,w,p,w,o,p,k,v,u -e,b,s,w,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,n,g -e,x,f,w,t,l,f,w,n,w,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,y,w,t,l,f,c,b,w,e,c,s,s,w,w,p,w,o,p,k,s,g -e,b,y,y,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,s,g -e,x,y,y,t,l,f,c,b,n,e,r,s,y,w,w,p,w,o,p,k,s,g -e,f,y,y,t,a,f,c,b,p,e,r,s,y,w,w,p,w,o,p,k,s,p -e,f,y,y,t,a,f,c,b,w,e,r,s,y,w,w,p,w,o,p,k,y,p -e,x,s,w,t,l,f,c,b,w,e,c,s,s,w,w,p,w,o,p,k,s,g -e,x,s,w,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,n,m -p,x,s,w,t,p,f,c,n,n,e,e,s,s,w,w,p,w,o,p,k,v,u -e,f,f,w,t,a,f,w,n,p,t,b,s,s,w,w,p,w,o,p,u,v,d -e,x,s,w,t,l,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,s,g -e,x,s,w,t,l,f,w,n,p,t,b,s,s,w,w,p,w,o,p,u,v,d -e,x,y,w,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,s,m -e,f,y,y,t,l,f,c,b,w,e,r,s,y,w,w,p,w,o,p,k,y,p -e,x,s,n,f,n,f,w,b,p,t,e,f,s,w,w,p,w,o,e,k,s,g -e,f,y,y,t,a,f,c,b,w,e,r,s,y,w,w,p,w,o,p,n,y,g -p,x,s,n,t,p,f,c,n,p,e,e,s,s,w,w,p,w,o,p,n,s,g -e,s,f,n,f,n,f,c,n,g,e,e,s,s,w,w,p,w,o,p,n,v,u -e,b,y,y,t,l,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,s,m -e,b,s,w,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,s,g -e,b,y,w,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,n,m -e,f,f,n,f,n,f,c,n,k,e,e,s,s,w,w,p,w,o,p,n,v,u -e,x,s,w,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,k,n,m -e,b,y,w,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,n,g -e,b,y,w,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,n,m -e,f,y,n,t,l,f,c,b,w,e,r,s,y,w,w,p,w,o,p,n,s,g -p,x,y,w,t,p,f,c,n,k,e,e,s,s,w,w,p,w,o,p,k,v,g -e,x,s,w,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,n,g -e,x,y,w,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,s,m -e,f,f,w,t,l,f,w,n,w,t,b,s,s,w,w,p,w,o,p,u,v,d -e,f,f,g,f,n,f,c,n,g,e,e,s,s,w,w,p,w,o,p,k,v,u -e,f,s,g,f,n,f,w,b,p,t,e,s,s,w,w,p,w,o,e,k,a,g -e,x,y,y,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,s,m -e,b,s,w,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,s,m -p,f,y,n,t,p,f,c,n,w,e,e,s,s,w,w,p,w,o,p,k,s,u -e,x,s,w,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,n,m -p,f,s,n,t,p,f,c,n,w,e,e,s,s,w,w,p,w,o,p,k,v,u 
-e,x,y,w,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,s,g -e,x,y,y,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,n,m -e,f,y,y,t,a,f,c,b,n,e,r,s,y,w,w,p,w,o,p,n,s,p -e,x,y,n,t,a,f,c,b,w,e,r,s,y,w,w,p,w,o,p,n,s,p -e,f,y,n,t,a,f,c,b,w,e,r,s,y,w,w,p,w,o,p,n,y,g -e,x,y,w,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,n,m -e,f,f,n,f,n,f,w,b,h,t,e,s,s,w,w,p,w,o,e,k,s,g -e,x,s,y,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,n,m -p,x,y,w,t,p,f,c,n,w,e,e,s,s,w,w,p,w,o,p,n,s,g -e,b,y,y,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,n,m -e,s,f,n,f,n,f,c,n,p,e,e,s,s,w,w,p,w,o,p,n,v,u -e,x,s,y,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,k,s,m -e,b,y,w,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,n,m -e,f,y,n,t,a,f,c,b,n,e,r,s,y,w,w,p,w,o,p,k,s,g -e,b,y,y,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,s,m -e,b,y,w,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,s,g -e,x,y,n,t,l,f,c,b,n,e,r,s,y,w,w,p,w,o,p,n,y,p -e,f,f,g,f,n,f,c,n,p,e,e,s,s,w,w,p,w,o,p,n,y,u -e,x,f,g,f,n,f,c,n,g,e,e,s,s,w,w,p,w,o,p,k,y,u -e,b,y,y,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,s,g -e,x,s,y,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,s,m -e,b,y,w,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,s,g -e,x,s,w,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,s,m -e,b,s,y,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,n,m -e,x,s,y,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,k,s,m -e,x,f,g,f,n,f,w,b,p,t,e,f,f,w,w,p,w,o,e,k,s,g -e,f,s,y,t,a,f,w,n,p,t,b,s,s,w,w,p,w,o,p,n,v,d -p,x,y,w,t,p,f,c,n,p,e,e,s,s,w,w,p,w,o,p,k,s,g -e,x,f,w,f,n,f,w,b,k,t,e,f,s,w,w,p,w,o,e,k,a,g -e,b,y,w,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,k,s,g -e,x,s,y,t,l,f,w,n,w,t,b,s,s,w,w,p,w,o,p,n,v,d -e,b,y,w,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,s,g -e,x,y,w,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,s,g -e,x,f,n,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,n,y,d -e,b,y,w,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,s,m -e,x,s,y,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,n,m -e,f,y,n,t,a,f,c,b,p,e,r,s,y,w,w,p,w,o,p,k,y,g -e,x,f,w,f,n,f,w,b,n,t,e,s,s,w,w,p,w,o,e,n,s,g -e,x,s,y,t,l,f,c,b,w,e,c,s,s,w,w,p,w,o,p,k,s,g -p,x,y,w,t,p,f,c,n,n,e,e,s,s,w,w,p,w,o,p,n,v,g -e,x,y,y,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,s,g -e,x,f,w,t,a,f,w,n,p,t,b,s,s,w,w,p,w,o,p,n,v,d -e,b,s,y,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,n,g -p,x,y,w,t,p,f,c,n,k,e,e,s,s,w,w,p,w,o,p,k,v,u -e,x,s,y,t,a,f,w,n,w,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,y,w,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,k,n,m -e,x,f,w,t,l,f,w,n,n,t,b,s,s,w,w,p,w,o,p,n,v,d -e,f,s,w,t,l,f,w,n,w,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,y,y,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,s,m -e,f,f,y,t,a,f,w,n,w,t,b,s,s,w,w,p,w,o,p,u,v,d -e,x,s,w,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,k,s,g -e,b,y,y,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,s,m -e,x,y,n,t,l,f,c,b,p,e,r,s,y,w,w,p,w,o,p,n,s,p -e,b,y,w,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,k,n,g -e,x,s,y,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,s,m -p,x,y,n,t,p,f,c,n,n,e,e,s,s,w,w,p,w,o,p,n,v,u -e,b,s,y,t,l,f,c,b,w,e,c,s,s,w,w,p,w,o,p,k,n,g -e,b,y,w,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,n,g -p,x,y,n,t,p,f,c,n,w,e,e,s,s,w,w,p,w,o,p,k,v,u -e,b,s,w,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,s,m -e,b,y,y,t,l,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,n,m -e,b,y,y,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,s,g -e,x,y,w,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,n,g -e,x,f,n,t,n,f,c,b,p,t,b,s,s,p,w,p,w,o,p,k,y,d -e,x,y,n,t,a,f,c,b,w,e,r,s,y,w,w,p,w,o,p,k,y,g -e,b,s,y,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,s,g -e,x,f,g,f,n,f,c,n,g,e,e,s,s,w,w,p,w,o,p,n,v,u -e,x,y,n,t,l,f,c,b,n,e,r,s,y,w,w,p,w,o,p,k,y,g -e,x,s,w,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,s,g -e,b,y,w,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,n,g -e,x,s,y,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,s,m -e,f,f,y,t,a,f,w,n,p,t,b,s,s,w,w,p,w,o,p,n,v,d -e,b,y,y,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,s,g -e,x,f,n,f,n,f,w,b,n,t,e,s,f,w,w,p,w,o,e,n,a,g -e,x,f,y,t,l,f,w,n,n,t,b,s,s,w,w,p,w,o,p,n,v,d 
-e,x,s,y,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,n,m -e,f,s,g,f,n,f,w,b,n,t,e,s,f,w,w,p,w,o,e,k,s,g -e,x,f,n,f,n,f,c,n,g,e,e,s,s,w,w,p,w,o,p,n,y,u -e,f,s,g,f,n,f,w,b,h,t,e,f,f,w,w,p,w,o,e,n,a,g -e,f,y,n,t,a,f,c,b,p,e,r,s,y,w,w,p,w,o,p,n,s,p -e,b,y,w,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,s,g -e,b,s,y,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,k,s,m -e,x,y,y,t,l,f,c,b,w,e,r,s,y,w,w,p,w,o,p,k,s,p -e,s,f,g,f,n,f,c,n,g,e,e,s,s,w,w,p,w,o,p,n,v,u -e,f,y,n,t,a,f,c,b,p,e,r,s,y,w,w,p,w,o,p,n,y,p -p,x,y,n,t,p,f,c,n,k,e,e,s,s,w,w,p,w,o,p,k,s,u -e,x,y,y,t,a,f,c,b,p,e,r,s,y,w,w,p,w,o,p,n,y,p -e,x,y,y,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,s,g -e,x,f,n,f,n,f,w,b,h,t,e,s,f,w,w,p,w,o,e,n,s,g -e,x,f,n,f,n,f,w,b,p,t,e,f,f,w,w,p,w,o,e,k,a,g -e,f,s,n,f,n,f,w,b,k,t,e,f,s,w,w,p,w,o,e,k,a,g -e,f,y,n,t,a,f,c,b,n,e,r,s,y,w,w,p,w,o,p,n,s,p -e,x,s,y,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,k,n,g -e,f,f,n,f,n,f,c,n,p,e,e,s,s,w,w,p,w,o,p,k,v,u -e,x,y,y,t,a,f,c,b,w,e,r,s,y,w,w,p,w,o,p,n,s,p -e,x,f,y,t,a,f,w,n,p,t,b,s,s,w,w,p,w,o,p,u,v,d -e,b,y,y,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,n,g -e,x,f,g,f,n,f,w,b,n,t,e,s,s,w,w,p,w,o,e,k,s,g -e,x,y,y,t,a,f,c,b,w,e,r,s,y,w,w,p,w,o,p,n,s,g -e,f,y,n,t,l,f,c,b,n,e,r,s,y,w,w,p,w,o,p,k,y,g -e,x,s,y,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,k,s,m -e,s,f,n,f,n,f,c,n,n,e,e,s,s,w,w,p,w,o,p,n,y,u -e,x,s,w,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,k,s,m -e,b,s,w,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,n,g -e,s,f,n,f,n,f,c,n,k,e,e,s,s,w,w,p,w,o,p,k,y,u -e,b,y,y,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,n,m -e,x,y,y,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,n,g -e,b,y,y,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,s,g -p,x,y,n,t,p,f,c,n,p,e,e,s,s,w,w,p,w,o,p,k,s,u -e,x,s,w,f,n,f,w,b,k,t,e,f,s,w,w,p,w,o,e,n,s,g -e,x,y,w,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,k,s,m -e,b,s,w,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,n,g -e,x,f,n,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,n,y,d -p,x,s,n,t,p,f,c,n,w,e,e,s,s,w,w,p,w,o,p,n,s,u -e,x,y,n,t,l,f,c,b,w,e,r,s,y,w,w,p,w,o,p,k,y,p -e,x,s,n,f,n,f,w,b,n,t,e,s,s,w,w,p,w,o,e,k,a,g -e,b,y,y,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,n,m -e,x,s,y,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,n,m -e,b,s,y,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,s,m -e,x,s,y,t,l,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,n,m -e,b,y,w,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,n,g -e,x,f,g,f,n,f,c,n,k,e,e,s,s,w,w,p,w,o,p,n,v,u -e,f,y,n,t,a,f,c,b,w,e,r,s,y,w,w,p,w,o,p,k,s,g -e,x,f,g,f,n,f,w,b,h,t,e,f,f,w,w,p,w,o,e,n,a,g -e,x,s,w,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,n,m -e,x,y,y,t,l,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,s,m -e,x,f,g,f,n,f,c,n,n,e,e,s,s,w,w,p,w,o,p,k,v,u -p,x,s,n,t,p,f,c,n,p,e,e,s,s,w,w,p,w,o,p,n,s,u -e,b,y,w,t,l,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,n,g -e,b,y,w,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,n,m -p,x,y,n,t,p,f,c,n,p,e,e,s,s,w,w,p,w,o,p,n,v,u -e,b,s,y,t,l,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,s,m -e,x,s,w,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,n,s,m -e,x,y,y,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,s,m -e,b,y,y,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,n,n,g -e,x,s,w,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,s,m -e,x,s,w,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,k,s,g -e,x,y,n,t,l,f,c,b,w,e,r,s,y,w,w,p,w,o,p,n,y,p -e,x,s,w,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,s,m -e,x,s,n,f,n,f,w,b,n,t,e,f,f,w,w,p,w,o,e,k,s,g -e,x,y,n,t,l,f,c,b,n,e,r,s,y,w,w,p,w,o,p,n,s,p -e,x,y,y,t,l,f,c,b,n,e,r,s,y,w,w,p,w,o,p,k,y,g -p,x,y,n,t,p,f,c,n,k,e,e,s,s,w,w,p,w,o,p,k,v,g -e,b,y,y,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,n,g -e,x,f,w,t,l,f,w,n,p,t,b,s,s,w,w,p,w,o,p,u,v,d -p,x,s,n,t,p,f,c,n,w,e,e,s,s,w,w,p,w,o,p,k,s,u -e,x,y,y,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,k,n,g -e,f,f,g,f,n,f,w,b,h,t,e,f,f,w,w,p,w,o,e,n,a,g -e,x,y,w,t,l,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,s,g -e,b,y,w,t,a,f,c,b,w,e,c,s,s,w,w,p,w,o,p,n,s,m 
[Deleted data hunk continues: one UCI mushroom (agaricus-lepiota) record per "-" line, 23 comma-separated single-letter fields (class "e" = edible / "p" = poisonous, followed by 22 categorical attributes such as cap shape, cap surface, cap color, and odor); the individual removed rows are not reproduced here.]
-e,f,f,n,f,n,f,w,b,p,t,e,s,f,w,w,p,w,o,e,k,a,g -e,x,s,n,f,n,f,w,b,k,t,e,f,f,w,w,p,w,o,e,k,a,g -e,x,s,n,f,n,f,w,b,k,t,e,s,f,w,w,p,w,o,e,k,s,g -e,x,f,n,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,g,w,p,w,o,p,n,v,d -e,f,s,n,f,n,f,w,b,k,t,e,f,f,w,w,p,w,o,e,k,a,g -e,x,f,n,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,k,y,d -p,f,y,w,t,p,f,c,n,w,e,e,s,s,w,w,p,w,o,p,n,v,u -e,x,y,y,t,l,f,c,b,n,e,r,s,y,w,w,p,w,o,p,n,s,g -p,f,s,n,t,p,f,c,n,p,e,e,s,s,w,w,p,w,o,p,k,s,g -e,f,f,w,f,n,f,w,b,h,t,e,f,s,w,w,p,w,o,e,n,s,g -e,f,f,n,f,n,f,w,b,n,t,e,f,f,w,w,p,w,o,e,k,a,g -p,f,s,n,t,p,f,c,n,w,e,e,s,s,w,w,p,w,o,p,n,v,g -e,x,s,n,f,n,f,w,b,k,t,e,f,f,w,w,p,w,o,e,n,a,g -e,x,f,n,f,n,f,w,b,n,t,e,s,f,w,w,p,w,o,e,k,s,g -e,x,f,e,t,n,f,c,b,u,t,b,s,s,g,w,p,w,o,p,k,v,d -e,f,f,n,f,n,f,w,b,k,t,e,s,s,w,w,p,w,o,e,k,a,g -e,x,f,w,f,n,f,w,b,k,t,e,s,s,w,w,p,w,o,e,n,s,g -e,f,s,g,f,n,f,w,b,p,t,e,f,s,w,w,p,w,o,e,n,a,g -e,f,f,w,f,n,f,w,b,h,t,e,f,s,w,w,p,w,o,e,k,s,g -e,f,s,y,t,a,f,w,n,n,t,b,s,s,w,w,p,w,o,p,n,v,d -e,f,f,w,t,a,f,w,n,n,t,b,s,s,w,w,p,w,o,p,n,v,d -e,f,f,n,f,n,f,w,b,p,t,e,s,f,w,w,p,w,o,e,n,a,g -e,x,f,n,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,n,y,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,g,p,p,w,o,p,k,y,d -e,x,f,n,t,n,f,c,b,u,t,b,s,s,w,g,p,w,o,p,n,y,d -e,x,f,n,f,n,f,w,b,k,t,e,s,f,w,w,p,w,o,e,k,s,g -e,x,f,g,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,n,y,d -e,x,f,n,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,k,v,d -e,x,f,g,f,n,f,w,b,p,t,e,f,s,w,w,p,w,o,e,k,a,g -e,x,f,n,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,n,v,d -e,x,s,n,f,n,f,w,b,h,t,e,f,f,w,w,p,w,o,e,k,a,g -e,b,y,w,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,k,s,m -e,x,y,e,t,n,f,c,b,u,t,b,s,s,w,p,p,w,o,p,k,v,d -e,f,s,n,f,n,f,w,b,h,t,e,s,f,w,w,p,w,o,e,n,s,g -e,x,f,e,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,k,y,d -e,f,y,n,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,k,v,d -e,x,s,g,f,n,f,w,b,p,t,e,s,s,w,w,p,w,o,e,n,a,g -e,x,s,n,f,n,f,w,b,k,t,e,f,s,w,w,p,w,o,e,k,s,g -e,x,f,g,f,n,f,w,b,p,t,e,f,f,w,w,p,w,o,e,n,a,g -e,x,s,g,f,n,f,w,b,n,t,e,s,s,w,w,p,w,o,e,k,a,g -e,x,y,e,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,k,y,d -e,f,f,g,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,k,y,d -e,x,f,n,f,n,f,w,b,k,t,e,f,f,w,w,p,w,o,e,n,s,g -e,f,s,n,f,n,f,w,b,n,t,e,s,f,w,w,p,w,o,e,n,s,g -e,x,f,w,f,n,f,w,b,n,t,e,s,s,w,w,p,w,o,e,k,s,g -e,x,y,y,t,a,f,c,b,p,e,r,s,y,w,w,p,w,o,p,n,s,p -e,x,f,e,t,n,f,c,b,u,t,b,s,s,w,p,p,w,o,p,n,v,d -e,x,f,n,f,n,f,w,b,h,t,e,f,s,w,w,p,w,o,e,k,s,g -e,x,y,n,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,k,v,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,n,y,d -e,x,f,w,f,n,f,w,b,k,t,e,f,s,w,w,p,w,o,e,n,a,g -e,x,y,w,t,a,f,c,b,k,e,c,s,s,w,w,p,w,o,p,k,s,m -e,x,s,n,f,n,f,w,b,p,t,e,s,s,w,w,p,w,o,e,n,s,g -e,f,s,w,f,n,f,w,b,p,t,e,s,s,w,w,p,w,o,e,n,a,g -e,f,f,n,t,n,f,c,b,u,t,b,s,s,g,w,p,w,o,p,n,v,d -e,f,f,w,f,n,f,w,b,k,t,e,f,f,w,w,p,w,o,e,k,s,g -e,x,f,g,t,n,f,c,b,u,t,b,s,s,g,p,p,w,o,p,n,v,d -e,x,f,n,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,n,y,d -e,x,f,w,f,n,f,w,b,p,t,e,s,f,w,w,p,w,o,e,n,a,g -e,x,s,w,t,a,f,c,b,n,e,c,s,s,w,w,p,w,o,p,k,n,m -e,f,s,g,f,n,f,w,b,n,t,e,f,s,w,w,p,w,o,e,n,s,g -p,f,y,n,t,p,f,c,n,n,e,e,s,s,w,w,p,w,o,p,n,v,g -e,f,f,g,f,n,f,w,b,p,t,e,f,s,w,w,p,w,o,e,n,s,g -e,x,f,n,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,n,v,d -e,x,f,w,f,n,f,w,b,n,t,e,s,f,w,w,p,w,o,e,n,s,g -e,x,y,n,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,n,v,d -p,f,s,n,t,p,f,c,n,n,e,e,s,s,w,w,p,w,o,p,k,v,u -e,f,s,g,f,n,f,w,b,p,t,e,s,s,w,w,p,w,o,e,n,a,g -e,x,f,g,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,k,y,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,g,p,p,w,o,p,k,y,d -e,x,f,g,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,k,v,d 
-e,x,f,g,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,n,y,d -e,x,f,n,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,k,y,d -p,x,s,w,t,p,f,c,n,p,e,e,s,s,w,w,p,w,o,p,n,v,u -e,x,s,y,t,l,f,c,b,k,e,c,s,s,w,w,p,w,o,p,k,n,m -e,x,f,n,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,n,v,d -e,f,y,n,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,n,y,d -e,x,s,g,f,n,f,w,b,n,t,e,f,f,w,w,p,w,o,e,n,s,g -e,x,y,e,t,n,f,c,b,u,t,b,s,s,w,p,p,w,o,p,n,y,d -e,x,f,g,f,n,f,w,b,k,t,e,f,s,w,w,p,w,o,e,n,s,g -e,f,s,w,f,n,f,w,b,p,t,e,f,s,w,w,p,w,o,e,n,s,g -e,x,f,n,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,w,p,p,w,o,p,n,v,d -e,f,f,g,f,n,f,w,b,n,t,e,s,f,w,w,p,w,o,e,k,s,g -e,f,f,g,f,n,f,w,b,n,t,e,s,s,w,w,p,w,o,e,k,s,g -e,f,f,n,f,n,f,w,b,h,t,e,f,f,w,w,p,w,o,e,n,s,g -e,x,f,g,t,n,f,c,b,p,t,b,s,s,g,w,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,n,y,d -p,f,y,n,t,p,f,c,n,p,e,e,s,s,w,w,p,w,o,p,k,s,u -e,x,y,g,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,k,v,d -e,x,s,w,f,n,f,w,b,k,t,e,s,f,w,w,p,w,o,e,n,a,g -e,b,s,w,t,a,f,c,b,g,e,c,s,s,w,w,p,w,o,p,n,s,m -e,x,s,w,f,n,f,w,b,h,t,e,f,f,w,w,p,w,o,e,n,s,g -e,f,f,n,f,n,f,w,b,k,t,e,f,f,w,w,p,w,o,e,k,s,g -e,x,y,g,t,n,f,c,b,p,t,b,s,s,g,w,p,w,o,p,k,y,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,f,g,f,n,f,w,b,p,t,e,f,s,w,w,p,w,o,e,k,s,g -p,x,s,w,t,p,f,c,n,n,e,e,s,s,w,w,p,w,o,p,k,s,g -e,x,s,n,f,n,f,w,b,k,t,e,s,s,w,w,p,w,o,e,n,a,g -e,x,y,g,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,k,y,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,n,v,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,g,p,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,u,t,b,s,s,g,g,p,w,o,p,k,y,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,k,y,d -e,x,f,g,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,k,v,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,k,v,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,g,w,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,g,p,p,w,o,p,n,v,d -p,x,s,w,t,p,f,c,n,w,e,e,s,s,w,w,p,w,o,p,n,v,g -e,x,f,e,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,n,v,d -e,x,y,g,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,k,v,d -e,x,f,n,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,w,g,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,g,p,p,w,o,p,k,v,d -e,x,f,g,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,k,v,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,g,w,p,w,o,p,k,v,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,n,y,d -e,f,y,e,t,n,f,c,b,n,t,b,s,s,w,w,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,k,v,d -e,x,f,n,t,n,f,c,b,u,t,b,s,s,w,p,p,w,o,p,k,v,d -e,f,y,g,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,n,v,d -e,x,f,g,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,n,y,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,k,v,d -p,x,f,g,f,f,f,c,b,p,e,b,k,k,b,b,p,w,o,l,h,y,g -p,f,s,w,t,p,f,c,n,p,e,e,s,s,w,w,p,w,o,p,k,s,g -e,x,f,e,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,k,v,d -e,x,f,w,f,n,f,w,b,h,t,e,s,s,w,w,p,w,o,e,n,s,g -e,x,y,n,t,n,f,c,b,u,t,b,s,s,g,w,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,n,y,d -e,x,f,n,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,n,v,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,n,y,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,g,w,p,w,o,p,n,v,d -e,x,f,e,t,n,f,c,b,u,t,b,s,s,w,p,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,w,g,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,p,w,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,w,w,p,w,o,p,k,y,d -e,f,y,n,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,n,v,d -e,f,y,n,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,n,v,d 
-p,f,s,n,t,p,f,c,n,n,e,e,s,s,w,w,p,w,o,p,n,v,u -e,f,f,e,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,n,y,d -p,x,s,w,t,p,f,c,n,w,e,e,s,s,w,w,p,w,o,p,k,s,g -e,x,f,e,t,n,f,c,b,u,t,b,s,s,g,p,p,w,o,p,k,y,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,k,y,d -e,x,y,n,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,n,v,d -e,f,f,e,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,n,v,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,w,g,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,u,t,b,s,s,w,g,p,w,o,p,k,v,d -e,x,f,n,t,n,f,c,b,u,t,b,s,s,w,g,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,g,w,p,w,o,p,n,y,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,u,t,b,s,s,w,p,p,w,o,p,k,y,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,w,p,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,k,y,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,n,v,d -e,x,y,g,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,k,v,d -e,x,f,g,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,n,v,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,n,v,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,w,p,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,g,w,p,w,o,p,k,y,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,n,v,d -p,x,f,g,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,y,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,n,y,d -e,x,y,n,t,n,f,c,b,u,t,b,s,s,g,p,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,g,w,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,g,p,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,k,v,d -e,f,y,n,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,k,y,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,w,p,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,k,y,d -e,x,f,g,t,n,f,c,b,w,t,b,s,s,g,p,p,w,o,p,k,v,d -e,x,f,g,t,n,f,c,b,u,t,b,s,s,g,w,p,w,o,p,n,v,d -e,x,f,n,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,k,v,d -e,x,f,g,t,n,f,c,b,w,t,b,s,s,w,g,p,w,o,p,k,y,d -e,x,s,g,f,n,f,w,b,k,t,e,s,s,w,w,p,w,o,e,n,s,g -e,x,y,e,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,k,y,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,k,y,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,n,v,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,g,p,p,w,o,p,k,v,d -e,x,f,g,t,n,f,c,b,u,t,b,s,s,w,p,p,w,o,p,k,y,d -e,x,f,g,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,n,v,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,w,p,p,w,o,p,n,y,d -e,x,s,w,f,n,f,w,b,p,t,e,s,f,w,w,p,w,o,e,n,a,g -e,x,f,n,t,n,f,c,b,w,t,b,s,s,g,w,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,k,v,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,g,p,p,w,o,p,k,v,d -e,x,f,n,t,n,f,c,b,w,t,b,s,s,g,p,p,w,o,p,k,y,d -e,f,y,g,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,n,v,d -p,x,s,p,f,c,f,w,n,g,e,b,s,s,w,w,p,w,o,p,n,s,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,n,y,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,w,p,p,w,o,p,k,y,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,p,w,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,w,t,b,s,s,w,g,p,w,o,p,n,v,d -e,x,f,g,t,n,f,c,b,w,t,b,s,s,g,w,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,g,w,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,k,v,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,n,v,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,w,p,p,w,o,p,k,v,d 
-e,x,y,e,t,n,f,c,b,n,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,g,g,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,w,p,p,w,o,p,k,v,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,n,y,d -e,x,f,w,f,n,f,w,b,k,t,e,f,f,w,w,p,w,o,e,n,s,g -e,x,y,g,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,g,w,p,w,o,p,k,v,d -e,x,y,g,t,n,f,c,b,p,t,b,s,s,p,w,p,w,o,p,n,v,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,k,v,d -e,x,f,g,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,n,y,d -e,f,y,e,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,n,v,d -e,f,y,g,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,p,w,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,n,y,d -e,f,f,g,f,n,f,w,b,h,t,e,s,f,w,w,p,w,o,e,k,s,g -p,x,f,g,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,y,p -e,x,f,e,t,n,f,c,b,p,t,b,s,s,g,w,p,w,o,p,k,y,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,w,g,p,w,o,p,n,y,d -p,f,s,w,t,p,f,c,n,w,e,e,s,s,w,w,p,w,o,p,k,v,g -e,f,y,n,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,k,v,d -e,x,f,g,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,k,v,d -e,x,y,g,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,n,v,d -e,x,f,g,f,n,f,w,b,n,t,e,f,f,w,w,p,w,o,e,n,a,g -e,x,f,n,t,n,f,c,b,u,t,b,s,s,g,w,p,w,o,p,n,y,d -e,f,f,e,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,g,g,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,g,p,p,w,o,p,k,y,d -e,f,f,g,f,n,f,w,b,h,t,e,f,f,w,w,p,w,o,e,n,s,g -e,f,f,g,t,n,f,c,b,p,t,b,s,s,p,w,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,k,v,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,k,y,d -e,f,y,n,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,n,v,d -e,f,s,g,f,n,f,w,b,n,t,e,f,s,w,w,p,w,o,e,n,a,g -e,x,f,n,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,k,y,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,k,y,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,k,y,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,k,y,d -e,f,s,w,f,n,f,w,b,h,t,e,s,f,w,w,p,w,o,e,k,a,g -e,x,y,n,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,k,y,d -e,x,f,g,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,n,v,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,n,v,d -e,f,f,n,f,n,f,w,b,n,t,e,s,f,w,w,p,w,o,e,k,s,g -e,f,f,g,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,g,w,p,w,o,p,k,y,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,u,t,b,s,s,w,p,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,k,y,d -e,x,f,n,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,n,t,b,s,s,w,w,p,w,o,p,n,v,d -e,f,f,g,t,n,f,c,b,w,t,b,s,s,g,w,p,w,o,p,n,v,d -e,x,f,n,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,k,y,d -e,x,y,n,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,n,v,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,k,y,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,w,p,p,w,o,p,k,y,d -p,x,s,p,f,c,f,w,n,n,e,b,s,s,w,w,p,w,o,p,k,v,d -e,f,y,g,t,n,f,c,b,w,t,b,s,s,g,w,p,w,o,p,k,y,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,n,y,d -p,f,s,n,t,p,f,c,n,p,e,e,s,s,w,w,p,w,o,p,k,s,u -e,x,y,n,t,n,f,c,b,u,t,b,s,s,w,g,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,g,g,p,w,o,p,k,y,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,k,v,d -e,x,f,g,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,n,y,d -e,f,s,w,f,n,f,w,b,n,t,e,s,s,w,w,p,w,o,e,k,s,g -e,x,y,g,t,n,f,c,b,p,t,b,s,s,w,p,p,w,o,p,n,v,d 
-e,x,f,n,f,n,f,w,b,p,t,e,s,f,w,w,p,w,o,e,n,s,g -e,x,f,n,t,n,f,c,b,w,t,b,s,s,g,p,p,w,o,p,n,y,d -e,x,y,g,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,w,p,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,u,t,b,s,s,g,p,p,w,o,p,k,v,d -e,x,f,g,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,n,v,d -e,x,f,g,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,k,v,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,n,y,d -e,f,y,g,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,n,v,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,k,y,d -e,x,f,n,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,n,v,d -e,x,f,n,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,n,y,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,n,v,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,w,g,p,w,o,p,k,y,d -e,f,y,n,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,k,v,d -e,f,y,n,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,k,y,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,n,v,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,g,p,p,w,o,p,k,y,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,k,y,d -e,x,f,g,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,u,t,b,s,s,w,g,p,w,o,p,n,y,d -e,x,f,n,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,g,w,p,w,o,p,n,y,d -e,f,y,g,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,n,v,d -e,x,f,n,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,k,y,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,w,t,b,s,s,g,p,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,k,y,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,w,g,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,n,v,d -e,x,f,g,t,n,f,c,b,p,t,b,s,s,w,p,p,w,o,p,n,v,d -e,f,f,e,t,n,f,c,b,u,t,b,s,s,w,p,p,w,o,p,n,v,d -e,x,f,n,t,n,f,c,b,w,t,b,s,s,w,g,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,n,y,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,n,v,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,w,p,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,n,y,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,g,p,p,w,o,p,k,v,d -e,x,f,w,f,n,f,w,b,h,t,e,s,f,w,w,p,w,o,e,n,s,g -e,f,y,g,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,n,y,d -e,f,s,g,f,n,f,w,b,n,t,e,f,f,w,w,p,w,o,e,n,s,g -e,f,f,n,t,n,f,c,b,u,t,b,s,s,g,w,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,k,y,d -p,f,y,w,t,p,f,c,n,w,e,e,s,s,w,w,p,w,o,p,n,s,u -e,x,y,g,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,k,y,d -e,x,f,g,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,k,v,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,k,y,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,w,w,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,g,w,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,g,p,p,w,o,p,k,y,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,w,g,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,w,w,p,w,o,p,k,v,d -e,f,s,w,f,n,f,w,b,k,t,e,s,s,w,w,p,w,o,e,n,a,g -e,x,y,n,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,k,v,d -e,x,f,g,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,k,y,d -e,x,f,n,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,n,y,d 
-e,f,f,n,t,n,f,c,b,n,t,b,s,s,g,p,p,w,o,p,k,y,d -e,x,f,n,t,n,f,c,b,u,t,b,s,s,w,p,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,n,y,d -p,f,s,w,t,p,f,c,n,w,e,e,s,s,w,w,p,w,o,p,n,v,u -e,x,f,g,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,k,v,d -e,f,s,w,f,n,f,w,b,k,t,e,s,s,w,w,p,w,o,e,k,s,g -e,x,s,g,f,n,f,w,b,p,t,e,s,f,w,w,p,w,o,e,n,s,g -e,x,f,g,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,k,v,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,w,g,p,w,o,p,k,y,d -e,x,f,n,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,n,v,d -p,x,f,g,f,f,f,c,b,h,e,b,k,k,n,n,p,w,o,l,h,y,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,k,y,d -e,x,f,n,f,n,f,w,b,h,t,e,s,s,w,w,p,w,o,e,k,a,g -p,x,s,w,t,p,f,c,n,n,e,e,s,s,w,w,p,w,o,p,n,v,g -e,x,y,g,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,f,n,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,k,v,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,g,w,p,w,o,p,k,y,d -e,x,f,g,t,n,f,c,b,u,t,b,s,s,w,g,p,w,o,p,k,v,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,g,p,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,g,g,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,w,w,p,w,o,p,k,y,d -p,x,s,w,t,p,f,c,n,n,e,e,s,s,w,w,p,w,o,p,k,v,g -e,f,f,g,t,n,f,c,b,n,t,b,s,s,w,w,p,w,o,p,k,v,d -e,x,f,g,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,k,v,d -e,f,y,e,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,k,v,d -e,f,y,g,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,g,w,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,g,w,p,w,o,p,k,y,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,w,p,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,n,v,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,k,v,d -e,x,f,g,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,k,y,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,g,p,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,u,t,b,s,s,g,w,p,w,o,p,k,y,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,k,y,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,k,v,d -e,x,f,n,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,n,y,d -e,x,f,n,t,n,f,c,b,p,t,b,s,s,g,w,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,k,y,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,n,v,d -p,x,s,p,f,c,f,c,n,g,e,b,s,s,w,w,p,w,o,p,k,v,d -e,x,f,e,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,k,y,d -e,x,f,n,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,k,y,d -e,f,f,e,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,k,y,d -e,x,f,g,t,n,f,c,b,w,t,b,s,s,w,g,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,p,t,b,s,s,g,w,p,w,o,p,k,v,d -e,x,f,g,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,n,y,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,u,t,b,s,s,w,g,p,w,o,p,n,v,d -e,f,y,n,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,k,y,d -e,f,y,e,t,n,f,c,b,u,t,b,s,s,g,w,p,w,o,p,k,v,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,n,y,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,k,y,d -e,x,f,g,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,k,y,d -p,x,f,g,f,f,f,c,b,h,e,b,k,k,b,b,p,w,o,l,h,y,g -e,x,y,n,t,n,f,c,b,u,t,b,s,s,w,p,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,n,y,d -e,x,f,n,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,k,v,d 
-e,f,f,n,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,p,w,p,w,o,p,k,v,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,w,w,p,w,o,p,k,y,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,p,w,p,w,o,p,n,y,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,n,y,d -e,x,f,g,f,n,f,w,b,k,t,e,s,f,w,w,p,w,o,e,n,a,g -e,x,y,g,t,n,f,c,b,n,t,b,s,s,w,w,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,w,p,p,w,o,p,k,y,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,f,n,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,k,y,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,n,v,d -e,x,f,n,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,k,v,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,w,p,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,p,w,p,w,o,p,k,v,d -e,x,f,n,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,g,p,p,w,o,p,k,v,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,k,v,d -e,x,f,g,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,n,v,d -e,x,f,n,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,k,v,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,n,v,d -e,x,y,g,t,n,f,c,b,p,t,b,s,s,w,p,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,k,v,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,w,g,p,w,o,p,k,y,d -p,f,y,n,t,p,f,c,n,k,e,e,s,s,w,w,p,w,o,p,k,s,u -e,x,s,g,f,n,f,w,b,n,t,e,f,s,w,w,p,w,o,e,n,s,g -e,x,f,g,t,n,f,c,b,u,t,b,s,s,g,p,p,w,o,p,k,v,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,k,y,d -p,x,s,p,f,c,f,w,n,u,e,b,s,s,w,w,p,w,o,p,n,s,d -e,x,y,n,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,k,v,d -e,x,s,w,f,n,f,w,b,k,t,e,s,s,w,w,p,w,o,e,n,s,g -e,f,f,g,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,n,v,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,g,p,p,w,o,p,n,v,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,n,v,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,n,y,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,k,v,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,n,v,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,k,y,d -p,f,y,w,t,p,f,c,n,p,e,e,s,s,w,w,p,w,o,p,n,v,u -e,f,f,n,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,g,w,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,n,v,d -e,x,y,g,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,k,v,d -e,f,s,w,f,n,f,w,b,h,t,e,f,f,w,w,p,w,o,e,k,s,g -e,x,f,n,f,n,f,w,b,k,t,e,f,s,w,w,p,w,o,e,n,a,g -e,x,f,n,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,p,w,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,k,y,d -e,x,f,g,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,w,g,p,w,o,p,k,v,d -e,f,f,e,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,n,v,d -p,x,s,w,f,c,f,w,n,p,e,b,s,s,w,w,p,w,o,p,n,s,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,k,y,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,k,v,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,g,p,p,w,o,p,k,y,d -e,f,s,w,f,n,f,w,b,n,t,e,f,s,w,w,p,w,o,e,n,s,g -e,x,y,g,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,n,v,d 
-e,f,f,n,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,n,y,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,w,w,p,w,o,p,k,v,d -e,x,f,g,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,n,y,d -e,x,y,n,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,k,y,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,w,p,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,p,w,p,w,o,p,n,y,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,k,y,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,k,y,d -p,x,f,g,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,y,p -e,f,f,g,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,g,g,p,w,o,p,k,v,d -p,x,f,g,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,v,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,w,w,p,w,o,p,k,v,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,k,v,d -p,x,f,g,f,f,f,c,b,h,e,b,k,k,b,n,p,w,o,l,h,y,d -p,x,f,p,f,c,f,w,n,n,e,b,s,s,w,w,p,w,o,p,n,s,d -e,x,f,g,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,k,v,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,u,t,b,s,s,w,p,p,w,o,p,n,v,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,n,v,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,k,v,d -e,x,f,n,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,k,y,d -e,x,y,n,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,k,y,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,n,v,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,k,y,d -e,x,f,g,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,k,y,d -e,x,y,n,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,n,v,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,k,y,d -p,x,f,w,f,c,f,c,n,g,e,b,s,s,w,w,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,w,p,p,w,o,p,k,y,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,n,y,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,k,v,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,k,y,d -p,x,f,g,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,y,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,k,y,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,n,v,d -e,f,y,e,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,k,v,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,n,v,d -p,x,f,g,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,v,g -e,x,y,n,t,n,f,c,b,w,t,b,s,s,w,g,p,w,o,p,n,y,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,g,w,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,w,w,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,w,g,p,w,o,p,n,y,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,k,y,d -e,x,f,g,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,k,y,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,k,y,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,k,v,d -e,x,y,g,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,n,v,d -e,f,f,e,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,w,g,p,w,o,p,n,v,d -e,x,y,g,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,n,v,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,k,y,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,k,y,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,n,v,d -p,x,f,g,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,v,p -e,x,f,g,t,n,f,c,b,n,t,b,s,s,w,w,p,w,o,p,k,y,d -e,x,y,n,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,n,y,d 
-e,x,f,g,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,n,y,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,g,w,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,k,y,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,k,y,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,k,v,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,k,y,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,n,v,d -p,x,f,g,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,v,p -e,x,f,e,t,n,f,c,b,p,t,b,s,s,p,w,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,n,v,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,k,y,d -e,f,y,g,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,w,g,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,n,y,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,n,y,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,g,w,p,w,o,p,n,y,d -e,x,f,n,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,k,y,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,g,g,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,n,v,d -e,x,f,n,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,k,y,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,n,v,d -e,x,f,g,t,n,f,c,b,w,t,b,s,s,w,g,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,n,v,d -p,x,f,g,f,c,f,c,n,g,e,b,s,s,w,w,p,w,o,p,n,v,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,n,v,d -e,x,f,n,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,n,y,d -e,f,s,n,f,n,f,w,b,p,t,e,s,f,w,w,p,w,o,e,k,a,g -e,f,f,n,t,n,f,c,b,w,t,b,s,s,g,w,p,w,o,p,n,v,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,n,y,d -e,f,y,g,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,n,v,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,n,v,d -e,f,y,e,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,n,v,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,n,v,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,g,g,p,w,o,p,k,y,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,p,w,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,k,v,d -e,f,y,n,t,n,f,c,b,u,t,b,s,s,g,g,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,u,t,b,s,s,g,w,p,w,o,p,n,y,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,k,y,d -e,f,f,w,f,n,f,w,b,h,t,e,s,f,w,w,p,w,o,e,k,s,g -e,x,y,n,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,n,v,d -e,x,f,g,t,n,f,c,b,u,t,b,s,s,g,g,p,w,o,p,k,y,d -e,f,y,e,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,u,t,b,s,s,g,g,p,w,o,p,n,y,d -e,f,y,n,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,k,y,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,k,v,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,g,w,p,w,o,p,n,v,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,n,t,b,s,s,g,p,p,w,o,p,k,v,d -e,x,f,g,t,n,f,c,b,p,t,b,s,s,p,w,p,w,o,p,k,v,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,k,y,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,n,v,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,u,t,b,s,s,g,p,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,n,y,d 
-e,x,f,e,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,k,v,d -e,x,f,g,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,k,v,d -e,x,f,e,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,n,y,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,k,y,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,g,p,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,k,v,d -p,x,f,p,f,c,f,c,n,p,e,b,s,s,w,w,p,w,o,p,n,v,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,k,y,d -e,x,f,n,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,g,p,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,g,p,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,k,v,d -e,f,s,n,f,n,f,w,b,n,t,e,s,s,w,w,p,w,o,e,n,a,g -e,x,y,g,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,k,y,d -e,x,y,n,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,p,w,p,w,o,p,k,v,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,n,v,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,k,y,d -p,x,f,g,f,f,f,c,b,h,e,b,k,k,b,p,p,w,o,l,h,y,g -e,x,y,e,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,f,g,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,k,v,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,n,v,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,k,y,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,w,g,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,g,p,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,w,t,b,s,s,w,g,p,w,o,p,k,v,d -e,x,f,e,t,n,f,c,b,u,t,b,s,s,g,w,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,u,t,b,s,s,g,w,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,w,p,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,u,t,b,s,s,g,p,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,u,t,b,s,s,w,p,p,w,o,p,k,y,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,g,w,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,n,y,d -e,f,f,e,t,n,f,c,b,w,t,b,s,s,g,p,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,k,y,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,k,v,d -e,x,f,n,t,n,f,c,b,u,t,b,s,s,g,w,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,g,p,p,w,o,p,n,y,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,n,y,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,g,w,p,w,o,p,n,v,d -e,f,y,e,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,u,t,b,s,s,g,p,p,w,o,p,k,y,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,n,y,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,n,y,d -e,f,y,g,t,n,f,c,b,p,t,b,s,s,w,p,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,n,y,d -e,x,s,g,f,n,f,w,b,h,t,e,f,s,w,w,p,w,o,e,k,s,g -e,f,f,n,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,n,y,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,g,p,p,w,o,p,n,y,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,k,y,d -e,x,f,n,t,n,f,c,b,u,t,b,s,s,g,w,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,u,t,b,s,s,g,g,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,k,y,d -e,x,y,n,t,n,f,c,b,w,t,b,s,s,w,g,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,k,y,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,k,y,d -e,x,y,n,t,n,f,c,b,u,t,b,s,s,g,w,p,w,o,p,n,y,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,k,y,d -e,f,y,n,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,n,v,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,g,w,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,k,v,d 
-e,x,y,n,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,n,y,d -p,x,f,g,f,c,f,c,n,p,e,b,s,s,w,w,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,n,y,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,g,p,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,w,t,b,s,s,g,p,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,k,y,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,p,w,p,w,o,p,k,y,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,n,y,d -e,f,y,g,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,k,y,d -e,x,f,g,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,w,g,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,w,g,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,g,p,p,w,o,p,n,v,d -e,x,f,g,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,n,v,d -p,x,f,g,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,v,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,n,v,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,k,y,d -e,x,f,g,t,n,f,c,b,u,t,b,s,s,w,g,p,w,o,p,n,y,d -p,x,f,g,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,y,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,g,p,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,u,t,b,s,s,g,g,p,w,o,p,k,v,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,k,v,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,n,y,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,w,p,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,k,v,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,g,p,p,w,o,p,n,v,d -e,x,f,g,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,g,p,p,w,o,p,k,y,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,n,v,d -e,x,f,g,t,n,f,c,b,u,t,b,s,s,g,w,p,w,o,p,k,y,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,p,w,p,w,o,p,k,y,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,k,y,d -e,x,f,n,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,n,y,d -e,f,s,g,f,n,f,w,b,p,t,e,f,f,w,w,p,w,o,e,k,s,g -e,x,y,e,t,n,f,c,b,p,t,b,s,s,g,w,p,w,o,p,k,y,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,g,w,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,g,p,p,w,o,p,k,y,d -e,x,f,g,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,w,g,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,n,v,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,n,v,d -e,x,f,g,t,n,f,c,b,w,t,b,s,s,w,g,p,w,o,p,k,v,d -p,x,f,w,f,c,f,w,n,g,e,b,s,s,w,w,p,w,o,p,n,s,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,w,t,b,s,s,p,w,p,w,o,p,n,y,d -e,f,f,w,f,n,f,w,b,h,t,e,s,s,w,w,p,w,o,e,n,s,g -e,x,f,g,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,n,y,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,n,v,d -e,x,f,g,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,n,y,d -e,f,y,e,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,k,y,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,n,v,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,w,w,p,w,o,p,n,v,d -e,f,y,e,t,n,f,c,b,n,t,b,s,s,w,w,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,n,v,d -e,x,y,g,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,y,g,t,n,f,c,b,p,t,b,s,s,g,w,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,n,t,b,s,s,g,p,p,w,o,p,n,v,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,g,p,p,w,o,p,n,v,d -e,x,f,n,t,n,f,c,b,w,t,b,s,s,g,w,p,w,o,p,k,y,d -e,f,f,n,f,n,f,w,b,n,t,e,f,f,w,w,p,w,o,e,k,s,g -e,x,f,e,t,n,f,c,b,w,t,b,s,s,w,g,p,w,o,p,n,y,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,n,y,d -e,x,y,n,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,k,v,d -e,x,f,g,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,n,y,d 
-e,x,y,e,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,k,v,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,k,y,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,n,v,d -e,x,f,e,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,k,v,d -e,x,y,g,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,n,y,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,w,p,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,k,v,d -e,x,f,e,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,k,y,d -e,x,f,n,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,w,w,p,w,o,p,k,v,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,g,p,p,w,o,p,k,y,d -e,x,f,g,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,k,y,d -e,x,f,g,t,n,f,c,b,u,t,b,s,s,g,g,p,w,o,p,k,v,d -p,x,f,p,f,c,f,w,n,u,e,b,s,s,w,w,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,w,p,p,w,o,p,n,v,d -e,f,s,n,f,n,f,w,b,p,t,e,f,f,w,w,p,w,o,e,n,s,g -e,x,y,g,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,k,y,d -e,x,f,n,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,k,y,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,w,t,b,s,s,g,p,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,w,t,b,s,s,g,p,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,g,p,p,w,o,p,n,y,d -e,x,f,n,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,n,v,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,g,p,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,n,y,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,n,y,d -e,x,y,g,t,n,f,c,b,p,t,b,s,s,p,w,p,w,o,p,k,v,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,g,w,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,n,y,d -p,x,s,p,f,c,f,w,n,u,e,b,s,s,w,w,p,w,o,p,k,s,d -e,f,y,n,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,k,v,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,g,w,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,k,v,d -e,x,f,g,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,g,p,p,w,o,p,n,y,d -e,x,y,n,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,n,y,d -e,x,y,g,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,g,w,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,k,v,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,p,w,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,w,p,p,w,o,p,n,y,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,n,y,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,k,y,d -e,f,f,e,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,n,y,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,g,w,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,g,w,p,w,o,p,n,v,d -e,x,s,w,f,n,f,w,b,k,t,e,s,f,w,w,p,w,o,e,k,s,g -e,x,y,n,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,w,w,p,w,o,p,n,y,d -e,f,f,e,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,k,v,d -e,x,f,g,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,k,y,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,k,v,d -p,x,s,g,f,c,f,c,n,n,e,b,s,s,w,w,p,w,o,p,k,v,d -e,f,y,e,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,k,v,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,w,g,p,w,o,p,k,v,d -e,x,f,n,f,n,f,w,b,k,t,e,s,s,w,w,p,w,o,e,k,a,g -e,x,y,e,t,n,f,c,b,u,t,b,s,s,w,g,p,w,o,p,k,y,d -p,x,f,w,f,c,f,w,n,u,e,b,s,s,w,w,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,k,v,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,k,v,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,w,g,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,k,v,d 
-e,f,f,n,t,n,f,c,b,u,t,b,s,s,g,g,p,w,o,p,k,v,d -e,x,f,g,t,n,f,c,b,n,t,b,s,s,w,w,p,w,o,p,n,y,d -e,f,y,n,t,n,f,c,b,n,t,b,s,s,g,p,p,w,o,p,k,v,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,g,w,p,w,o,p,n,y,d -e,f,y,n,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,n,v,d -p,x,f,g,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,y,p -e,x,y,g,t,n,f,c,b,n,t,b,s,s,w,w,p,w,o,p,n,y,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,k,v,d -e,x,f,g,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,k,y,d -e,x,f,g,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,k,y,d -e,x,f,g,t,n,f,c,b,p,t,b,s,s,w,p,p,w,o,p,n,y,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,n,y,d -e,x,f,g,t,n,f,c,b,p,t,b,s,s,g,w,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,k,v,d -e,x,f,n,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,g,g,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,k,y,d -e,f,y,n,t,n,f,c,b,u,t,b,s,s,w,g,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,u,t,b,s,s,g,p,p,w,o,p,n,v,d -e,f,f,e,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,k,y,d -e,x,f,g,t,n,f,c,b,u,t,b,s,s,g,p,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,k,y,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,n,y,d -p,x,f,g,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,y,p -e,x,f,g,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,k,v,d -e,x,f,g,t,n,f,c,b,n,t,b,s,s,g,p,p,w,o,p,n,y,d -e,x,f,n,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,f,g,t,n,f,c,b,w,t,b,s,s,g,w,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,w,w,p,w,o,p,k,y,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,p,w,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,w,t,b,s,s,g,g,p,w,o,p,k,v,d -e,f,y,g,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,k,v,d -e,f,f,g,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,n,v,d -p,x,f,g,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,v,p -e,x,f,n,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,n,y,d -e,x,y,n,t,n,f,c,b,u,t,b,s,s,g,g,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,n,v,d -e,x,f,n,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,n,v,d -e,f,f,n,t,n,f,c,b,p,t,b,s,s,w,p,p,w,o,p,n,y,d -e,x,y,g,t,n,f,c,b,w,t,b,s,s,w,g,p,w,o,p,n,v,d -e,x,y,n,t,n,f,c,b,u,t,b,s,s,w,g,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,w,w,p,w,o,p,n,y,d -e,f,f,g,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,k,y,d -e,x,f,g,t,n,f,c,b,w,t,b,s,s,p,p,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,n,v,d -e,x,f,g,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,k,y,d -e,x,y,n,t,n,f,c,b,p,t,b,s,s,p,g,p,w,o,p,n,v,d -e,x,f,e,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,k,y,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,g,p,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,p,t,b,s,s,w,g,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,n,v,d -e,x,y,e,t,n,f,c,b,n,t,b,s,s,p,g,p,w,o,p,k,y,d -e,x,f,n,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,k,y,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,w,p,p,w,o,p,n,y,d -e,x,f,e,t,n,f,c,b,w,t,b,s,s,p,g,p,w,o,p,k,v,d -e,x,f,n,t,n,f,c,b,u,t,b,s,s,g,w,p,w,o,p,k,v,d -e,x,y,n,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,n,y,d -e,x,y,e,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,p,g,p,w,o,p,n,y,d -e,x,y,n,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,k,y,d -e,x,y,e,t,n,f,c,b,u,t,b,s,s,w,g,p,w,o,p,n,v,d -e,f,f,g,f,n,f,w,b,n,t,e,f,s,w,w,p,w,o,e,n,s,g -e,x,y,e,t,n,f,c,b,w,t,b,s,s,w,g,p,w,o,p,k,v,d -e,x,y,e,t,n,f,c,b,p,t,b,s,s,g,g,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,u,t,b,s,s,g,p,p,w,o,p,n,y,d -e,f,f,n,t,n,f,c,b,n,t,b,s,s,g,g,p,w,o,p,k,y,d -e,x,f,g,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,k,v,d 
-[… elided: a long run of removed data records — comma-separated, 23-attribute mushroom rows (UCI agaricus-lepiota style, class labels e/p, with occasional "?" for missing values) — part of a test-data file deleted wholesale by this patch. The extraction had fused many diff lines per physical line; the individual records carry no reviewable content and are collapsed to this placeholder. …]
-p,f,f,g,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,v,g -p,f,s,b,t,f,f,c,b,w,t,b,s,f,w,w,p,w,o,p,h,v,g -p,f,s,w,t,n,f,c,b,g,e,b,s,s,w,w,p,w,t,p,r,v,m -p,x,y,y,f,f,f,c,b,p,e,b,k,k,p,n,p,w,o,l,h,y,p -p,f,f,y,f,f,f,c,b,p,e,b,k,k,b,b,p,w,o,l,h,v,p -p,f,y,g,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,v,g -p,x,y,y,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,v,d -p,x,f,y,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,v,p -p,x,f,y,f,f,f,c,b,g,e,b,k,k,p,b,p,w,o,l,h,y,d -e,f,f,g,t,n,f,c,b,u,t,b,s,s,g,g,p,w,o,p,n,v,d -e,x,y,u,f,n,f,c,n,u,e,?,s,f,w,w,p,w,o,f,h,v,d -p,x,y,g,f,f,f,c,b,h,e,b,k,k,p,p,p,w,o,l,h,y,d -p,f,f,y,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,v,g -p,x,y,y,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,v,d -p,f,y,g,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,v,d -p,x,y,y,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,y,p -p,x,f,y,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,v,d -p,x,y,y,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,y,p -p,f,f,y,f,f,f,c,b,h,e,b,k,k,n,n,p,w,o,l,h,v,g -p,x,y,g,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,y,p -p,f,y,g,f,f,f,c,b,p,e,b,k,k,n,n,p,w,o,l,h,y,g -p,f,y,g,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,v,p -p,x,f,y,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,y,p -e,f,f,e,t,n,f,c,b,w,t,b,s,s,w,g,p,w,o,p,n,v,d -p,x,y,y,f,f,f,c,b,h,e,b,k,k,p,p,p,w,o,l,h,y,d -p,f,f,g,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,v,d -p,x,y,g,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,y,p -p,f,y,g,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,v,p -p,f,f,g,f,f,f,c,b,p,e,b,k,k,b,n,p,w,o,l,h,y,p -p,f,f,g,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,y,d -p,f,f,g,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,v,g -p,f,y,g,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,y,d -p,f,f,y,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,v,d -p,x,f,y,f,f,f,c,b,h,e,b,k,k,p,p,p,w,o,l,h,y,d -p,x,y,g,f,f,f,c,b,p,e,b,k,k,n,n,p,w,o,l,h,v,p -p,x,y,g,f,f,f,c,b,p,e,b,k,k,b,n,p,w,o,l,h,y,p -p,x,f,w,f,c,f,c,n,g,e,b,s,s,w,w,p,w,o,p,n,v,d -p,f,f,g,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,y,d -p,x,f,y,f,f,f,c,b,h,e,b,k,k,p,p,p,w,o,l,h,v,p -p,f,y,g,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,v,d -p,x,f,y,f,f,f,c,b,p,e,b,k,k,p,n,p,w,o,l,h,v,g -p,f,y,g,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,v,d -p,f,y,g,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,v,d -p,f,f,y,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,y,p -p,x,y,g,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,v,g -p,f,y,y,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,v,g -p,f,f,y,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,y,p -p,x,f,y,f,f,f,c,b,h,e,b,k,k,b,p,p,w,o,l,h,v,d -p,f,f,y,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,v,g -p,x,y,y,f,f,f,c,b,p,e,b,k,k,b,n,p,w,o,l,h,y,p -p,f,f,y,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,y,d -p,x,f,y,f,f,f,c,b,p,e,b,k,k,b,b,p,w,o,l,h,v,g -p,f,y,g,f,f,f,c,b,h,e,b,k,k,n,n,p,w,o,l,h,y,d -p,f,f,g,f,f,f,c,b,p,e,b,k,k,p,b,p,w,o,l,h,y,d -p,f,f,y,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,v,g -p,f,f,g,f,f,f,c,b,p,e,b,k,k,b,b,p,w,o,l,h,v,d -p,f,f,g,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,y,g -p,f,f,y,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,v,d -p,x,y,y,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,y,d -p,x,y,y,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,v,d -p,f,f,y,f,f,f,c,b,h,e,b,k,k,n,n,p,w,o,l,h,y,d -p,x,y,y,f,f,f,c,b,h,e,b,k,k,b,b,p,w,o,l,h,v,d -p,x,y,y,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,v,g -p,x,y,n,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -e,f,y,b,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w -p,x,f,y,f,f,f,c,b,h,e,b,k,k,b,p,p,w,o,l,h,v,g -p,x,y,y,f,f,f,c,b,h,e,b,k,k,b,b,p,w,o,l,h,y,d -p,f,y,g,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,y,d -p,x,y,y,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,y,d -p,f,y,g,f,f,f,c,b,p,e,b,k,k,p,b,p,w,o,l,h,v,p -p,f,y,g,f,f,f,c,b,p,e,b,k,k,p,b,p,w,o,l,h,y,d -p,f,f,g,f,f,f,c,b,p,e,b,k,k,p,b,p,w,o,l,h,v,p -p,x,f,y,f,f,f,c,b,p,e,b,k,k,p,n,p,w,o,l,h,v,p -p,f,f,y,f,f,f,c,b,p,e,b,k,k,b,n,p,w,o,l,h,y,g -p,f,f,y,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,y,d 
-p,f,y,g,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,v,p -p,f,y,g,f,f,f,c,b,h,e,b,k,k,n,b,p,w,o,l,h,y,g -p,f,f,g,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,y,g -p,f,y,g,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,v,d -p,f,f,g,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,v,d -p,f,y,g,f,f,f,c,b,g,e,b,k,k,p,b,p,w,o,l,h,v,p -p,x,f,g,f,c,f,c,n,u,e,b,s,s,w,w,p,w,o,p,k,s,d -p,f,s,b,t,f,f,c,b,w,t,b,f,f,w,w,p,w,o,p,h,v,g -e,f,f,e,t,n,f,c,b,w,t,b,s,s,g,w,p,w,o,p,k,v,d -p,x,y,y,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,v,p -p,x,y,g,f,f,f,c,b,p,e,b,k,k,p,n,p,w,o,l,h,y,d -p,f,y,g,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,v,g -e,f,f,g,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,k,v,d -p,x,y,y,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,y,d -p,x,y,y,f,f,f,c,b,p,e,b,k,k,p,b,p,w,o,l,h,v,p -p,f,f,y,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,y,p -p,x,f,y,f,f,f,c,b,h,e,b,k,k,b,p,p,w,o,l,h,v,p -p,x,f,y,f,f,f,c,b,h,e,b,k,k,b,b,p,w,o,l,h,y,d -p,x,y,g,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,v,g -p,f,s,w,t,f,f,c,b,w,t,b,s,f,w,w,p,w,o,p,h,s,u -p,x,y,g,f,f,f,c,b,p,e,b,k,k,p,n,p,w,o,l,h,v,d -p,x,f,y,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,v,g -p,f,y,g,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,y,p -p,x,y,g,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,v,p -p,x,f,y,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,v,d -e,f,y,u,f,n,f,c,n,h,e,?,s,f,w,w,p,w,o,f,h,v,d -p,f,f,y,f,f,f,c,b,h,e,b,k,k,b,b,p,w,o,l,h,y,d -p,f,y,g,f,f,f,c,b,p,e,b,k,k,b,b,p,w,o,l,h,v,g -p,x,y,y,f,f,f,c,b,h,e,b,k,k,b,p,p,w,o,l,h,y,g -p,f,f,g,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,y,g -e,f,y,n,t,n,f,c,b,n,t,b,s,s,p,p,p,w,o,p,n,y,d -p,x,f,y,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,v,p -p,f,f,g,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,y,d -p,x,y,g,f,f,f,c,b,h,e,b,k,k,n,b,p,w,o,l,h,y,p -p,f,f,y,f,f,f,c,b,p,e,b,k,k,n,n,p,w,o,l,h,y,g -p,x,f,y,f,f,f,c,b,h,e,b,k,k,b,b,p,w,o,l,h,v,d -p,x,y,y,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,v,p -p,x,f,y,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,v,p -p,f,y,g,f,f,f,c,b,g,e,b,k,k,n,p,p,w,o,l,h,v,g -p,x,s,w,t,f,f,c,b,h,t,b,f,f,w,w,p,w,o,p,h,s,u -e,f,y,e,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w -e,x,y,u,f,n,f,c,n,w,e,?,s,f,w,w,p,w,o,f,h,y,d -p,x,y,y,f,f,f,c,b,p,e,b,k,k,b,n,p,w,o,l,h,v,g -p,x,y,g,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,y,d -p,x,f,y,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,y,p -p,f,f,g,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,y,d -p,x,y,g,f,f,f,c,b,g,e,b,k,k,p,b,p,w,o,l,h,y,p -p,f,f,g,f,f,f,c,b,h,e,b,k,k,b,n,p,w,o,l,h,y,g -p,f,f,y,f,f,f,c,b,p,e,b,k,k,p,n,p,w,o,l,h,y,g -p,x,y,g,f,f,f,c,b,h,e,b,k,k,p,p,p,w,o,l,h,y,p -p,f,y,g,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,y,p -e,f,s,p,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w -p,f,f,g,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,y,p -p,x,y,y,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,y,p -p,x,y,g,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,y,p -p,f,f,y,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,v,g -p,x,f,y,f,f,f,c,b,p,e,b,k,k,p,n,p,w,o,l,h,y,d -p,f,y,g,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,y,d -p,f,y,y,f,f,f,c,b,g,e,b,k,k,n,p,p,w,o,l,h,y,p -p,x,y,g,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,y,g -p,x,y,g,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,y,d -p,f,f,y,f,f,f,c,b,g,e,b,k,k,n,p,p,w,o,l,h,y,p -p,f,f,g,f,f,f,c,b,p,e,b,k,k,b,n,p,w,o,l,h,y,d -p,x,y,g,f,f,f,c,b,h,e,b,k,k,b,n,p,w,o,l,h,y,p -p,f,f,y,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,v,p -p,f,y,g,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,v,d -p,x,y,g,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,y,g -p,f,f,g,f,f,f,c,b,h,e,b,k,k,b,b,p,w,o,l,h,v,d -p,x,y,g,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,y,g -p,f,f,g,f,f,f,c,b,p,e,b,k,k,p,n,p,w,o,l,h,v,d -p,x,y,y,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,y,p -p,x,f,y,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,y,g -p,x,y,g,f,f,f,c,b,h,e,b,k,k,b,p,p,w,o,l,h,v,p -p,f,f,y,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,v,p -e,f,y,n,t,n,f,c,b,n,t,b,s,s,w,g,p,w,o,p,n,y,d 
-p,f,f,g,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,y,d -p,x,y,y,f,f,f,c,b,h,e,b,k,k,n,n,p,w,o,l,h,y,p -p,x,s,g,t,f,f,c,b,h,t,b,f,f,w,w,p,w,o,p,h,s,g -p,f,y,y,f,f,f,c,b,h,e,b,k,k,n,n,p,w,o,l,h,v,p -p,x,y,y,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,v,p -p,x,f,w,f,c,f,c,n,g,e,b,s,s,w,w,p,w,o,p,k,s,d -p,x,f,y,f,f,f,c,b,g,e,b,k,k,n,p,p,w,o,l,h,y,d -p,f,s,b,t,f,f,c,b,h,t,b,f,s,w,w,p,w,o,p,h,s,u -p,x,y,y,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,y,g -p,x,y,g,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,y,d -p,f,f,y,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,y,g -p,f,s,p,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,g -p,f,s,b,t,f,f,c,b,p,t,b,f,f,w,w,p,w,o,p,h,v,u -p,x,y,g,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,v,g -p,x,y,g,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,y,g -p,x,y,g,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,v,g -p,f,f,g,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,y,p -p,f,f,g,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,v,p -p,x,f,y,f,f,f,c,b,h,e,b,k,k,p,p,p,w,o,l,h,y,p -p,f,f,y,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,v,p -e,f,y,u,f,n,f,c,n,u,e,?,s,f,w,w,p,w,o,f,h,v,d -p,x,f,y,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,y,g -e,f,f,e,t,n,f,c,b,w,t,b,s,s,g,w,p,w,o,p,n,v,d -p,f,f,y,f,f,f,c,b,g,e,b,k,k,n,p,p,w,o,l,h,y,g -p,x,f,y,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,v,g -p,x,y,n,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,x,y,g,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,y,g -p,x,f,y,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,y,d -p,f,f,g,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,y,g -p,x,y,g,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,v,p -p,x,y,g,f,f,f,c,b,h,e,b,k,k,b,b,p,w,o,l,h,y,g -p,x,f,y,f,f,f,c,b,h,e,b,k,k,b,p,p,w,o,l,h,y,d -p,f,f,y,f,f,f,c,b,h,e,b,k,k,n,n,p,w,o,l,h,y,g -p,f,y,g,f,f,f,c,b,g,e,b,k,k,n,p,p,w,o,l,h,v,p -p,f,y,g,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,y,g -e,f,y,n,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,n,y,d -p,x,y,y,f,f,f,c,b,g,e,b,k,k,n,p,p,w,o,l,h,v,g -e,f,f,e,t,n,f,c,b,w,t,b,s,s,w,w,p,w,o,p,k,y,d -p,x,f,y,f,f,f,c,b,h,e,b,k,k,b,n,p,w,o,l,h,y,d -p,x,y,g,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,y,p -p,f,y,g,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,y,p -p,x,f,y,f,f,f,c,b,p,e,b,k,k,p,b,p,w,o,l,h,y,p -p,f,f,g,f,f,f,c,b,h,e,b,k,k,n,b,p,w,o,l,h,v,d -p,f,f,g,f,f,f,c,b,p,e,b,k,k,p,b,p,w,o,l,h,v,d -p,f,y,g,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,y,p -p,f,y,g,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,y,g -p,x,f,y,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,v,d -p,x,y,y,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,y,p -p,f,f,y,f,f,f,c,b,g,e,b,k,k,n,p,p,w,o,l,h,v,d -p,x,y,g,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,y,d -p,f,f,y,f,f,f,c,b,g,e,b,k,k,p,b,p,w,o,l,h,y,p -p,f,y,g,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,v,d -p,x,f,p,f,c,f,w,n,p,e,b,s,s,w,w,p,w,o,p,k,s,d -p,f,f,y,f,f,f,c,b,g,e,b,k,k,p,b,p,w,o,l,h,v,d -p,x,y,g,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,v,d -p,x,y,y,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,y,g -p,x,y,g,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,v,d -p,f,y,y,f,f,f,c,b,h,e,b,k,k,n,b,p,w,o,l,h,v,p -p,x,y,g,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,y,p -p,x,f,y,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,v,d -p,f,y,g,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,v,d -p,f,y,g,f,f,f,c,b,p,e,b,k,k,p,b,p,w,o,l,h,v,g -p,x,y,y,f,f,f,c,b,p,e,b,k,k,p,n,p,w,o,l,h,y,d -p,f,f,y,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,y,d -p,x,y,g,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,v,d -p,x,y,y,f,f,f,c,b,p,e,b,k,k,b,b,p,w,o,l,h,y,d -p,x,y,g,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,v,d -p,x,y,g,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,y,g -p,x,f,y,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,v,g -p,x,y,g,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,y,d -p,f,f,g,f,f,f,c,b,g,e,b,k,k,p,b,p,w,o,l,h,y,p -p,f,f,g,f,f,f,c,b,p,e,b,k,k,b,b,p,w,o,l,h,v,g -p,x,y,g,f,f,f,c,b,h,e,b,k,k,n,n,p,w,o,l,h,v,p -p,x,f,y,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,v,d -p,x,y,g,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,y,d 
-p,f,f,g,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,v,p -p,x,y,y,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,v,p -p,x,f,y,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,v,d -p,f,y,g,f,f,f,c,b,p,e,b,k,k,n,n,p,w,o,l,h,y,d -p,f,f,g,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,v,p -e,x,s,p,t,n,f,c,b,e,e,?,s,s,w,w,p,w,t,e,w,c,w -p,f,f,y,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,v,d -p,x,s,p,f,c,f,w,n,g,e,b,s,s,w,w,p,w,o,p,k,v,d -p,x,f,y,f,f,f,c,b,h,e,b,k,k,p,p,p,w,o,l,h,y,g -p,f,f,g,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,y,p -p,x,f,y,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,y,g -e,f,f,e,t,n,f,c,b,p,t,b,s,s,p,p,p,w,o,p,n,y,d -p,x,y,y,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,v,d -p,x,f,y,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,y,g -p,f,y,g,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,v,g -p,f,f,y,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,y,g -e,f,y,n,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,n,v,d -p,x,y,y,f,f,f,c,b,h,e,b,k,k,b,b,p,w,o,l,h,y,p -p,f,f,g,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,v,p -p,x,y,y,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,v,p -p,f,f,y,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,y,d -p,f,y,y,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,y,d -p,f,y,g,f,f,f,c,b,h,e,b,k,k,p,p,p,w,o,l,h,v,g -p,f,f,g,f,f,f,c,b,h,e,b,k,k,b,p,p,w,o,l,h,v,g -p,x,y,y,f,f,f,c,b,p,e,b,k,k,b,n,p,w,o,l,h,y,g -p,f,y,g,f,f,f,c,b,g,e,b,k,k,n,n,p,w,o,l,h,y,p -p,x,y,y,f,f,f,c,b,g,e,b,k,k,n,n,p,w,o,l,h,y,g -e,f,f,g,t,n,f,c,b,u,t,b,s,s,w,w,p,w,o,p,n,y,d -p,x,s,g,t,f,f,c,b,h,t,b,f,s,w,w,p,w,o,p,h,s,g -p,x,y,n,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,f,s,g,t,f,f,c,b,p,t,b,s,f,w,w,p,w,o,p,h,v,g -p,f,y,g,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,y,g -p,f,y,y,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,v,g -p,f,y,g,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,v,d -e,f,y,e,t,n,f,c,b,e,e,?,s,s,w,e,p,w,t,e,w,c,w -p,f,y,g,f,f,f,c,b,p,e,b,k,k,n,n,p,w,o,l,h,v,p -p,x,f,y,f,f,f,c,b,h,e,b,k,k,n,n,p,w,o,l,h,v,g -p,f,f,g,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,y,d -p,x,y,y,f,f,f,c,b,p,e,b,k,k,p,b,p,w,o,l,h,y,d -p,f,f,y,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,y,g -e,f,y,n,t,n,f,c,b,e,e,?,s,s,e,e,p,w,t,e,w,c,w -p,f,y,g,f,f,f,c,b,p,e,b,k,k,p,n,p,w,o,l,h,v,g -p,k,f,n,f,n,f,c,n,w,e,?,k,y,w,n,p,w,o,e,w,v,d -p,f,f,g,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,v,d -p,f,f,y,f,f,f,c,b,h,e,b,k,k,n,b,p,w,o,l,h,v,p -p,x,y,n,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,f,f,g,f,f,f,c,b,h,e,b,k,k,p,p,p,w,o,l,h,v,d -p,f,f,g,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,y,p -p,f,s,w,t,f,f,c,b,w,t,b,f,f,w,w,p,w,o,p,h,v,g -p,f,f,y,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,v,d -p,x,y,g,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,y,g -p,x,f,y,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,y,g -e,x,y,e,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w -p,f,y,y,f,f,f,c,b,g,e,b,k,k,n,p,p,w,o,l,h,v,d -p,b,f,y,f,n,f,c,n,w,e,?,k,y,w,n,p,w,o,e,w,v,d -p,f,y,y,f,f,f,c,b,p,e,b,k,k,n,n,p,w,o,l,h,v,d -p,x,s,g,t,f,f,c,b,p,t,b,f,f,w,w,p,w,o,p,h,s,u -p,f,y,y,f,f,f,c,b,h,e,b,k,k,p,p,p,w,o,l,h,v,d -p,f,f,y,f,f,f,c,b,p,e,b,k,k,p,b,p,w,o,l,h,v,g -e,k,y,e,t,n,f,c,b,e,e,?,s,s,w,w,p,w,t,e,w,c,w -p,f,f,y,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,v,p -p,f,f,y,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,v,p -e,k,y,n,f,n,f,w,n,w,e,b,f,s,w,n,p,w,o,e,w,v,l -p,x,s,w,f,c,f,c,n,p,e,b,s,s,w,w,p,w,o,p,n,v,d -p,x,f,y,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,y,g -p,x,f,y,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,v,p -p,x,f,y,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,y,d -p,f,y,y,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,v,p -p,b,s,b,t,n,f,c,b,g,e,b,s,s,w,w,p,w,t,p,r,v,g -p,x,s,w,t,f,f,c,b,h,t,b,f,f,w,w,p,w,o,p,h,v,g -e,f,y,e,t,n,f,c,b,n,t,b,s,s,w,p,p,w,o,p,n,y,d -p,b,s,w,t,n,f,c,b,g,e,b,s,s,w,w,p,w,t,p,r,v,m -e,f,y,p,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w -p,x,s,w,t,f,f,c,b,p,t,b,f,f,w,w,p,w,o,p,h,v,g -e,k,y,n,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w 
-p,x,y,y,f,f,f,c,b,h,e,b,k,k,n,b,p,w,o,l,h,y,p -p,f,s,b,t,f,f,c,b,h,t,b,f,s,w,w,p,w,o,p,h,v,u -p,f,y,g,f,f,f,c,b,h,e,b,k,k,n,n,p,w,o,l,h,v,d -p,x,y,y,f,f,f,c,b,h,e,b,k,k,b,n,p,w,o,l,h,v,d -p,x,y,g,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,v,d -p,x,f,y,f,f,f,c,b,g,e,b,k,k,n,p,p,w,o,l,h,y,p -p,x,y,y,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,v,d -p,x,y,y,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,y,d -p,x,f,y,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,y,p -p,x,y,g,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,y,p -p,f,f,g,f,f,f,c,b,p,e,b,k,k,n,n,p,w,o,l,h,v,p -e,k,s,e,t,n,f,c,b,w,e,?,s,s,e,w,p,w,t,e,w,c,w -e,f,y,n,t,n,f,c,b,e,e,?,s,s,e,w,p,w,t,e,w,c,w -e,f,s,b,t,n,f,c,b,w,e,?,s,s,e,w,p,w,t,e,w,c,w -p,x,y,y,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,v,d -e,f,f,g,t,n,f,c,b,u,t,b,s,s,p,p,p,w,o,p,n,v,d -p,f,f,y,f,f,f,c,b,p,e,b,k,k,n,n,p,w,o,l,h,y,p -p,f,y,g,f,f,f,c,b,h,e,b,k,k,p,p,p,w,o,l,h,y,g -p,x,s,g,t,f,f,c,b,p,t,b,f,f,w,w,p,w,o,p,h,v,g -p,f,f,g,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,v,p -p,x,f,y,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,y,d -p,f,f,y,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,v,g -p,x,y,n,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,f,f,g,f,f,f,c,b,h,e,b,k,k,b,n,p,w,o,l,h,v,p -e,f,y,n,t,n,f,c,b,e,e,?,s,s,w,w,p,w,t,e,w,c,w -e,f,y,e,t,n,f,c,b,p,t,b,s,s,g,p,p,w,o,p,k,v,d -p,f,y,g,f,f,f,c,b,g,e,b,k,k,p,b,p,w,o,l,h,y,g -p,x,y,y,f,f,f,c,b,h,e,b,k,k,p,p,p,w,o,l,h,v,p -p,f,f,g,f,f,f,c,b,p,e,b,k,k,n,n,p,w,o,l,h,y,d -e,f,f,e,t,n,f,c,b,n,t,b,s,s,p,w,p,w,o,p,n,v,d -p,x,y,y,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,v,p -p,f,f,y,f,f,f,c,b,p,e,b,k,k,b,b,p,w,o,l,h,y,p -p,f,f,g,f,f,f,c,b,p,e,b,k,k,p,n,p,w,o,l,h,v,p -p,f,y,y,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,y,d -p,x,y,g,f,f,f,c,b,h,e,b,k,k,b,b,p,w,o,l,h,v,d -p,b,y,w,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,m -p,f,f,y,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,y,p -p,x,y,n,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,f,f,g,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,y,g -p,x,y,y,f,f,f,c,b,g,e,b,k,k,n,n,p,w,o,l,h,v,g -p,f,y,y,f,f,f,c,b,g,e,b,k,k,n,n,p,w,o,l,h,y,p -e,f,y,p,t,n,f,c,b,e,e,?,s,s,e,e,p,w,t,e,w,c,w -p,x,s,w,t,f,f,c,b,h,t,b,s,s,w,w,p,w,o,p,h,s,g -p,f,y,g,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,y,g -p,x,y,g,f,f,f,c,b,h,e,b,k,k,b,b,p,w,o,l,h,v,p -p,x,f,y,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,v,g -p,f,s,b,t,f,f,c,b,w,t,b,f,s,w,w,p,w,o,p,h,s,u -p,x,f,g,f,c,f,c,n,g,e,b,s,s,w,w,p,w,o,p,k,v,d -p,x,f,y,f,f,f,c,b,g,e,b,k,k,n,n,p,w,o,l,h,y,p -p,f,y,g,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,y,g -p,f,y,y,f,f,f,c,b,h,e,b,k,k,n,n,p,w,o,l,h,y,p -e,x,y,r,f,n,f,c,n,p,e,?,s,f,w,w,p,w,o,f,h,y,d -e,f,y,w,f,n,f,c,n,h,e,?,s,f,w,w,p,w,o,f,h,v,d -p,x,f,y,f,f,f,c,b,h,e,b,k,k,n,b,p,w,o,l,h,v,d -p,f,y,y,f,f,f,c,b,h,e,b,k,k,b,n,p,w,o,l,h,y,p -p,f,y,y,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,y,g -p,x,y,e,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,x,y,e,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,x,y,g,f,f,f,c,b,h,e,b,k,k,b,b,p,w,o,l,h,y,d -p,x,y,y,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,v,g -p,f,f,g,f,f,f,c,b,h,e,b,k,k,b,n,p,w,o,l,h,y,d -p,x,y,g,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,y,g -p,x,y,y,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,v,d -p,f,f,y,f,f,f,c,b,h,e,b,k,k,n,b,p,w,o,l,h,y,p -e,f,s,e,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w -p,f,f,y,f,f,f,c,b,h,e,b,k,k,b,n,p,w,o,l,h,y,p -p,x,y,y,f,f,f,c,b,h,e,b,k,k,p,p,p,w,o,l,h,y,p -p,f,s,g,t,f,f,c,b,p,t,b,f,s,w,w,p,w,o,p,h,s,g -p,f,f,y,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,v,g -p,x,y,y,f,f,f,c,b,h,e,b,k,k,n,n,p,w,o,l,h,y,d -p,x,f,y,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,y,p -p,x,s,g,t,f,f,c,b,h,t,b,f,f,w,w,p,w,o,p,h,v,u -p,f,y,g,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,y,g -p,f,f,g,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,v,g -p,x,s,w,f,c,f,c,n,n,e,b,s,s,w,w,p,w,o,p,k,v,d 
-p,f,y,g,f,f,f,c,b,p,e,b,k,k,b,b,p,w,o,l,h,y,p -e,f,y,e,t,n,f,c,b,u,t,b,s,s,p,w,p,w,o,p,n,y,d -p,x,f,y,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,y,g -p,x,y,y,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,y,p -e,k,s,b,t,n,f,c,b,e,e,?,s,s,w,w,p,w,t,e,w,c,w -p,f,f,y,f,f,f,c,b,h,e,b,k,k,b,p,p,w,o,l,h,v,g -p,f,f,y,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,v,d -p,x,y,y,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,y,g -e,x,f,n,f,n,f,w,n,w,e,b,s,f,w,n,p,w,o,e,w,v,l -e,x,y,e,t,n,f,c,b,e,e,?,s,s,e,e,p,w,t,e,w,c,w -p,x,y,g,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,v,p -p,x,f,y,f,f,f,c,b,g,e,b,k,k,p,b,p,w,o,l,h,v,g -p,f,s,g,t,f,f,c,b,h,t,b,f,f,w,w,p,w,o,p,h,v,g -p,x,y,y,f,f,f,c,b,g,e,b,k,k,p,b,p,w,o,l,h,y,g -p,f,y,g,f,f,f,c,b,p,e,b,k,k,b,n,p,w,o,l,h,v,p -p,x,y,g,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,v,g -p,x,y,n,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -e,k,s,b,t,n,f,c,b,w,e,?,s,s,e,w,p,w,t,e,w,c,w -p,f,f,y,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,y,d -p,x,f,g,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,v,p -p,f,y,g,f,f,f,c,b,h,e,b,k,k,b,b,p,w,o,l,h,y,p -p,x,f,y,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,v,d -p,f,y,y,f,f,f,c,b,h,e,b,k,k,n,n,p,w,o,l,h,v,g -e,k,s,p,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w -p,f,y,n,f,n,f,c,n,w,e,?,k,y,w,n,p,w,o,e,w,v,d -p,x,y,g,f,f,f,c,b,g,e,b,k,k,n,n,p,w,o,l,h,y,d -p,x,f,y,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,y,d -p,x,y,g,f,f,f,c,b,h,e,b,k,k,n,b,p,w,o,l,h,v,p -p,f,s,b,t,f,f,c,b,w,t,b,f,f,w,w,p,w,o,p,h,v,u -e,f,s,b,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w -p,f,s,w,t,f,f,c,b,w,t,b,f,f,w,w,p,w,o,p,h,s,u -p,f,y,g,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,y,p -p,x,f,y,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,v,p -p,f,y,g,f,f,f,c,b,h,e,b,k,k,n,n,p,w,o,l,h,v,p -p,f,y,y,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,y,g -p,f,f,y,f,f,f,c,b,p,e,b,k,k,n,n,p,w,o,l,h,v,d -p,x,s,b,t,f,f,c,b,h,t,b,s,s,w,w,p,w,o,p,h,v,u -p,f,f,g,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,y,p -p,x,y,g,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,y,d -p,f,f,y,f,f,f,c,b,p,e,b,k,k,b,b,p,w,o,l,h,y,g -p,f,y,y,f,f,f,c,b,h,e,b,k,k,p,p,p,w,o,l,h,y,d -p,f,s,g,t,f,f,c,b,p,t,b,s,f,w,w,p,w,o,p,h,s,u -e,f,s,b,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w -e,x,s,p,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w -e,k,s,n,t,n,f,c,b,e,e,?,s,s,w,w,p,w,t,e,w,c,w -p,f,y,p,t,n,f,c,b,g,e,b,s,s,w,w,p,w,t,p,r,v,g -p,f,y,g,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,v,p -p,x,y,y,f,f,f,c,b,h,e,b,k,k,b,b,p,w,o,l,h,v,p -p,f,f,g,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,v,g -p,f,f,g,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,y,d -p,x,f,y,f,f,f,c,b,p,e,b,k,k,b,n,p,w,o,l,h,y,p -p,f,f,g,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,v,d -p,f,y,g,f,f,f,c,b,h,e,b,k,k,b,p,p,w,o,l,h,y,d -p,f,y,y,f,f,f,c,b,h,e,b,k,k,b,b,p,w,o,l,h,v,d -p,f,f,y,f,f,f,c,b,p,e,b,k,k,b,b,p,w,o,l,h,v,g -p,f,y,y,f,f,f,c,b,p,e,b,k,k,p,n,p,w,o,l,h,y,g -p,x,y,y,f,f,f,c,b,p,e,b,k,k,p,b,p,w,o,l,h,v,d -p,x,y,y,f,f,f,c,b,h,e,b,k,k,b,n,p,w,o,l,h,y,p -p,f,y,g,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,v,g -p,f,f,g,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,y,d -p,x,y,y,f,f,f,c,b,g,e,b,k,k,n,p,p,w,o,l,h,v,p -p,x,y,y,f,f,f,c,b,g,e,b,k,k,n,n,p,w,o,l,h,y,p -e,f,y,u,f,n,f,c,n,w,e,?,s,f,w,w,p,w,o,f,h,y,d -p,f,f,g,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,v,g -p,x,y,n,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,x,y,y,f,f,f,c,b,p,e,b,k,k,p,n,p,w,o,l,h,v,g -p,x,y,y,f,f,f,c,b,p,e,b,k,k,n,n,p,w,o,l,h,y,p -p,x,y,g,f,f,f,c,b,h,e,b,k,k,n,n,p,w,o,l,h,v,g -p,f,f,y,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,v,d -p,x,f,y,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,y,d -p,x,y,g,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,v,p -e,f,s,b,t,n,f,c,b,e,e,?,s,s,w,w,p,w,t,e,w,c,w -p,f,f,g,f,f,f,c,b,h,e,b,k,k,p,p,p,w,o,l,h,y,p -p,f,y,y,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,v,d -e,x,s,n,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w 
-p,f,f,g,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,y,p -p,x,y,g,f,f,f,c,b,g,e,b,k,k,p,b,p,w,o,l,h,v,d -p,x,y,n,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,f,f,y,f,f,f,c,b,g,e,b,k,k,n,n,p,w,o,l,h,v,d -p,x,y,n,f,n,f,c,n,w,e,?,k,y,w,n,p,w,o,e,w,v,d -p,b,s,b,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,g -p,f,f,y,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,v,d -p,f,y,g,f,f,f,c,b,h,e,b,k,k,n,b,p,w,o,l,h,y,d -e,f,y,u,f,n,f,c,n,u,e,?,s,f,w,w,p,w,o,f,h,y,d -p,x,f,y,f,f,f,c,b,g,e,b,k,k,n,n,p,w,o,l,h,v,g -p,f,y,y,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,y,g -p,f,f,y,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,y,g -p,x,y,y,f,f,f,c,b,h,e,b,k,k,b,p,p,w,o,l,h,y,d -e,x,y,e,t,n,f,c,b,w,e,?,s,s,e,w,p,w,t,e,w,c,w -p,f,f,g,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,v,g -p,x,y,y,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,v,g -p,x,y,y,f,f,f,c,b,p,e,b,k,k,b,b,p,w,o,l,h,v,p -p,f,y,g,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,v,p -e,k,s,b,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w -p,f,y,y,f,f,f,c,b,p,e,b,k,k,p,b,p,w,o,l,h,y,d -p,x,y,g,f,f,f,c,b,p,e,b,k,k,b,b,p,w,o,l,h,v,p -p,x,f,y,f,f,f,c,b,h,e,b,k,k,n,n,p,w,o,l,h,v,d -p,f,f,y,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,v,p -p,f,s,w,t,f,f,c,b,h,t,b,s,f,w,w,p,w,o,p,h,v,u -p,x,s,g,f,c,f,c,n,p,e,b,s,s,w,w,p,w,o,p,n,v,d -p,f,y,y,f,f,f,c,b,g,e,b,k,k,p,b,p,w,o,l,h,y,d -e,f,y,u,f,n,f,c,n,p,e,?,s,f,w,w,p,w,o,f,h,v,d -p,x,f,y,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,y,d -p,f,y,g,f,f,f,c,b,g,e,b,k,k,n,p,p,w,o,l,h,y,d -p,x,f,y,f,f,f,c,b,g,e,b,k,k,n,p,p,w,o,l,h,v,g -p,x,f,y,f,f,f,c,b,h,e,b,k,k,n,b,p,w,o,l,h,v,p -p,x,s,g,t,f,f,c,b,p,t,b,f,s,w,w,p,w,o,p,h,s,u -p,f,y,g,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,y,d -p,f,f,y,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,y,p -p,f,f,y,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,y,d -p,x,y,g,f,f,f,c,b,p,e,b,k,k,p,b,p,w,o,l,h,v,d -p,x,y,g,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,v,g -p,x,y,g,f,f,f,c,b,h,e,b,k,k,n,b,p,w,o,l,h,y,d -p,x,f,g,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,y,g -p,x,y,y,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,y,p -p,f,f,g,f,f,f,c,b,h,e,b,k,k,n,n,p,w,o,l,h,v,g -p,f,f,y,f,f,f,c,b,p,e,b,k,k,b,n,p,w,o,l,h,y,d -p,x,y,g,f,f,f,c,b,h,e,b,k,k,b,p,p,w,o,l,h,y,p -p,f,f,y,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,v,p -p,x,y,n,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,x,f,y,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,y,p -p,x,y,y,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,v,p -p,f,s,b,t,f,f,c,b,w,t,b,f,s,w,w,p,w,o,p,h,v,g -p,f,f,g,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,v,d -p,f,y,y,f,f,f,c,b,h,e,b,k,k,b,p,p,w,o,l,h,y,p -p,x,f,y,f,f,f,c,b,h,e,b,k,k,n,n,p,w,o,l,h,y,p -p,f,f,g,f,f,f,c,b,p,e,b,k,k,b,n,p,w,o,l,h,v,d -p,x,y,y,f,f,f,c,b,g,e,b,k,k,n,p,p,w,o,l,h,y,d -p,x,y,g,f,f,f,c,b,p,e,b,k,k,b,b,p,w,o,l,h,v,d -p,x,y,e,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -e,k,y,b,t,n,f,c,b,e,e,?,s,s,e,w,p,w,t,e,w,c,w -e,f,y,b,t,n,f,c,b,w,e,?,s,s,e,w,p,w,t,e,w,c,w -p,x,y,g,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,y,d -p,f,f,y,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,y,p -p,x,y,g,f,f,f,c,b,h,e,b,k,k,n,b,p,w,o,l,h,y,g -p,f,y,g,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,y,g -p,x,s,b,t,f,f,c,b,w,t,b,s,f,w,w,p,w,o,p,h,s,u -p,f,y,y,f,f,f,c,b,h,e,b,k,k,b,p,p,w,o,l,h,v,p -p,x,s,w,t,f,f,c,b,p,t,b,f,f,w,w,p,w,o,p,h,s,u -p,x,f,y,f,f,f,c,b,p,e,b,k,k,b,n,p,w,o,l,h,v,p -p,x,y,g,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,y,p -p,x,s,w,t,f,f,c,b,p,t,b,s,f,w,w,p,w,o,p,h,s,u -p,f,s,b,t,f,f,c,b,p,t,b,f,f,w,w,p,w,o,p,h,s,g -p,x,s,w,t,f,f,c,b,h,t,b,f,s,w,w,p,w,o,p,h,s,u -p,f,f,y,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,y,d -p,f,y,g,f,f,f,c,b,g,e,b,k,k,p,b,p,w,o,l,h,y,p -p,x,f,y,f,f,f,c,b,h,e,b,k,k,b,n,p,w,o,l,h,v,g -p,f,f,g,f,f,f,c,b,h,e,b,k,k,n,b,p,w,o,l,h,v,p -e,x,y,w,f,n,f,c,n,u,e,?,s,f,w,w,p,w,o,f,h,y,d -e,x,s,e,t,n,f,c,b,e,e,?,s,s,w,w,p,w,t,e,w,c,w 
-p,x,y,g,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,v,g -p,f,f,g,f,f,f,c,b,h,e,b,k,k,n,b,p,w,o,l,h,v,g -p,x,y,n,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,x,y,g,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,v,p -p,f,f,g,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,y,p -p,f,f,y,f,f,f,c,b,h,e,b,k,k,b,n,p,w,o,l,h,v,p -p,f,f,y,f,f,f,c,b,h,e,b,k,k,b,p,p,w,o,l,h,y,d -e,f,f,e,t,n,f,c,b,n,t,b,s,s,g,w,p,w,o,p,n,y,d -p,x,y,y,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,y,g -p,f,y,y,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,v,d -p,f,f,g,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,v,p -p,x,y,y,f,f,f,c,b,h,e,b,k,k,p,p,p,w,o,l,h,y,g -p,x,f,y,f,f,f,c,b,g,e,b,k,k,p,b,p,w,o,l,h,y,p -p,k,f,y,f,n,f,c,n,w,e,?,k,y,w,n,p,w,o,e,w,v,d -p,f,f,y,f,f,f,c,b,h,e,b,k,k,n,b,p,w,o,l,h,y,g -e,k,f,c,f,n,f,w,n,w,e,b,f,s,w,n,p,w,o,e,w,v,l -p,f,f,g,f,f,f,c,b,h,e,b,k,k,n,n,p,w,o,l,h,v,p -p,f,y,y,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,y,p -p,x,y,g,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,v,d -p,x,y,g,f,f,f,c,b,p,e,b,k,k,p,b,p,w,o,l,h,v,p -p,f,f,y,f,f,f,c,b,h,e,b,k,k,p,p,p,w,o,l,h,v,g -p,f,y,y,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,v,d -p,x,f,y,f,f,f,c,b,p,e,b,k,k,n,n,p,w,o,l,h,y,d -p,f,f,y,f,f,f,c,b,p,e,b,k,k,n,n,p,w,o,l,h,v,p -p,x,y,y,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,y,d -p,b,y,p,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,g -e,k,y,p,t,n,f,c,b,e,e,?,s,s,w,w,p,w,t,e,w,c,w -p,x,y,y,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,v,p -p,x,f,y,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,v,g -e,k,y,b,t,n,f,c,b,e,e,?,s,s,w,e,p,w,t,e,w,c,w -p,f,y,y,f,f,f,c,b,p,e,b,k,k,n,n,p,w,o,l,h,y,d -p,x,s,w,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,s,u -p,x,y,n,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,x,y,n,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,x,y,e,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,f,g,w,t,n,f,w,n,w,e,b,s,s,w,w,p,w,o,p,w,c,l -p,x,s,g,t,f,f,c,b,p,t,b,f,s,w,w,p,w,o,p,h,v,u -e,f,y,c,f,n,f,w,n,w,e,b,f,s,w,n,p,w,o,e,w,v,l -p,x,y,g,f,f,f,c,b,p,e,b,k,k,p,b,p,w,o,l,h,y,g -p,f,y,y,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,v,d -p,f,y,y,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,y,p -e,f,y,p,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w -p,x,s,b,t,f,f,c,b,w,t,b,f,s,w,w,p,w,o,p,h,v,g -p,x,s,g,t,f,f,c,b,h,t,b,s,f,w,w,p,w,o,p,h,s,g -p,x,y,e,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -e,k,y,c,f,n,f,w,n,w,e,b,s,f,w,n,p,w,o,e,w,v,l -p,x,s,w,t,f,f,c,b,w,t,b,s,f,w,w,p,w,o,p,h,s,u -p,x,s,w,t,f,f,c,b,h,t,b,s,f,w,w,p,w,o,p,h,v,g -p,f,s,b,t,f,f,c,b,w,t,b,s,f,w,w,p,w,o,p,h,v,u -p,f,s,b,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,v,g -e,k,y,c,f,n,f,w,n,w,e,b,f,s,w,n,p,w,o,e,w,v,l -p,x,y,g,f,f,f,c,b,g,e,b,k,k,n,n,p,w,o,l,h,v,d -p,f,s,w,t,f,f,c,b,h,t,b,f,f,w,w,p,w,o,p,h,v,g -p,f,s,g,t,f,f,c,b,w,t,b,s,s,w,w,p,w,o,p,h,s,u -p,c,g,w,t,n,f,w,n,w,e,b,s,s,w,w,p,w,o,p,w,c,l -p,x,s,g,t,f,f,c,b,p,t,b,s,f,w,w,p,w,o,p,h,s,u -p,b,g,w,t,n,f,w,n,w,e,b,s,s,w,w,p,w,o,p,w,c,l -p,x,s,b,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,v,g -p,f,s,w,t,f,f,c,b,h,t,b,f,f,w,w,p,w,o,p,h,s,u -p,b,f,y,f,n,f,c,n,w,e,?,k,y,w,y,p,w,o,e,w,v,d -p,x,y,n,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,x,y,n,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -e,k,s,n,t,n,f,c,b,w,e,?,s,s,e,w,p,w,t,e,w,c,w -e,k,y,b,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w -p,f,s,w,t,f,f,c,b,w,t,b,s,s,w,w,p,w,o,p,h,v,g -p,f,s,b,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,m -p,f,y,y,f,f,f,c,b,p,e,b,k,k,b,b,p,w,o,l,h,y,p -p,x,y,g,f,f,f,c,b,p,e,b,k,k,p,n,p,w,o,l,h,y,g -p,f,s,b,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,g -p,x,y,n,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,x,y,e,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,f,f,g,f,f,f,c,b,p,e,b,k,k,b,b,p,w,o,l,h,y,d -e,x,s,e,t,n,f,c,b,e,e,?,s,s,e,w,p,w,t,e,w,c,w -p,x,y,n,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,x,f,y,f,f,f,c,b,g,e,b,k,k,p,b,p,w,o,l,h,y,g 
-p,x,y,n,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -e,f,f,n,f,n,f,w,n,w,e,b,s,f,w,n,p,w,o,e,w,v,l -p,x,y,n,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -e,x,s,b,t,n,f,c,b,e,e,?,s,s,e,e,p,w,t,e,w,c,w -e,f,y,n,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w -p,f,y,y,f,f,f,c,b,g,e,b,k,k,p,b,p,w,o,l,h,y,p -p,f,y,b,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,m -p,x,y,n,f,n,f,c,n,w,e,?,k,y,w,y,p,w,o,e,w,v,d -p,x,y,n,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,x,s,b,t,f,f,c,b,w,t,b,s,s,w,w,p,w,o,p,h,s,g -p,x,s,b,t,f,f,c,b,h,t,b,f,s,w,w,p,w,o,p,h,v,g -p,f,y,y,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,y,p -p,f,s,g,t,f,f,c,b,w,t,b,s,f,w,w,p,w,o,p,h,v,g -p,x,y,n,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,x,s,b,t,f,f,c,b,p,t,b,f,s,w,w,p,w,o,p,h,v,g -p,f,f,n,f,n,f,c,n,w,e,?,k,y,w,y,p,w,o,e,w,v,d -e,x,y,e,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w -p,f,s,b,t,f,f,c,b,w,t,b,f,f,w,w,p,w,o,p,h,s,g -p,f,s,w,t,f,f,c,b,p,t,b,f,s,w,w,p,w,o,p,h,s,g -e,k,y,e,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w -p,x,f,y,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,y,p -p,x,f,n,f,n,f,c,n,w,e,?,k,y,w,n,p,w,o,e,w,v,d -e,x,y,n,t,n,f,c,b,e,e,?,s,s,e,w,p,w,t,e,w,c,w -p,x,y,g,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,y,g -p,f,s,b,t,f,f,c,b,h,t,b,f,s,w,w,p,w,o,p,h,v,g -p,f,s,b,t,f,f,c,b,h,t,b,f,s,w,w,p,w,o,p,h,s,g -p,f,y,y,f,f,f,c,b,h,e,b,k,k,b,p,p,w,o,l,h,v,g -p,f,y,y,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,v,d -e,x,s,p,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w -e,f,s,p,t,n,f,c,b,e,e,?,s,s,e,e,p,w,t,e,w,c,w -p,b,y,w,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,g -e,x,y,c,f,n,f,w,n,w,e,b,f,f,w,n,p,w,o,e,w,v,l -e,f,s,n,t,n,f,c,b,w,e,?,s,s,e,w,p,w,t,e,w,c,w -p,x,s,b,t,f,f,c,b,h,t,b,f,s,w,w,p,w,o,p,h,v,u -p,x,s,g,t,f,f,c,b,w,t,b,f,s,w,w,p,w,o,p,h,v,g -p,f,y,y,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,y,d -p,x,s,b,t,f,f,c,b,w,t,b,f,s,w,w,p,w,o,p,h,v,u -p,x,s,w,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,s,g -e,f,y,c,f,n,f,w,n,w,e,b,s,f,w,n,p,w,o,e,w,v,l -p,x,y,e,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,f,f,y,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,y,d -e,x,y,w,f,n,f,c,n,h,e,?,s,f,w,w,p,w,o,f,h,v,d -p,x,y,y,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,v,p -p,f,y,p,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,g -p,f,s,g,t,f,f,c,b,h,t,b,f,s,w,w,p,w,o,p,h,v,u -p,x,y,n,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -e,k,s,p,t,n,f,c,b,w,e,?,s,s,e,w,p,w,t,e,w,c,w -p,x,y,n,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,x,y,g,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,v,p -p,f,s,b,t,f,f,c,b,h,t,b,f,f,w,w,p,w,o,p,h,s,u -e,k,y,p,t,n,f,c,b,e,e,?,s,s,w,e,p,w,t,e,w,c,w -p,x,f,y,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,v,p -p,f,y,g,f,f,f,c,b,g,e,b,k,k,n,n,p,w,o,l,h,v,g -p,f,f,g,f,f,f,c,b,g,e,b,k,k,n,p,p,w,o,l,h,y,d -p,x,s,b,t,f,f,c,b,p,t,b,s,f,w,w,p,w,o,p,h,s,g -p,x,y,n,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,x,y,y,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,v,d -p,f,f,y,f,f,f,c,b,h,e,b,k,k,n,n,p,w,o,l,h,v,d -p,f,f,n,f,n,f,c,n,w,e,?,k,y,w,n,p,w,o,e,w,v,d -e,k,s,b,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w -p,f,y,y,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,v,g -p,x,y,e,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,x,s,g,t,f,f,c,b,h,t,b,f,f,w,w,p,w,o,p,h,s,u -e,f,s,n,t,n,f,c,b,e,e,?,s,s,w,e,p,w,t,e,w,c,w -p,x,y,g,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,v,p -p,x,y,n,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,x,s,g,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,v,u -p,x,s,g,t,f,f,c,b,h,t,b,f,s,w,w,p,w,o,p,h,v,g -p,f,y,p,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,m -e,k,y,b,t,n,f,c,b,w,e,?,s,s,e,w,p,w,t,e,w,c,w -p,x,s,w,t,f,f,c,b,w,t,b,s,s,w,w,p,w,o,p,h,v,u -p,x,y,n,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -e,k,y,n,t,n,f,c,b,w,e,?,s,s,e,w,p,w,t,e,w,c,w -p,f,y,y,f,f,f,c,b,g,e,b,k,k,n,p,p,w,o,l,h,y,d -e,x,y,p,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w 
-p,f,s,g,t,f,f,c,b,p,t,b,f,f,w,w,p,w,o,p,h,s,u -e,x,s,p,t,n,f,c,b,e,e,?,s,s,w,e,p,w,t,e,w,c,w -p,b,y,n,f,n,f,c,n,w,e,?,k,y,w,n,p,w,o,e,w,v,d -p,f,s,b,t,f,f,c,b,h,t,b,f,f,w,w,p,w,o,p,h,v,g -p,x,y,n,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,x,y,n,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,x,s,b,t,f,f,c,b,w,t,b,s,s,w,w,p,w,o,p,h,s,u -p,x,y,n,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -e,k,y,p,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w -e,k,f,c,f,n,f,w,n,w,e,b,f,f,w,n,p,w,o,e,w,v,l -p,x,y,y,f,f,f,c,b,h,e,b,k,k,b,n,p,w,o,l,h,v,g -e,x,s,n,t,n,f,c,b,w,e,?,s,s,e,w,p,w,t,e,w,c,w -p,f,f,g,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,y,p -p,f,y,y,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,v,d -p,f,s,g,t,f,f,c,b,w,t,b,s,f,w,w,p,w,o,p,h,v,u -p,k,y,w,t,n,f,w,n,w,e,b,s,s,w,w,p,w,o,p,w,c,l -e,k,f,n,f,n,f,w,n,w,e,b,f,f,w,n,p,w,o,e,w,v,l -p,k,f,y,f,n,f,c,n,w,e,?,k,y,w,y,p,w,o,e,w,v,d -p,f,s,w,t,f,f,c,b,w,t,b,f,f,w,w,p,w,o,p,h,s,g -p,f,y,y,f,n,f,c,n,w,e,?,k,y,w,y,p,w,o,e,w,v,d -p,f,y,y,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,v,d -e,k,s,b,t,n,f,c,b,e,e,?,s,s,w,e,p,w,t,e,w,c,w -p,f,y,y,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,v,p -p,x,y,n,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,x,y,y,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,y,d -p,x,y,y,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,v,p -p,b,y,w,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,g -p,f,y,y,f,f,f,c,b,g,e,b,k,k,p,b,p,w,o,l,h,y,g -p,x,s,w,t,f,f,c,b,p,t,b,s,f,w,w,p,w,o,p,h,v,g -p,x,s,b,t,f,f,c,b,w,t,b,s,s,w,w,p,w,o,p,h,v,g -e,k,y,b,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w -p,f,y,y,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,v,p -p,x,s,b,t,f,f,c,b,p,t,b,f,f,w,w,p,w,o,p,h,s,g -p,b,y,b,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,m -p,x,s,w,t,f,f,c,b,w,t,b,s,f,w,w,p,w,o,p,h,v,g -e,k,s,n,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w -p,f,s,g,t,f,f,c,b,w,t,b,f,s,w,w,p,w,o,p,h,s,u -p,x,f,y,f,n,f,c,n,w,e,?,k,y,w,y,p,w,o,e,w,v,d -p,f,s,g,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,v,g -p,f,y,y,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,y,d -p,k,y,n,f,n,f,c,n,w,e,?,k,y,w,y,p,w,o,e,w,v,d -p,f,s,b,t,f,f,c,b,p,t,b,s,f,w,w,p,w,o,p,h,s,u -e,f,s,e,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w -p,x,y,n,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,x,y,n,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,x,y,n,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,x,s,w,t,f,f,c,b,h,t,b,f,s,w,w,p,w,o,p,h,v,u -e,k,s,n,t,n,f,c,b,e,e,?,s,s,e,e,p,w,t,e,w,c,w -p,f,y,g,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,y,g -e,x,s,b,t,n,f,c,b,e,e,?,s,s,w,e,p,w,t,e,w,c,w -p,f,s,w,t,f,f,c,b,w,t,b,f,s,w,w,p,w,o,p,h,s,u -e,k,f,n,f,n,f,w,n,w,e,b,s,s,w,n,p,w,o,e,w,v,l -p,x,y,n,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,f,s,w,t,f,f,c,b,h,t,b,f,s,w,w,p,w,o,p,h,v,g -p,x,s,g,t,f,f,c,b,h,t,b,s,s,w,w,p,w,o,p,h,v,u -p,k,y,y,f,n,f,c,n,w,e,?,k,y,w,y,p,w,o,e,w,v,d -p,f,s,w,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,m -p,f,f,y,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,y,p -e,k,y,p,t,n,f,c,b,w,e,?,s,s,e,w,p,w,t,e,w,c,w -p,f,y,w,t,n,f,w,n,w,e,b,s,s,w,w,p,w,o,p,w,c,l -p,f,s,b,t,f,f,c,b,p,t,b,f,s,w,w,p,w,o,p,h,v,u -e,f,y,w,f,n,f,c,n,u,e,?,s,f,w,w,p,w,o,f,h,y,d -e,x,y,r,f,n,f,c,n,w,e,?,s,f,w,w,p,w,o,f,h,v,d -e,x,s,b,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w -p,f,s,g,t,f,f,c,b,h,t,b,s,s,w,w,p,w,o,p,h,v,g -p,x,y,e,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,f,y,y,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,y,p -p,f,s,g,t,f,f,c,b,w,t,b,s,s,w,w,p,w,o,p,h,s,g -p,f,s,b,t,f,f,c,b,h,t,b,s,s,w,w,p,w,o,p,h,s,g -e,f,s,e,t,n,f,c,b,e,e,?,s,s,e,w,p,w,t,e,w,c,w -p,x,y,e,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,f,f,g,f,f,f,c,b,p,e,b,k,k,b,b,p,w,o,l,h,y,g -p,f,y,y,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,y,d -e,x,s,n,t,n,f,c,b,e,e,?,s,s,e,w,p,w,t,e,w,c,w -p,f,s,g,t,f,f,c,b,p,t,b,f,f,w,w,p,w,o,p,h,s,g 
-p,x,y,n,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,f,s,w,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,s,g -e,x,f,c,f,n,f,w,n,w,e,b,s,s,w,n,p,w,o,e,w,v,l -p,f,s,g,t,f,f,c,b,w,t,b,f,s,w,w,p,w,o,p,h,v,g -p,f,f,g,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,v,p -e,x,y,r,f,n,f,c,n,w,e,?,s,f,w,w,p,w,o,f,h,y,d -p,x,f,y,f,f,f,c,b,g,e,b,k,k,p,b,p,w,o,l,h,v,p -p,x,y,n,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,x,y,n,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -e,f,y,e,t,n,f,c,b,w,e,?,s,s,e,w,p,w,t,e,w,c,w -p,x,s,g,t,f,f,c,b,w,t,b,s,f,w,w,p,w,o,p,h,v,u -e,k,y,n,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w -e,f,y,e,t,n,f,c,b,e,e,?,s,s,w,w,p,w,t,e,w,c,w -p,x,y,n,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,b,y,n,f,n,f,c,n,w,e,?,k,y,w,y,p,w,o,e,w,v,d -p,x,y,n,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -e,f,y,b,t,n,f,c,b,e,e,?,s,s,w,e,p,w,t,e,w,c,w -p,x,s,w,t,f,f,c,b,h,t,b,f,s,w,w,p,w,o,p,h,v,g -p,f,y,y,f,f,f,c,b,p,e,b,k,k,p,b,p,w,o,l,h,v,g -e,f,y,n,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w -p,b,y,w,t,n,f,c,b,g,e,b,s,s,w,w,p,w,t,p,r,v,m -e,x,y,n,f,n,f,w,n,w,e,b,s,f,w,n,p,w,o,e,w,v,l -p,f,y,y,f,f,f,c,b,g,e,b,k,k,n,p,p,w,o,l,h,v,p -p,x,s,b,t,f,f,c,b,h,t,b,f,s,w,w,p,w,o,p,h,s,g -p,x,y,n,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,x,s,g,t,f,f,c,b,h,t,b,s,s,w,w,p,w,o,p,h,s,u -p,f,s,g,t,f,f,c,b,w,t,b,s,f,w,w,p,w,o,p,h,s,u -p,x,y,n,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -e,f,y,w,f,n,f,c,n,p,e,?,s,f,w,w,p,w,o,f,h,v,d -p,x,y,n,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,f,f,g,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,v,g -p,x,y,e,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,x,s,w,t,f,f,c,b,w,t,b,s,s,w,w,p,w,o,p,h,v,g -p,f,y,g,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,v,d -p,x,s,w,t,f,f,c,b,h,t,b,s,f,w,w,p,w,o,p,h,s,u -p,k,y,y,f,n,f,c,n,w,e,?,k,y,w,n,p,w,o,e,w,v,d -p,f,s,w,t,f,f,c,b,w,t,b,s,f,w,w,p,w,o,p,h,v,u -p,f,s,g,t,f,f,c,b,h,t,b,f,f,w,w,p,w,o,p,h,v,u -e,x,y,c,f,n,f,w,n,w,e,b,f,s,w,n,p,w,o,e,w,v,l -p,x,s,w,t,f,f,c,b,h,t,b,s,s,w,w,p,w,o,p,h,v,u -p,x,y,n,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,f,y,p,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,g -p,f,y,y,f,f,f,c,b,p,e,b,k,k,p,b,p,w,o,l,h,y,p -e,x,s,p,t,n,f,c,b,e,e,?,s,s,e,e,p,w,t,e,w,c,w -p,f,y,b,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,g -p,x,f,y,f,n,f,c,n,w,e,?,k,y,w,n,p,w,o,e,w,v,d -p,f,y,y,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,y,p -e,f,f,n,f,n,f,w,n,w,e,b,f,f,w,n,p,w,o,e,w,v,l -e,x,f,n,f,n,f,w,n,w,e,b,f,s,w,n,p,w,o,e,w,v,l -p,x,y,n,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,f,s,g,t,f,f,c,b,p,t,b,f,s,w,w,p,w,o,p,h,v,u -p,x,s,b,t,f,f,c,b,h,t,b,f,f,w,w,p,w,o,p,h,s,u -p,f,s,g,t,f,f,c,b,w,t,b,s,s,w,w,p,w,o,p,h,v,g -p,x,s,b,t,f,f,c,b,h,t,b,f,f,w,w,p,w,o,p,h,s,g -e,x,y,r,f,n,f,c,n,h,e,?,s,f,w,w,p,w,o,f,h,v,d -p,f,y,y,f,f,f,c,b,h,e,b,k,k,b,b,p,w,o,l,h,y,d -p,f,s,g,t,f,f,c,b,h,t,b,f,s,w,w,p,w,o,p,h,s,u -p,x,y,n,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,f,y,y,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,y,g -p,f,s,w,t,f,f,c,b,w,t,b,f,f,w,w,p,w,o,p,h,v,u -p,x,s,w,t,f,f,c,b,w,t,b,f,s,w,w,p,w,o,p,h,s,u -p,f,y,g,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,y,p -p,f,y,y,f,f,f,c,b,h,e,b,k,k,b,b,p,w,o,l,h,y,p -p,x,y,n,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,x,y,y,f,n,f,c,n,w,e,?,k,y,w,n,p,w,o,e,w,v,d -e,f,s,n,t,n,f,c,b,e,e,?,s,s,e,e,p,w,t,e,w,c,w -p,f,y,y,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,y,d -p,f,s,g,t,f,f,c,b,h,t,b,s,f,w,w,p,w,o,p,h,s,u -p,x,y,n,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,f,y,y,f,f,f,c,b,h,e,b,k,k,n,b,p,w,o,l,h,v,d -p,f,y,y,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,y,d -p,f,s,w,t,f,f,c,b,h,t,b,s,s,w,w,p,w,o,p,h,s,u -p,f,s,p,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,m -p,f,y,y,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,y,g -e,k,s,p,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w 
-p,x,s,b,t,f,f,c,b,h,t,b,s,s,w,w,p,w,o,p,h,s,g -p,x,s,w,t,f,f,c,b,p,t,b,f,s,w,w,p,w,o,p,h,s,g -p,x,s,w,t,f,f,c,b,h,t,b,f,f,w,w,p,w,o,p,h,s,g -p,x,y,n,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,b,y,p,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,m -p,x,s,b,t,f,f,c,b,h,t,b,f,s,w,w,p,w,o,p,h,s,u -e,x,y,p,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w -p,x,y,n,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,x,y,y,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,v,p -p,x,s,w,t,f,f,c,b,p,t,b,s,f,w,w,p,w,o,p,h,v,u -e,f,y,b,t,n,f,c,b,e,e,?,s,s,e,w,p,w,t,e,w,c,w -p,f,s,b,t,f,f,c,b,w,t,b,f,s,w,w,p,w,o,p,h,v,u -e,x,f,c,f,n,f,w,n,w,e,b,f,f,w,n,p,w,o,e,w,v,l -e,f,s,p,t,n,f,c,b,e,e,?,s,s,w,e,p,w,t,e,w,c,w -e,x,y,n,f,n,f,w,n,w,e,b,f,f,w,n,p,w,o,e,w,v,l -e,f,s,p,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w -p,f,s,g,t,f,f,c,b,w,t,b,s,s,w,w,p,w,o,p,h,v,u -e,k,s,e,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w -e,k,y,e,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w -p,f,s,w,t,f,f,c,b,p,t,b,s,f,w,w,p,w,o,p,h,v,g -e,k,s,b,t,n,f,c,b,e,e,?,s,s,e,w,p,w,t,e,w,c,w -p,x,s,b,t,f,f,c,b,p,t,b,f,f,w,w,p,w,o,p,h,v,g -p,b,y,y,f,n,f,c,n,w,e,?,k,y,w,y,p,w,o,e,w,v,d -p,x,s,w,t,f,f,c,b,h,t,b,s,s,w,w,p,w,o,p,h,s,u -p,x,f,y,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,v,g -p,f,y,y,f,f,f,c,b,p,e,b,k,k,p,n,p,w,o,l,h,y,p -p,b,f,n,f,n,f,c,n,w,e,?,k,y,w,y,p,w,o,e,w,v,d -p,f,y,y,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,y,p -p,f,y,y,f,f,f,c,b,p,e,b,k,k,b,b,p,w,o,l,h,v,p -e,f,y,c,f,n,f,w,n,w,e,b,s,s,w,n,p,w,o,e,w,v,l -p,x,y,e,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,f,s,w,t,f,f,c,b,h,t,b,s,s,w,w,p,w,o,p,h,s,g -p,x,y,n,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,b,y,p,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,g -p,f,y,y,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,y,d -p,x,s,b,t,f,f,c,b,w,t,b,s,f,w,w,p,w,o,p,h,s,g -p,f,y,y,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,v,p -p,f,s,b,t,f,f,c,b,p,t,b,f,s,w,w,p,w,o,p,h,s,g -p,f,s,b,t,f,f,c,b,h,t,b,s,s,w,w,p,w,o,p,h,s,u -p,x,y,n,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,f,y,y,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,v,p -e,x,s,n,t,n,f,c,b,e,e,?,s,s,w,w,p,w,t,e,w,c,w -p,f,y,y,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,v,g -e,f,s,b,t,n,f,c,b,e,e,?,s,s,w,e,p,w,t,e,w,c,w -e,f,s,e,t,n,f,c,b,w,e,?,s,s,e,w,p,w,t,e,w,c,w -e,x,s,n,t,n,f,c,b,e,e,?,s,s,e,e,p,w,t,e,w,c,w -p,f,s,w,t,f,f,c,b,p,t,b,f,s,w,w,p,w,o,p,h,v,u -p,f,y,y,f,f,f,c,b,h,e,b,k,k,b,p,p,w,o,l,h,v,d -e,f,s,p,t,n,f,c,b,e,e,?,s,s,e,w,p,w,t,e,w,c,w -e,x,s,e,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w -p,f,y,y,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,v,d -p,b,y,b,t,n,f,c,b,g,e,b,s,s,w,w,p,w,t,p,r,v,m -p,f,s,b,t,f,f,c,b,h,t,b,s,f,w,w,p,w,o,p,h,s,u -p,f,s,g,t,f,f,c,b,w,t,b,f,s,w,w,p,w,o,p,h,s,g -p,x,s,b,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,s,g -p,x,f,y,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,y,p -p,f,s,w,t,f,f,c,b,w,t,b,s,s,w,w,p,w,o,p,h,s,g -p,x,s,g,t,f,f,c,b,w,t,b,f,s,w,w,p,w,o,p,h,s,u -e,f,y,r,f,n,f,c,n,h,e,?,s,f,w,w,p,w,o,f,h,y,d -p,f,y,y,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,v,g -p,x,y,n,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,x,s,b,t,f,f,c,b,w,t,b,f,s,w,w,p,w,o,p,h,s,g -e,x,f,c,f,n,f,w,n,w,e,b,f,s,w,n,p,w,o,e,w,v,l -p,x,s,g,t,f,f,c,b,h,t,b,f,s,w,w,p,w,o,p,h,v,u -e,x,y,u,f,n,f,c,n,p,e,?,s,f,w,w,p,w,o,f,h,y,d -p,x,f,y,f,f,f,c,b,h,e,b,k,k,b,n,p,w,o,l,h,v,p -p,f,f,g,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,v,g -p,f,y,w,t,n,f,c,b,g,e,b,s,s,w,w,p,w,t,p,r,v,m -p,x,y,n,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,x,y,n,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,x,y,g,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,v,p -e,x,y,p,t,n,f,c,b,e,e,?,s,s,e,e,p,w,t,e,w,c,w -p,f,s,g,t,f,f,c,b,w,t,b,f,f,w,w,p,w,o,p,h,s,g -p,x,s,b,t,f,f,c,b,w,t,b,s,f,w,w,p,w,o,p,h,v,g -p,x,y,n,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p 
-p,x,s,g,t,f,f,c,b,h,t,b,f,s,w,w,p,w,o,p,h,s,u -p,x,y,n,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,f,s,w,t,f,f,c,b,p,t,b,f,f,w,w,p,w,o,p,h,s,g -p,x,s,g,t,f,f,c,b,w,t,b,s,s,w,w,p,w,o,p,h,v,u -p,x,s,g,t,f,f,c,b,w,t,b,s,f,w,w,p,w,o,p,h,s,g -p,f,y,y,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,v,p -p,f,y,y,f,f,f,c,b,p,e,b,k,k,n,n,p,w,o,l,h,v,p -p,f,y,g,f,f,f,c,b,p,e,b,k,k,b,n,p,w,o,l,h,y,g -p,x,y,y,f,f,f,c,b,g,e,b,k,k,n,p,p,w,o,l,h,y,g -p,x,s,g,t,f,f,c,b,w,t,b,f,f,w,w,p,w,o,p,h,s,u -p,f,s,b,t,f,f,c,b,p,t,b,f,f,w,w,p,w,o,p,h,s,u -p,f,s,w,t,f,f,c,b,h,t,b,f,s,w,w,p,w,o,p,h,s,u -p,f,f,g,f,f,f,c,b,p,e,b,k,k,p,n,p,w,o,l,h,y,g -p,f,s,b,t,f,f,c,b,h,t,b,f,f,w,w,p,w,o,p,h,s,g -p,x,y,n,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,x,s,w,t,f,f,c,b,w,t,b,s,s,w,w,p,w,o,p,h,s,g -e,k,s,e,t,n,f,c,b,e,e,?,s,s,e,w,p,w,t,e,w,c,w -p,f,y,b,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,m -p,x,y,n,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -e,k,y,n,t,n,f,c,b,e,e,?,s,s,w,e,p,w,t,e,w,c,w -p,f,s,w,t,f,f,c,b,w,t,b,s,s,w,w,p,w,o,p,h,v,u -p,f,s,w,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,v,u -p,x,y,n,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,x,s,w,t,f,f,c,b,w,t,b,f,s,w,w,p,w,o,p,h,s,g -p,x,y,n,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,b,s,p,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,g -p,x,y,e,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,f,y,g,f,f,f,c,b,g,e,b,k,k,n,n,p,w,o,l,h,v,p -p,x,y,y,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,v,d -p,x,y,g,f,f,f,c,b,h,e,b,k,k,n,b,p,w,o,l,h,v,g -p,f,y,w,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,m -p,f,s,g,t,f,f,c,b,h,t,b,f,f,w,w,p,w,o,p,h,s,g -e,k,y,p,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w -p,x,s,w,t,f,f,c,b,w,t,b,f,s,w,w,p,w,o,p,h,v,g -e,k,s,p,t,n,f,c,b,e,e,?,s,s,w,w,p,w,t,e,w,c,w -p,f,s,w,t,f,f,c,b,p,t,b,s,f,w,w,p,w,o,p,h,s,u -p,x,y,n,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,x,f,y,f,f,f,c,b,p,e,b,k,k,n,n,p,w,o,l,h,v,d -p,f,y,g,f,f,f,c,b,p,e,b,k,k,p,n,p,w,o,l,h,v,p -p,x,y,g,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,y,p -e,x,y,e,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w -p,f,y,y,f,f,f,c,b,g,e,b,k,k,p,b,p,w,o,l,h,v,p -p,x,y,n,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,x,s,b,t,f,f,c,b,w,t,b,f,f,w,w,p,w,o,p,h,v,g -p,f,y,y,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,y,d -e,k,y,e,t,n,f,c,b,e,e,?,s,s,e,w,p,w,t,e,w,c,w -p,f,y,p,t,n,f,c,b,g,e,b,s,s,w,w,p,w,t,p,r,v,m -p,f,y,w,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,m -p,f,y,y,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,y,p -p,f,y,y,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,v,g -e,f,y,r,f,n,f,c,n,w,e,?,s,f,w,w,p,w,o,f,h,v,d -p,x,s,w,t,f,f,c,b,p,t,b,s,f,w,w,p,w,o,p,h,s,g -p,x,s,w,t,f,f,c,b,p,t,b,f,f,w,w,p,w,o,p,h,s,g -p,x,s,b,t,f,f,c,b,p,t,b,f,s,w,w,p,w,o,p,h,s,u -p,x,y,n,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -e,k,y,e,t,n,f,c,b,e,e,?,s,s,e,e,p,w,t,e,w,c,w -e,k,s,p,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w -e,x,y,b,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w -p,x,s,g,t,f,f,c,b,h,t,b,s,s,w,w,p,w,o,p,h,v,g -p,f,y,y,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,y,g -p,x,y,n,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,c,y,w,t,n,f,w,n,w,e,b,s,s,w,w,p,w,o,p,w,c,l -p,f,s,w,t,f,f,c,b,p,t,b,f,f,w,w,p,w,o,p,h,v,g -p,f,s,g,t,f,f,c,b,w,t,b,s,f,w,w,p,w,o,p,h,s,g -p,x,y,e,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,x,s,g,t,f,f,c,b,w,t,b,s,s,w,w,p,w,o,p,h,s,g -p,x,y,n,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -e,x,y,p,t,n,f,c,b,e,e,?,s,s,e,w,p,w,t,e,w,c,w -e,f,y,w,f,n,f,c,n,h,e,?,s,f,w,w,p,w,o,f,h,y,d -p,f,y,y,f,f,f,c,b,h,e,b,k,k,b,p,p,w,o,l,h,y,g -p,x,y,n,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,x,f,y,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,y,d -p,x,y,n,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,x,s,g,t,f,f,c,b,h,t,b,s,f,w,w,p,w,o,p,h,v,u -p,x,y,n,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d 
-e,k,f,n,f,n,f,w,n,w,e,b,s,f,w,n,p,w,o,e,w,v,l -p,x,s,w,t,f,f,c,b,p,t,b,f,s,w,w,p,w,o,p,h,v,u -p,f,y,y,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,y,p -e,x,y,p,t,n,f,c,b,e,e,?,s,s,w,e,p,w,t,e,w,c,w -p,b,y,b,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,m -p,f,y,g,f,f,f,c,b,h,e,b,k,k,b,b,p,w,o,l,h,v,p -e,f,y,r,f,n,f,c,n,h,e,?,s,f,w,w,p,w,o,f,h,v,d -p,x,y,n,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,x,y,n,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,f,s,b,t,f,f,c,b,p,t,b,f,f,w,w,p,w,o,p,h,v,g -p,f,y,y,f,f,f,c,b,g,e,b,k,k,n,n,p,w,o,l,h,v,d -p,x,y,n,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -e,f,y,n,t,n,f,c,b,e,e,?,s,s,w,e,p,w,t,e,w,c,w -p,x,y,n,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -e,f,y,e,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w -e,x,y,b,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w -e,x,s,n,t,n,f,c,b,e,e,?,s,s,w,e,p,w,t,e,w,c,w -p,f,s,w,t,f,f,c,b,w,t,b,s,f,w,w,p,w,o,p,h,v,g -e,x,y,b,t,n,f,c,b,e,e,?,s,s,w,w,p,w,t,e,w,c,w -p,x,f,y,f,f,f,c,b,g,e,b,k,k,n,p,p,w,o,l,h,y,g -p,f,y,y,f,f,f,c,b,g,e,b,k,k,n,n,p,w,o,l,h,y,d -p,f,y,y,f,f,f,c,b,g,e,b,k,k,n,n,p,w,o,l,h,y,g -e,k,y,e,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w -p,f,s,w,t,f,f,c,b,p,t,b,s,f,w,w,p,w,o,p,h,v,u -e,k,y,c,f,n,f,w,n,w,e,b,s,s,w,n,p,w,o,e,w,v,l -e,k,y,n,f,n,f,w,n,w,e,b,s,s,w,n,p,w,o,e,w,v,l -p,b,s,p,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,m -p,x,y,n,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,x,s,b,t,f,f,c,b,h,t,b,s,f,w,w,p,w,o,p,h,s,u -e,f,y,r,f,n,f,c,n,p,e,?,s,f,w,w,p,w,o,f,h,v,d -p,x,s,b,t,f,f,c,b,w,t,b,f,f,w,w,p,w,o,p,h,s,g -e,f,y,u,f,n,f,c,n,p,e,?,s,f,w,w,p,w,o,f,h,y,d -p,x,y,n,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,x,s,w,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,v,u -e,x,s,e,t,n,f,c,b,w,e,?,s,s,e,w,p,w,t,e,w,c,w -e,f,y,p,t,n,f,c,b,e,e,?,s,s,w,e,p,w,t,e,w,c,w -p,x,y,n,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,x,y,n,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,x,s,w,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,v,g -p,f,y,y,f,f,f,c,b,p,e,b,k,k,p,n,p,w,o,l,h,v,p -e,x,y,n,t,n,f,c,b,e,e,?,s,s,w,w,p,w,t,e,w,c,w -e,x,y,e,t,n,f,c,b,e,e,?,s,s,e,w,p,w,t,e,w,c,w -p,x,s,w,t,f,f,c,b,w,t,b,s,f,w,w,p,w,o,p,h,v,u -p,x,y,y,f,f,f,c,b,h,e,b,k,k,b,p,p,w,o,l,h,v,g -p,f,s,b,t,n,f,c,b,g,e,b,s,s,w,w,p,w,t,p,r,v,m -p,f,f,y,f,f,f,c,b,h,e,b,k,k,b,p,p,w,o,l,h,v,p -p,x,s,g,t,f,f,c,b,p,t,b,s,f,w,w,p,w,o,p,h,v,g -e,f,s,p,t,n,f,c,b,w,e,?,s,s,e,w,p,w,t,e,w,c,w -e,x,y,n,t,n,f,c,b,e,e,?,s,s,w,e,p,w,t,e,w,c,w -p,b,y,w,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,m -p,f,y,y,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,v,d -p,x,y,e,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,x,s,b,t,f,f,c,b,h,t,b,s,f,w,w,p,w,o,p,h,s,g -p,x,s,b,t,f,f,c,b,h,t,b,s,s,w,w,p,w,o,p,h,v,g -p,x,y,n,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,b,y,p,t,n,f,c,b,g,e,b,s,s,w,w,p,w,t,p,r,v,g -e,x,y,c,f,n,f,w,n,w,e,b,s,s,w,n,p,w,o,e,w,v,l -p,x,y,n,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,x,s,w,t,f,f,c,b,w,t,b,s,s,w,w,p,w,o,p,h,s,u -p,x,y,n,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -e,f,y,n,f,n,f,w,n,w,e,b,f,s,w,n,p,w,o,e,w,v,l -p,x,y,n,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,f,s,g,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,s,u -p,x,y,e,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -e,x,y,r,f,n,f,c,n,h,e,?,s,f,w,w,p,w,o,f,h,y,d -p,b,y,p,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,m -e,x,s,p,t,n,f,c,b,w,e,?,s,s,e,w,p,w,t,e,w,c,w -p,x,y,n,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,x,y,n,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,f,s,g,t,f,f,c,b,w,t,b,f,f,w,w,p,w,o,p,h,v,g -p,f,y,y,f,f,f,c,b,p,e,b,k,k,p,b,p,w,o,l,h,v,p -p,f,y,p,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,m -p,x,y,n,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,x,y,n,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -e,f,s,b,t,n,f,c,b,e,e,?,s,s,e,w,p,w,t,e,w,c,w 
-p,f,s,b,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,v,u -e,x,y,c,f,n,f,w,n,w,e,b,s,f,w,n,p,w,o,e,w,v,l -e,x,y,w,f,n,f,c,n,w,e,?,s,f,w,w,p,w,o,f,h,y,d -p,x,y,n,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -e,f,y,n,f,n,f,w,n,w,e,b,s,s,w,n,p,w,o,e,w,v,l -p,x,y,n,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,x,y,n,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -e,k,y,p,t,n,f,c,b,e,e,?,s,s,e,w,p,w,t,e,w,c,w -p,f,s,w,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,m -p,x,y,n,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,f,y,y,f,f,f,c,b,g,e,b,k,k,n,n,p,w,o,l,h,v,g -e,k,y,n,f,n,f,w,n,w,e,b,s,f,w,n,p,w,o,e,w,v,l -p,f,s,g,t,f,f,c,b,p,t,b,f,s,w,w,p,w,o,p,h,v,g -p,f,s,w,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,g -p,f,f,g,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,y,g -e,f,s,n,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w -p,x,f,y,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,y,d -p,x,y,n,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,x,y,e,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,f,y,y,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,v,g -p,b,s,w,t,n,f,c,b,g,e,b,s,s,w,w,p,w,t,p,r,v,g -p,x,s,b,t,f,f,c,b,p,t,b,s,f,w,w,p,w,o,p,h,v,u -e,f,y,b,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w -e,x,f,c,f,n,f,w,n,w,e,b,s,f,w,n,p,w,o,e,w,v,l -p,f,s,w,t,f,f,c,b,h,t,b,f,s,w,w,p,w,o,p,h,v,u -e,k,s,p,t,n,f,c,b,e,e,?,s,s,e,w,p,w,t,e,w,c,w -e,k,y,n,t,n,f,c,b,e,e,?,s,s,e,w,p,w,t,e,w,c,w -e,x,y,b,t,n,f,c,b,e,e,?,s,s,e,e,p,w,t,e,w,c,w -p,x,f,y,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,y,g -p,f,s,g,t,f,f,c,b,h,t,b,f,s,w,w,p,w,o,p,h,v,g -p,f,y,y,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,v,p -e,x,y,e,t,n,f,c,b,e,e,?,s,s,w,e,p,w,t,e,w,c,w -p,f,s,b,t,f,f,c,b,p,t,b,f,s,w,w,p,w,o,p,h,v,g -e,x,y,n,f,n,f,w,n,w,e,b,s,s,w,n,p,w,o,e,w,v,l -p,f,y,y,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,v,d -p,f,f,g,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,v,d -p,b,y,b,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,g -p,x,y,e,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,f,y,y,f,f,f,c,b,p,e,b,k,k,b,n,p,w,o,l,h,y,p -p,f,f,y,f,f,f,c,b,g,e,b,k,k,n,p,p,w,o,l,h,v,p -p,f,y,w,t,n,f,c,b,g,e,b,s,s,w,w,p,w,t,p,r,v,g -e,k,s,e,t,n,f,c,b,e,e,?,s,s,w,e,p,w,t,e,w,c,w -p,f,y,y,f,f,f,c,b,g,e,b,k,k,n,n,p,w,o,l,h,v,p -e,k,s,n,t,n,f,c,b,e,e,?,s,s,w,e,p,w,t,e,w,c,w -p,f,y,y,f,n,f,c,n,w,e,?,k,y,w,n,p,w,o,e,w,v,d -p,f,y,g,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,y,p -p,f,y,y,f,f,f,c,b,g,e,b,k,k,p,b,p,w,o,l,h,v,g -p,x,y,e,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -e,f,s,b,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w -p,f,s,w,t,f,f,c,b,w,t,b,f,s,w,w,p,w,o,p,h,v,u -p,x,s,w,t,f,f,c,b,w,t,b,f,f,w,w,p,w,o,p,h,s,g -p,f,y,g,f,f,f,c,b,h,e,b,k,k,p,p,p,w,o,l,h,y,d -e,f,y,p,t,n,f,c,b,w,e,?,s,s,e,w,p,w,t,e,w,c,w -p,f,y,y,f,f,f,c,b,p,e,b,k,k,n,n,p,w,o,l,h,y,g -p,f,y,y,f,f,f,c,b,p,e,b,k,k,n,n,p,w,o,l,h,y,p -e,k,s,e,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w -e,x,y,n,t,n,f,c,b,e,e,?,s,s,e,e,p,w,t,e,w,c,w -p,x,y,y,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,v,g -p,f,s,b,t,f,f,c,b,p,t,b,s,f,w,w,p,w,o,p,h,v,g -p,x,y,g,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,v,d -p,f,s,g,t,f,f,c,b,w,t,b,f,f,w,w,p,w,o,p,h,s,u -p,x,s,w,t,f,f,c,b,p,t,b,f,s,w,w,p,w,o,p,h,v,g -p,x,f,n,f,n,f,c,n,w,e,?,k,y,w,y,p,w,o,e,w,v,d -p,f,s,b,t,f,f,c,b,h,t,b,s,f,w,w,p,w,o,p,h,s,g -e,f,y,e,t,n,f,c,b,e,e,?,s,s,e,e,p,w,t,e,w,c,w -p,f,y,y,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,y,g -p,x,s,b,t,f,f,c,b,p,t,b,s,f,w,w,p,w,o,p,h,s,u -e,k,f,n,f,n,f,w,n,w,e,b,f,s,w,n,p,w,o,e,w,v,l -p,f,y,y,f,f,f,c,b,p,e,b,k,k,b,b,p,w,o,l,h,y,g -e,k,y,c,f,n,f,w,n,w,e,b,f,f,w,n,p,w,o,e,w,v,l -p,x,f,g,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,y,p -p,x,y,n,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -e,f,y,p,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w -p,x,s,b,t,f,f,c,b,h,t,b,f,f,w,w,p,w,o,p,h,v,g -p,x,y,g,f,f,f,c,b,h,e,b,k,k,b,n,p,w,o,l,h,y,d 
-p,f,y,y,f,f,f,c,b,p,e,b,k,k,p,b,p,w,o,l,h,y,g -p,x,y,e,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -e,f,y,r,f,n,f,c,n,w,e,?,s,f,w,w,p,w,o,f,h,y,d -p,f,y,y,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,y,p -p,x,s,g,t,f,f,c,b,h,t,b,f,f,w,w,p,w,o,p,h,v,g -p,x,s,w,t,f,f,c,b,w,t,b,f,f,w,w,p,w,o,p,h,v,g -p,f,s,b,t,f,f,c,b,w,t,b,f,s,w,w,p,w,o,p,h,s,g -e,x,y,n,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w -p,f,f,y,f,n,f,c,n,w,e,?,k,y,w,n,p,w,o,e,w,v,d -e,f,y,u,f,n,f,c,n,w,e,?,s,f,w,w,p,w,o,f,h,v,d -p,x,y,n,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -e,x,s,n,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w -p,x,s,w,t,f,f,c,b,w,t,b,f,s,w,w,p,w,o,p,h,v,u -p,f,y,y,f,f,f,c,b,p,e,b,k,k,p,b,p,w,o,l,h,v,d -e,f,s,b,t,n,f,c,b,e,e,?,s,s,e,e,p,w,t,e,w,c,w -e,k,f,c,f,n,f,w,n,w,e,b,s,s,w,n,p,w,o,e,w,v,l -p,x,s,g,t,f,f,c,b,p,t,b,s,f,w,w,p,w,o,p,h,v,u -p,f,y,g,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,v,p -p,x,y,e,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -e,f,y,p,t,n,f,c,b,e,e,?,s,s,e,w,p,w,t,e,w,c,w -e,k,y,p,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w -p,x,y,y,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,v,g -p,x,y,n,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -e,f,s,n,t,n,f,c,b,e,e,?,s,s,e,w,p,w,t,e,w,c,w -p,f,y,y,f,f,f,c,b,h,e,b,k,k,p,p,p,w,o,l,h,y,g -p,x,y,n,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -e,f,s,n,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w -p,f,y,y,f,f,f,c,b,p,e,b,k,k,p,p,p,w,o,l,h,v,d -p,f,y,y,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,v,p -p,f,y,y,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,y,p -e,f,f,c,f,n,f,w,n,w,e,b,f,s,w,n,p,w,o,e,w,v,l -p,b,y,b,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,g -e,k,y,e,t,n,f,c,b,w,e,?,s,s,e,w,p,w,t,e,w,c,w -p,x,s,b,t,f,f,c,b,w,t,b,f,f,w,w,p,w,o,p,h,v,u -p,x,s,g,t,f,f,c,b,w,t,b,s,s,w,w,p,w,o,p,h,v,g -p,f,y,y,f,f,f,c,b,p,e,b,k,k,b,n,p,w,o,l,h,v,g -p,x,s,g,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,s,u -p,x,y,e,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,f,s,w,t,f,f,c,b,p,t,b,f,f,w,w,p,w,o,p,h,v,u -e,f,s,p,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w -p,f,s,b,t,f,f,c,b,h,t,b,s,s,w,w,p,w,o,p,h,v,g -p,x,y,e,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -e,f,y,w,f,n,f,c,n,u,e,?,s,f,w,w,p,w,o,f,h,v,d -e,x,y,p,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w -p,x,s,b,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,v,u -p,k,g,w,t,n,f,w,n,w,e,b,s,s,w,w,p,w,o,p,w,c,l -p,f,s,g,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,s,g -p,x,y,n,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -e,k,s,e,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w -p,x,y,n,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,x,y,n,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,f,s,w,t,f,f,c,b,h,t,b,f,f,w,w,p,w,o,p,h,s,g -e,f,s,e,t,n,f,c,b,e,e,?,s,s,w,e,p,w,t,e,w,c,w -p,f,s,b,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,s,g -p,b,s,w,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,m -e,f,f,n,f,n,f,w,n,w,e,b,f,s,w,n,p,w,o,e,w,v,l -p,f,s,g,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,v,u -p,x,s,w,t,f,f,c,b,h,t,b,s,s,w,w,p,w,o,p,h,v,g -p,f,y,y,f,f,f,c,b,p,e,b,k,k,p,n,p,w,o,l,h,v,g -e,x,s,b,t,n,f,c,b,e,e,?,s,s,e,w,p,w,t,e,w,c,w -e,f,y,n,t,n,f,c,b,w,e,?,s,s,e,w,p,w,t,e,w,c,w -e,x,y,r,f,n,f,c,n,u,e,?,s,f,w,w,p,w,o,f,h,v,d -e,k,s,b,t,n,f,c,b,e,e,?,s,s,e,e,p,w,t,e,w,c,w -p,x,s,w,t,f,f,c,b,h,t,b,s,f,w,w,p,w,o,p,h,v,u -p,x,y,n,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -e,x,y,b,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w -p,f,y,w,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,g -p,f,s,b,t,f,f,c,b,w,t,b,s,f,w,w,p,w,o,p,h,s,u -p,x,s,g,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,v,g -e,k,s,n,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w -p,f,s,g,t,f,f,c,b,p,t,b,s,f,w,w,p,w,o,p,h,s,g -e,k,y,p,t,n,f,c,b,e,e,?,s,s,e,e,p,w,t,e,w,c,w -p,f,y,b,t,n,f,c,b,g,e,b,s,s,w,w,p,w,t,p,r,v,m -p,x,y,n,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,f,y,y,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,v,p 
-p,x,s,b,t,f,f,c,b,w,t,b,f,f,w,w,p,w,o,p,h,s,u -e,k,f,c,f,n,f,w,n,w,e,b,s,f,w,n,p,w,o,e,w,v,l -p,f,s,g,t,f,f,c,b,p,t,b,s,f,w,w,p,w,o,p,h,v,u -p,f,s,w,t,f,f,c,b,w,t,b,f,s,w,w,p,w,o,p,h,v,g -p,b,s,b,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,m -p,b,s,p,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,g -p,f,s,w,t,f,f,c,b,w,t,b,s,f,w,w,p,w,o,p,h,s,g -e,x,y,n,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w -p,x,y,n,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,x,y,g,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,v,g -e,x,s,b,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w -p,x,y,n,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,x,s,b,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,s,u -p,f,s,p,t,n,f,c,b,g,e,b,s,s,w,w,p,w,t,p,r,v,g -p,x,y,n,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,x,s,w,t,f,f,c,b,h,t,b,f,f,w,w,p,w,o,p,h,v,u -p,f,s,w,t,f,f,c,b,h,t,b,s,s,w,w,p,w,o,p,h,v,u -e,f,y,r,f,n,f,c,n,u,e,?,s,f,w,w,p,w,o,f,h,y,d -p,f,s,w,t,f,f,c,b,h,t,b,f,s,w,w,p,w,o,p,h,s,g -p,f,s,w,t,f,f,c,b,p,t,b,f,s,w,w,p,w,o,p,h,s,u -e,x,y,w,f,n,f,c,n,u,e,?,s,f,w,w,p,w,o,f,h,v,d -p,x,y,n,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -e,k,y,b,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w -p,x,y,n,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,x,y,n,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,x,s,b,t,f,f,c,b,p,t,b,f,f,w,w,p,w,o,p,h,s,u -e,f,y,r,f,n,f,c,n,p,e,?,s,f,w,w,p,w,o,f,h,y,d -p,b,s,p,t,n,f,c,b,g,e,b,s,s,w,w,p,w,t,p,r,v,g -p,x,s,b,t,f,f,c,b,p,t,b,f,f,w,w,p,w,o,p,h,v,u -e,f,y,c,f,n,f,w,n,w,e,b,f,f,w,n,p,w,o,e,w,v,l -e,x,y,u,f,n,f,c,n,p,e,?,s,f,w,w,p,w,o,f,h,v,d -p,x,s,b,t,f,f,c,b,w,t,b,s,f,w,w,p,w,o,p,h,v,u -e,x,y,w,f,n,f,c,n,w,e,?,s,f,w,w,p,w,o,f,h,v,d -p,x,y,n,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -e,x,s,p,t,n,f,c,b,e,e,?,s,s,e,w,p,w,t,e,w,c,w -p,f,s,p,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,m -p,f,y,n,f,n,f,c,n,w,e,?,k,y,w,y,p,w,o,e,w,v,d -p,f,y,y,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,y,p -p,f,s,w,t,f,f,c,b,h,t,b,s,f,w,w,p,w,o,p,h,v,g -p,x,y,g,f,f,f,c,b,h,e,b,k,k,b,b,p,w,o,l,h,v,g -e,f,y,e,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w -p,f,s,b,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,g -p,b,s,p,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,m -p,x,y,n,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,x,s,b,t,f,f,c,b,p,t,b,f,s,w,w,p,w,o,p,h,v,u -p,x,y,y,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,y,p -p,b,y,w,t,n,f,c,b,g,e,b,s,s,w,w,p,w,t,p,r,v,g -e,f,y,n,f,n,f,w,n,w,e,b,s,f,w,n,p,w,o,e,w,v,l -p,f,f,y,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,y,g -p,f,y,y,f,f,f,c,b,h,e,b,k,k,p,b,p,w,o,l,h,v,p -p,f,y,y,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,v,g -p,x,s,g,t,f,f,c,b,h,t,b,s,f,w,w,p,w,o,p,h,s,u -p,x,y,e,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,x,y,y,f,n,f,c,n,w,e,?,k,y,w,y,p,w,o,e,w,v,d -p,x,y,n,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,f,s,w,t,f,f,c,b,h,t,b,s,f,w,w,p,w,o,p,h,s,g -p,f,f,g,f,f,f,c,b,p,e,b,k,k,b,n,p,w,o,l,h,y,g -e,f,f,n,f,n,f,w,n,w,e,b,s,s,w,n,p,w,o,e,w,v,l -p,x,s,g,t,f,f,c,b,w,t,b,f,s,w,w,p,w,o,p,h,s,g -p,f,s,b,t,f,f,c,b,h,t,b,f,f,w,w,p,w,o,p,h,v,u -e,f,s,e,t,n,f,c,b,e,e,?,s,s,e,e,p,w,t,e,w,c,w -p,x,s,b,t,f,f,c,b,h,t,b,s,s,w,w,p,w,o,p,h,s,u -p,f,y,y,f,f,f,c,b,p,e,b,k,k,b,n,p,w,o,l,h,v,d -p,x,y,n,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,f,s,w,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,v,g -p,f,s,g,t,f,f,c,b,h,t,b,s,s,w,w,p,w,o,p,h,s,g -p,f,y,y,f,f,f,c,b,p,e,b,k,k,n,n,p,w,o,l,h,v,g -e,f,f,c,f,n,f,w,n,w,e,b,s,s,w,n,p,w,o,e,w,v,l -p,x,y,n,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -e,k,y,e,t,n,f,c,b,e,e,?,s,s,w,e,p,w,t,e,w,c,w -p,x,y,n,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,f,s,w,t,f,f,c,b,p,t,b,s,f,w,w,p,w,o,p,h,s,g -p,x,y,e,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -e,x,s,n,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w -p,f,y,g,f,f,f,c,b,h,e,b,k,k,p,p,p,w,o,l,h,v,p 
-p,x,s,w,t,f,f,c,b,w,t,b,f,f,w,w,p,w,o,p,h,v,u -e,f,s,e,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w -p,f,y,y,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,v,d -p,f,y,y,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,v,d -e,f,y,e,t,n,f,c,b,e,e,?,s,s,e,w,p,w,t,e,w,c,w -p,f,s,g,t,f,f,c,b,p,t,b,f,f,w,w,p,w,o,p,h,v,g -e,k,s,n,t,n,f,c,b,e,e,?,s,s,e,w,p,w,t,e,w,c,w -p,f,y,y,f,f,f,c,b,p,e,b,k,k,n,b,p,w,o,l,h,y,d -p,x,s,w,t,f,f,c,b,w,t,b,s,f,w,w,p,w,o,p,h,s,g -p,x,y,n,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,f,y,y,f,f,f,c,b,p,e,b,k,k,b,n,p,w,o,l,h,y,g -p,f,y,w,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,g -p,x,y,n,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -e,f,y,w,f,n,f,c,n,w,e,?,s,f,w,w,p,w,o,f,h,v,d -e,x,y,w,f,n,f,c,n,h,e,?,s,f,w,w,p,w,o,f,h,y,d -p,x,y,n,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,f,s,g,t,f,f,c,b,p,t,b,f,s,w,w,p,w,o,p,h,s,u -e,x,y,u,f,n,f,c,n,w,e,?,s,f,w,w,p,w,o,f,h,v,d -p,f,s,w,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,g -p,x,y,y,f,f,f,c,b,g,e,b,k,k,p,b,p,w,o,l,h,v,d -p,f,s,g,t,f,f,c,b,h,t,b,s,f,w,w,p,w,o,p,h,v,g -p,f,y,y,f,f,f,c,b,g,e,b,k,k,p,n,p,w,o,l,h,y,p -p,x,f,y,f,f,f,c,b,h,e,b,k,k,p,p,p,w,o,l,h,v,g -p,f,y,y,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,y,d -p,x,s,g,t,f,f,c,b,p,t,b,f,f,w,w,p,w,o,p,h,s,g -e,f,y,r,f,n,f,c,n,u,e,?,s,f,w,w,p,w,o,f,h,v,d -p,f,s,w,t,f,f,c,b,w,t,b,f,s,w,w,p,w,o,p,h,s,g -e,k,s,n,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w -p,x,y,n,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,x,s,g,t,f,f,c,b,w,t,b,f,f,w,w,p,w,o,p,h,s,g -p,f,y,y,f,f,f,c,b,g,e,b,k,k,p,p,p,w,o,l,h,y,g -p,b,s,w,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,m -p,x,y,n,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,b,s,b,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,m -p,f,f,y,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,y,g -p,x,y,g,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,y,d -e,x,y,e,t,n,f,c,b,e,e,?,s,s,w,w,p,w,t,e,w,c,w -e,k,s,b,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w -p,x,y,n,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -e,x,s,e,t,n,f,c,b,e,e,?,s,s,e,e,p,w,t,e,w,c,w -p,f,s,b,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,m -e,f,s,n,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w -p,x,y,e,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -e,f,y,b,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w -p,x,s,g,t,f,f,c,b,w,t,b,s,s,w,w,p,w,o,p,h,s,u -p,f,s,b,t,n,f,c,b,g,e,b,s,s,w,w,p,w,t,p,r,v,g -p,x,y,n,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,x,y,e,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,f,y,y,f,f,f,c,b,h,e,b,k,k,b,b,p,w,o,l,h,y,g -p,x,y,n,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,x,y,n,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,f,s,g,t,f,f,c,b,w,t,b,f,s,w,w,p,w,o,p,h,v,u -e,x,y,p,t,n,f,c,b,w,e,?,s,s,e,w,p,w,t,e,w,c,w -e,x,y,u,f,n,f,c,n,u,e,?,s,f,w,w,p,w,o,f,h,y,d -p,f,y,y,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,y,p -p,x,s,g,t,f,f,c,b,h,t,b,s,f,w,w,p,w,o,p,h,v,g -p,x,y,n,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,f,s,g,t,f,f,c,b,h,t,b,s,f,w,w,p,w,o,p,h,v,u -p,x,s,b,t,f,f,c,b,h,t,b,s,f,w,w,p,w,o,p,h,v,g -p,x,f,y,f,f,f,c,b,h,e,b,k,k,b,p,p,w,o,l,h,y,g -p,b,s,w,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,g -e,k,y,n,t,n,f,c,b,e,e,?,s,s,e,e,p,w,t,e,w,c,w -p,x,y,e,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,f,y,y,f,f,f,c,b,p,e,b,k,k,b,n,p,w,o,l,h,v,p -e,x,y,b,t,n,f,c,b,w,e,?,s,s,e,w,p,w,t,e,w,c,w -p,x,s,b,t,f,f,c,b,p,t,b,s,f,w,w,p,w,o,p,h,v,g -p,f,s,g,t,f,f,c,b,w,t,b,f,f,w,w,p,w,o,p,h,v,u -p,f,s,b,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,s,u -p,f,y,y,f,f,f,c,b,p,e,b,k,k,b,b,p,w,o,l,h,y,d -p,x,y,n,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,f,s,w,t,f,f,c,b,h,t,b,f,f,w,w,p,w,o,p,h,v,u -e,k,y,n,t,n,f,c,b,e,e,?,s,s,w,w,p,w,t,e,w,c,w -e,x,s,e,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w -e,x,y,w,f,n,f,c,n,p,e,?,s,f,w,w,p,w,o,f,h,v,d -p,f,s,b,t,f,f,c,b,p,t,b,f,s,w,w,p,w,o,p,h,s,u 
-p,f,s,g,t,f,f,c,b,h,t,b,s,s,w,w,p,w,o,p,h,s,u -e,f,s,e,t,n,f,c,b,e,e,?,s,s,w,w,p,w,t,e,w,c,w -p,f,y,y,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,y,g -p,x,y,n,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,f,s,w,t,n,f,c,b,g,e,b,s,s,w,w,p,w,t,p,r,v,g -p,f,y,g,f,f,f,c,b,p,e,b,k,k,b,n,p,w,o,l,h,y,d -e,x,f,n,f,n,f,w,n,w,e,b,s,s,w,n,p,w,o,e,w,v,l -p,f,y,y,f,f,f,c,b,p,e,b,k,k,b,n,p,w,o,l,h,y,d -p,x,s,w,t,f,f,c,b,p,t,b,f,s,w,w,p,w,o,p,h,s,u -e,x,s,b,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w -e,x,s,p,t,n,f,c,b,w,e,?,s,s,w,e,p,w,t,e,w,c,w -p,x,s,g,t,f,f,c,b,w,t,b,f,f,w,w,p,w,o,p,h,v,g -p,f,f,g,f,f,f,c,b,p,e,b,k,k,p,n,p,w,o,l,h,y,p -p,x,s,w,t,f,f,c,b,h,t,b,s,f,w,w,p,w,o,p,h,s,g -p,x,f,y,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,v,p -p,f,s,w,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,s,u -e,k,y,n,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w -e,x,y,p,t,n,f,c,b,e,e,?,s,s,w,w,p,w,t,e,w,c,w -p,f,y,y,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,y,g -e,f,y,b,t,n,f,c,b,e,e,?,s,s,w,w,p,w,t,e,w,c,w -e,x,s,e,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w -p,x,s,g,t,f,f,c,b,p,t,b,f,f,w,w,p,w,o,p,h,v,u -p,f,s,b,t,f,f,c,b,w,t,b,s,s,w,w,p,w,o,p,h,v,u -p,f,y,y,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,v,p -p,x,s,b,t,f,f,c,b,h,t,b,s,f,w,w,p,w,o,p,h,v,u -p,f,s,w,t,f,f,c,b,h,t,b,s,s,w,w,p,w,o,p,h,v,g -p,f,y,y,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,v,g -p,f,y,y,f,f,f,c,b,p,e,b,k,k,n,p,p,w,o,l,h,y,d -p,f,y,g,f,f,f,c,b,h,e,b,k,k,n,p,p,w,o,l,h,y,p -p,f,f,g,f,f,f,c,b,g,e,b,k,k,b,n,p,w,o,l,h,v,p -p,b,s,p,t,n,f,c,b,g,e,b,s,s,w,w,p,w,t,p,r,v,m -p,x,y,n,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,b,s,w,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,g -e,x,y,n,t,n,f,c,b,w,e,?,s,s,w,w,p,w,t,e,w,c,w -p,x,y,n,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,x,y,n,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,x,y,n,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,f,s,b,t,f,f,c,b,w,t,b,s,s,w,w,p,w,o,p,h,s,g -p,b,y,y,f,n,f,c,n,w,e,?,k,y,w,n,p,w,o,e,w,v,d -p,x,s,g,t,f,f,c,b,w,t,b,f,f,w,w,p,w,o,p,h,v,u -e,f,y,p,t,n,f,c,b,e,e,?,s,s,w,w,p,w,t,e,w,c,w -e,k,s,p,t,n,f,c,b,e,e,?,s,s,e,e,p,w,t,e,w,c,w -e,f,y,n,t,n,f,c,b,w,e,?,s,s,e,e,p,w,t,e,w,c,w -p,b,y,p,t,n,f,c,b,g,e,b,s,s,w,w,p,w,t,p,r,v,m -p,x,s,g,t,f,f,c,b,w,t,b,s,f,w,w,p,w,o,p,h,v,g -p,x,s,g,t,f,f,c,b,p,t,b,s,s,w,w,p,w,o,p,h,s,g -p,x,f,y,f,f,f,c,b,p,e,b,k,k,n,n,p,w,o,l,h,v,g -p,x,y,n,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,f,y,b,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,r,v,g -p,x,s,b,t,f,f,c,b,h,t,b,f,f,w,w,p,w,o,p,h,v,u -p,b,s,b,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,g -e,f,y,w,f,n,f,c,n,p,e,?,s,f,w,w,p,w,o,f,h,y,d -e,x,y,n,f,n,f,w,n,w,e,b,f,s,w,n,p,w,o,e,w,v,l -p,x,f,y,f,f,f,c,b,p,e,b,k,k,n,n,p,w,o,l,h,v,p -p,f,s,b,t,f,f,c,b,h,t,b,s,f,w,w,p,w,o,p,h,v,g -p,f,y,y,f,f,f,c,b,g,e,b,k,k,n,p,p,w,o,l,h,v,g -e,k,s,e,t,n,f,c,b,e,e,?,s,s,e,e,p,w,t,e,w,c,w -p,f,f,y,f,f,f,c,b,h,e,b,k,k,p,p,p,w,o,l,h,y,p -p,b,f,n,f,n,f,c,n,w,e,?,k,y,w,n,p,w,o,e,w,v,d -p,x,y,n,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,f,y,y,f,f,f,c,b,h,e,b,k,k,n,b,p,w,o,l,h,y,d -p,x,y,y,f,f,f,c,b,g,e,b,k,k,n,b,p,w,o,l,h,y,g -p,x,s,g,t,f,f,c,b,p,t,b,f,s,w,w,p,w,o,p,h,s,g -p,x,y,n,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -e,x,y,n,t,n,f,c,b,w,e,?,s,s,e,w,p,w,t,e,w,c,w -e,f,y,n,f,n,f,w,n,w,e,b,f,f,w,n,p,w,o,e,w,v,l -p,f,y,b,t,n,f,c,b,g,e,b,s,s,w,w,p,w,t,p,r,v,g -p,k,f,n,f,n,f,c,n,w,e,?,k,y,w,y,p,w,o,e,w,v,d -p,f,y,g,f,f,f,c,b,h,e,b,k,k,p,n,p,w,o,l,h,v,g -p,x,y,n,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,f,f,g,f,f,f,c,b,h,e,b,k,k,p,p,p,w,o,l,h,y,d -p,f,f,g,f,f,f,c,b,p,e,b,k,k,b,p,p,w,o,l,h,v,d -p,x,s,g,t,f,f,c,b,p,t,b,s,f,w,w,p,w,o,p,h,s,g -e,x,y,r,f,n,f,c,n,u,e,?,s,f,w,w,p,w,o,f,h,y,d -p,x,y,n,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p 
-p,f,s,p,t,n,f,c,b,g,e,b,s,s,w,w,p,w,t,p,r,v,m -p,x,y,e,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,f,s,b,t,f,f,c,b,w,t,b,s,s,w,w,p,w,o,p,h,v,g -p,x,s,b,t,f,f,c,b,w,t,b,s,s,w,w,p,w,o,p,h,v,u -e,k,s,e,t,n,f,c,b,e,e,?,s,s,w,w,p,w,t,e,w,c,w -p,f,s,b,t,f,f,c,b,w,t,b,s,f,w,w,p,w,o,p,h,s,g -e,x,f,n,f,n,f,w,n,w,e,b,f,f,w,n,p,w,o,e,w,v,l -p,f,y,y,f,f,f,c,b,p,e,b,k,k,p,n,p,w,o,l,h,v,d -e,f,y,w,f,n,f,c,n,w,e,?,s,f,w,w,p,w,o,f,h,y,d -p,f,s,w,t,f,f,c,b,w,t,b,s,s,w,w,p,w,o,p,h,s,u -p,f,s,p,t,n,f,c,b,r,e,b,s,s,w,w,p,w,t,p,r,v,g -p,f,y,y,f,f,f,c,b,p,e,b,k,k,p,n,p,w,o,l,h,y,d -p,f,s,g,t,f,f,c,b,h,t,b,s,f,w,w,p,w,o,p,h,s,g -p,x,y,n,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,f,y,y,f,f,f,c,b,g,e,b,k,k,b,b,p,w,o,l,h,v,g -p,f,s,g,t,f,f,c,b,h,t,b,f,s,w,w,p,w,o,p,h,s,g -p,f,s,w,t,f,f,c,b,h,t,b,s,f,w,w,p,w,o,p,h,s,u -p,x,y,n,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,f,f,y,f,f,f,c,b,p,e,b,k,k,n,n,p,w,o,l,h,y,d -p,x,y,n,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,b,y,b,t,n,f,c,b,g,e,b,s,s,w,w,p,w,t,p,r,v,g -p,f,y,y,f,f,f,c,b,g,e,b,k,k,b,p,p,w,o,l,h,v,g -p,x,y,e,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,x,y,n,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -e,k,y,b,t,n,f,c,b,e,e,?,s,s,w,w,p,w,t,e,w,c,w -e,x,y,w,f,n,f,c,n,p,e,?,s,f,w,w,p,w,o,f,h,y,d -p,f,s,b,t,f,f,c,b,p,t,b,s,f,w,w,p,w,o,p,h,v,u -p,f,y,y,f,f,f,c,b,g,e,b,k,k,n,p,p,w,o,l,h,y,g -p,x,y,e,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,f,s,e,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,x,y,e,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,x,s,n,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,f,y,n,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,f,y,e,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,k,y,n,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,f,y,e,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,x,s,e,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,f,y,n,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,f,y,e,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,x,s,e,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,x,s,n,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,f,s,e,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,f,s,e,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,f,y,e,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,f,y,n,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,f,y,n,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,x,y,e,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,k,s,n,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,x,s,n,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,f,s,e,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,f,y,e,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,k,s,e,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,f,y,e,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,x,s,n,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,f,s,e,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,f,s,n,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,f,y,e,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,f,s,e,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,f,s,n,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,f,y,e,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,x,s,e,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,f,s,e,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,x,s,n,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,f,y,n,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,x,y,e,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,f,y,n,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -e,b,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,n,c,l -p,f,s,n,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -e,x,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,n,v,l -p,f,y,e,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,f,y,n,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,f,y,e,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,f,s,n,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,x,y,e,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,x,s,n,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d 
-p,f,s,n,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,f,y,e,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,f,s,n,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,f,s,n,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,f,s,n,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,f,y,e,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,f,s,e,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,x,y,e,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,x,s,e,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,f,s,e,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,f,y,n,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,x,y,e,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,k,s,e,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,k,s,n,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,f,y,n,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,x,s,e,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,x,y,e,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,f,y,e,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,f,y,n,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,f,s,e,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,x,s,n,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -e,x,f,w,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,n,g -p,f,y,n,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,x,s,n,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,x,s,e,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,f,s,n,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,f,s,n,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,x,y,e,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,x,s,e,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,f,s,n,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,k,s,n,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,f,y,n,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,x,s,n,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,f,y,e,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,x,s,e,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,f,s,n,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,f,y,n,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,k,y,n,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,f,y,n,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,f,s,n,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,x,s,e,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,f,s,n,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,f,s,n,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,f,s,e,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,f,y,n,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,f,s,n,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,f,y,n,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,f,s,e,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,f,s,e,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,x,y,e,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,f,s,n,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,x,y,e,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,f,y,e,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,x,s,n,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,f,y,n,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,f,y,e,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,x,y,e,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,f,y,n,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,f,y,n,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,x,y,e,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,f,y,n,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,f,s,n,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,x,s,n,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,f,y,e,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,x,y,e,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,x,s,e,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,f,s,e,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,f,y,e,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,x,y,e,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,x,y,e,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,x,s,n,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,x,s,e,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,k,s,e,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -e,x,s,c,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,v,p -p,x,s,e,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p 
-p,f,s,n,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,x,s,n,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,f,y,n,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,f,y,e,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,f,s,e,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,x,y,e,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,x,s,e,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,x,s,n,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,x,y,e,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,f,s,e,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,f,s,n,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,f,y,e,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,x,y,e,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,k,s,e,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,f,y,n,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,f,s,n,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,x,s,e,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,f,s,n,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,f,s,e,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,f,y,e,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -e,b,f,w,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,n,g -p,x,y,e,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,x,y,e,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,f,y,e,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,f,y,e,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -e,k,f,w,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,s,g -p,x,s,n,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,f,s,n,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,x,s,n,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,f,s,e,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,x,s,n,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,x,s,n,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,x,s,e,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,k,s,n,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,x,y,e,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,x,s,e,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,x,y,e,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,x,s,e,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,x,s,e,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,x,s,e,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,x,s,e,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,f,y,e,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,x,s,n,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,f,s,n,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,x,s,e,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,f,s,e,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,x,s,n,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,f,y,e,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,x,s,n,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,k,s,n,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,f,s,e,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,f,s,e,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,f,y,e,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,k,s,n,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,f,y,n,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,f,s,e,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,x,y,e,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,f,s,e,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,f,y,e,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,f,s,n,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,f,s,n,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,x,y,e,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,x,y,e,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,f,s,e,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,k,y,e,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,f,y,e,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,k,y,n,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,x,s,e,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,x,y,e,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,x,s,n,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,f,y,n,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,x,s,e,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -e,b,s,g,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,n,g -p,x,y,e,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,x,s,e,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d 
-p,f,y,e,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,x,s,e,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,f,y,e,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,f,y,e,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,x,s,n,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,f,y,n,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -e,k,s,w,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,s,g -p,f,y,e,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,x,s,e,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,f,y,n,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,f,y,n,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,x,s,e,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,f,y,e,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,f,y,e,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,f,s,n,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,x,y,e,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,f,y,e,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -e,b,f,g,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,n,g -p,k,y,e,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,x,y,e,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,k,s,e,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,x,s,n,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -e,k,s,w,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,s,g -p,f,s,e,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,x,s,n,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,k,s,e,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,k,y,n,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,k,y,e,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,x,s,e,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,f,s,e,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,f,y,e,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,x,y,e,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,f,y,n,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,x,y,e,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,f,y,n,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,x,y,e,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,k,y,n,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,f,s,e,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,x,s,n,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,f,y,n,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,f,s,e,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,x,s,e,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,f,s,n,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,f,y,n,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,x,y,e,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,x,s,e,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,f,y,e,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,f,s,e,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,f,y,e,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,x,s,n,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,f,s,n,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,x,s,n,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,f,y,n,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,k,y,n,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,x,s,e,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,f,y,n,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,x,y,e,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,f,y,e,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,x,y,e,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,f,y,e,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,f,y,e,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,f,s,e,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,k,y,e,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,x,s,e,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -e,k,s,g,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,s,g -p,x,s,n,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,f,s,e,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,f,s,n,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,f,s,e,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,f,y,n,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,x,s,e,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,x,s,e,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,x,s,e,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,x,s,e,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,f,s,e,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d 
-p,x,s,e,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,x,s,n,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,f,s,n,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,k,y,n,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,x,y,e,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,x,s,n,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,x,s,e,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -e,b,s,w,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,s,g -p,f,s,e,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,x,s,e,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,x,y,e,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,f,y,n,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,f,s,e,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,f,y,n,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,f,y,e,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,f,s,n,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,x,y,e,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,f,s,n,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,x,s,n,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,f,s,e,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,x,s,n,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,f,s,e,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,x,y,e,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,f,y,n,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,x,y,e,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,f,s,n,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,f,s,n,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,x,s,n,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,f,s,e,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,k,y,n,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,x,y,e,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,x,y,e,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,k,s,n,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,x,s,e,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,x,s,e,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,f,y,n,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,f,s,e,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,f,s,e,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,f,s,n,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,f,y,e,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,f,y,e,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,k,s,e,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,x,s,n,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,f,y,n,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,f,s,e,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,k,s,n,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,f,y,n,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,x,y,e,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,x,y,e,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,x,s,e,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,f,y,n,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,f,y,n,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,f,y,e,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,k,y,n,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,x,s,n,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,x,s,e,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,x,s,n,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,f,y,e,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,k,y,e,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -e,x,f,g,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,s,g -p,f,y,e,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,x,s,e,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,x,s,e,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,x,y,e,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,f,y,n,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,f,y,n,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,x,s,n,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,x,s,e,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,f,y,e,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,f,s,n,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,f,y,n,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,f,y,n,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,x,y,e,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,x,s,e,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,f,s,n,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l 
-p,f,s,n,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,x,s,e,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,f,s,n,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,f,s,n,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,f,y,n,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,x,s,n,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,f,s,n,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,f,s,n,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,x,s,n,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,f,y,n,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,x,s,n,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,x,s,n,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,f,s,n,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,f,y,n,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,x,y,e,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,f,s,e,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,f,y,n,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,f,s,e,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,x,s,n,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -e,k,f,w,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,n,g -p,x,s,n,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,f,s,n,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,f,y,n,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,f,s,n,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,f,y,n,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,f,y,n,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,f,s,n,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,f,y,e,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -e,b,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,o,v,l -p,f,s,e,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,f,s,e,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,x,s,e,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,f,y,n,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,f,y,e,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,f,s,n,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,f,y,e,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,f,s,n,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,k,y,n,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,f,y,e,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,f,s,e,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,x,y,e,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,f,s,n,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,x,s,e,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,k,y,n,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,x,s,n,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,x,y,e,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,f,y,n,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,f,s,e,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,f,s,n,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -e,x,s,w,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,s,g -p,f,y,n,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,f,s,n,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,x,y,e,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,f,s,e,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,x,s,n,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,x,y,e,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -e,k,f,g,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,s,g -p,f,s,e,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,f,y,e,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -e,b,f,w,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,s,g -p,f,s,n,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,x,y,e,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,x,y,e,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,f,s,n,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,x,s,n,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,f,s,e,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,f,s,e,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,f,s,e,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,x,y,e,f,m,f,c,b,w,e,c,k,y,c,c,p,w,n,n,w,c,d -p,f,y,n,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,f,y,e,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,x,s,e,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,f,s,e,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,f,y,e,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,f,y,e,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l 
-p,x,s,e,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,f,s,e,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -e,x,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,y,c,l -p,f,y,n,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,f,y,e,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,f,y,e,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,x,s,n,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,x,s,e,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,x,y,e,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,x,s,e,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,x,y,e,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,f,y,e,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -e,x,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,o,v,l -p,f,s,n,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,x,y,e,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,f,s,n,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,f,y,e,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,x,y,e,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,x,s,n,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,x,s,n,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,x,s,n,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,x,y,e,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,f,s,n,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,f,s,n,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,k,y,e,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,x,s,n,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,f,y,n,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -e,k,s,w,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,n,g -p,f,s,e,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,x,s,e,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,f,y,n,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,f,y,n,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,f,y,e,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,x,s,n,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,f,y,n,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,x,s,n,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,k,y,n,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,f,s,e,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,f,y,e,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,f,y,e,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,f,y,e,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,f,y,n,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,f,y,n,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,k,y,n,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,k,y,n,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,f,s,e,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,x,s,e,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,k,s,n,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,x,s,e,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,f,s,n,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,x,s,e,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,f,s,n,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,x,s,n,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,f,y,e,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,x,s,n,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,x,s,e,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,x,s,e,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,x,s,n,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,x,y,e,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,f,y,e,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,f,s,e,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,x,s,e,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,f,y,e,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,f,s,e,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,f,s,e,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,f,y,n,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,f,s,n,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,x,s,e,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,f,y,n,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,f,s,n,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,f,y,n,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,f,s,e,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,f,y,e,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,f,y,n,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,f,s,e,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d 
-p,f,s,e,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,f,y,n,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,f,s,e,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,f,s,n,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,k,y,e,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,f,y,e,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,f,s,e,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,f,y,e,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,f,y,n,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,f,y,e,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,f,s,n,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,x,y,e,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -e,x,f,w,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,n,g -p,x,s,e,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,x,s,e,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,f,y,n,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,x,s,e,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,x,s,n,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,x,s,n,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,x,s,n,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -e,x,f,g,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,n,g -p,f,y,e,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,f,y,e,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,x,y,e,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,x,s,n,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,k,s,e,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,k,y,e,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,f,y,e,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,x,s,n,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,f,s,n,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,x,s,e,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,f,y,n,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,x,s,n,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -e,k,f,w,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,n,g -p,k,y,n,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,f,s,e,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,x,y,e,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,x,s,n,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,x,s,n,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,f,y,e,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,f,y,n,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,x,y,e,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,f,s,e,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,x,s,e,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,x,s,n,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,f,y,n,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,f,y,e,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,f,y,n,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,f,y,n,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,f,s,n,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,f,y,e,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,f,y,e,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,x,y,e,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,k,s,e,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,f,y,e,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,f,y,n,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,f,s,n,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,f,s,e,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,k,s,e,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,f,y,e,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,f,s,n,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -e,x,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,b,v,l -p,x,s,n,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,k,s,n,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,f,s,n,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,f,s,n,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,x,s,n,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,f,s,n,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,f,y,e,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,f,y,e,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,f,s,n,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,x,s,e,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,x,s,n,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,x,s,e,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,x,s,e,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d 
-p,f,y,e,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,x,s,e,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,k,s,e,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,x,y,e,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,f,s,n,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,f,s,n,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,x,s,n,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,k,y,e,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,x,s,e,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,f,y,n,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,f,y,e,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,f,y,n,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,f,s,n,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,f,s,e,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,k,y,n,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,f,y,e,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,f,s,e,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,f,s,n,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,x,s,n,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,f,y,n,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,f,y,e,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,x,y,e,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,f,s,e,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,x,s,n,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,f,s,n,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,f,s,n,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,f,s,e,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -e,x,s,g,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,n,g -p,k,y,n,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,x,y,e,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,x,s,n,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,x,s,n,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,f,y,e,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,k,y,n,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,x,s,e,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,x,s,n,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,k,y,n,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,x,y,e,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,k,s,n,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,x,s,n,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,x,y,e,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,f,s,e,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,f,s,n,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,f,s,n,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,f,s,e,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,x,y,e,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,x,s,e,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,f,s,n,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,f,y,e,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,x,s,n,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,x,s,e,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,x,s,e,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,f,s,e,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,x,y,e,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,x,s,n,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,x,s,e,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,x,s,e,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,x,s,n,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,x,s,n,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,f,s,e,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,f,y,e,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,x,s,n,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,x,s,e,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,f,s,n,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,x,s,n,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,f,s,e,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,k,y,n,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,x,s,n,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,f,y,n,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,k,y,n,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,f,s,e,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,f,s,n,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,x,s,e,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,x,s,n,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,f,y,n,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p 
-p,f,y,n,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,x,s,e,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,f,y,n,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,x,y,e,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,f,y,n,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,f,y,e,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,x,y,e,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,f,s,e,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,k,s,n,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,x,y,e,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,x,s,n,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,f,s,e,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,f,s,e,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -e,b,f,g,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,s,g -p,x,s,n,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,k,s,n,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -e,b,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,y,v,l -p,f,s,n,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,f,y,n,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,x,y,e,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,x,s,e,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,k,y,c,f,m,a,c,b,w,e,c,k,y,c,c,p,w,n,n,w,c,d -p,x,s,e,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,f,s,n,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,x,s,n,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,x,y,e,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,x,s,e,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,f,s,e,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,f,s,n,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,f,s,n,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,f,y,e,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,x,s,e,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,f,y,e,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,f,y,n,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,x,y,e,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,f,y,e,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,f,s,e,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,f,s,e,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,x,s,e,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,x,s,e,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,k,y,e,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,x,y,e,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,x,s,e,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,x,s,n,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,x,s,n,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,f,s,n,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,f,y,n,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,x,s,e,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,k,s,n,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,f,s,n,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,f,y,n,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,f,y,e,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,f,y,e,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,f,y,n,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,x,s,e,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,f,y,n,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,f,s,n,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,f,s,e,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,x,s,n,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,f,y,e,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,k,y,n,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,f,s,n,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,f,y,n,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,f,s,e,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,x,s,n,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,f,s,e,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,x,s,e,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,f,y,e,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,x,s,e,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,f,s,e,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,f,s,n,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,x,s,n,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,x,s,e,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,f,s,e,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,x,s,e,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l 
-p,x,s,n,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,f,s,e,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,x,y,e,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,x,s,n,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,x,y,e,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,f,s,e,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,x,s,e,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,f,y,n,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,x,s,e,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,x,s,n,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,f,s,e,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,x,s,e,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,f,s,n,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,x,y,e,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,f,y,e,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,x,y,e,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,k,y,n,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,f,y,n,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,x,y,e,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,f,s,n,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,x,s,e,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,f,s,e,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -e,x,f,g,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,s,g -p,x,s,e,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,f,s,n,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,f,s,n,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,x,s,e,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,x,s,e,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,x,s,e,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,f,y,e,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -e,b,f,g,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,s,g -p,f,y,n,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,f,y,e,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,f,y,n,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,f,s,n,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,f,y,n,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,x,y,e,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,f,y,n,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,x,s,n,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,f,y,e,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -e,k,f,g,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,s,g -e,b,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,o,c,l -p,f,s,n,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,x,s,n,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,f,y,n,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,f,s,e,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,f,y,n,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,f,s,n,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,f,y,e,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,f,y,e,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,f,s,n,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,x,s,e,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -e,x,f,w,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,n,g -p,f,s,n,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,x,s,e,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,x,s,n,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,x,s,n,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,x,s,n,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,k,y,n,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,f,s,n,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,x,y,e,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,f,s,n,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,f,s,n,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,x,s,n,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,x,s,n,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,x,y,e,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,f,s,e,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,f,s,e,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,f,y,n,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,x,s,e,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,f,s,e,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,f,y,e,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,f,y,e,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,x,y,e,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,f,y,n,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l 
-p,f,s,n,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,f,s,n,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,k,y,e,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,f,s,e,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,x,s,e,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,x,y,e,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,f,y,e,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,x,y,e,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,f,y,e,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,f,y,n,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,f,y,e,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,x,y,e,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,x,s,e,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,f,s,e,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,f,s,e,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,x,s,e,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,f,s,e,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,f,s,e,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,f,s,n,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,f,y,e,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,f,y,n,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,k,s,e,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,f,s,n,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,f,y,n,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,f,s,e,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,f,y,e,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,x,s,n,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,k,y,n,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,x,y,e,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,x,s,e,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -e,x,s,g,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,n,g -p,k,y,n,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,x,s,n,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,f,s,n,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,f,s,n,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,x,s,e,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,f,s,e,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,f,y,e,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,f,y,e,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,f,s,n,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,f,s,e,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,x,s,n,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,f,s,e,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,x,s,e,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,f,s,e,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,f,y,e,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,k,y,e,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,f,s,n,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,k,y,e,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,f,s,e,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,f,y,e,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,f,s,e,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -e,b,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,o,c,l -p,x,s,n,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,f,y,e,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,f,y,n,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,f,s,e,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,f,y,n,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,k,y,n,f,m,f,c,b,w,e,c,k,y,c,c,p,w,n,n,w,c,d -p,x,s,e,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,f,s,e,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,k,y,n,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,f,s,e,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -e,k,s,w,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,s,g -p,f,y,n,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,f,y,n,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,f,y,n,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,f,s,n,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,f,s,n,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,f,y,e,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,x,s,e,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,f,y,n,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,x,s,n,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,x,s,e,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,x,s,e,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l 
-p,f,y,e,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,x,s,n,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,x,s,e,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,f,s,n,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,x,s,n,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,f,y,e,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,x,s,n,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,f,y,e,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,f,s,e,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,f,y,n,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,f,y,n,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,x,y,e,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -e,k,f,g,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,n,g -p,x,s,e,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,k,y,n,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,k,y,n,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,x,y,e,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,f,s,e,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,f,s,n,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,f,s,e,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,x,s,e,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,f,s,n,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,f,y,e,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,x,s,n,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,x,y,e,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,f,s,e,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,x,s,n,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,f,s,n,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -e,x,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,y,v,l -p,k,s,n,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,f,s,n,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,k,y,e,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -e,x,y,c,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,y,p -e,b,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,y,c,l -e,k,s,g,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,n,g -e,x,f,g,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,s,g -p,x,s,n,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,k,y,e,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -e,x,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,o,c,l -p,k,s,e,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,b,y,y,f,n,f,w,n,y,e,c,y,y,y,y,p,y,o,e,w,c,l -p,x,s,e,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -e,b,s,g,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,n,g -p,k,y,e,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,k,s,e,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,f,s,e,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,f,y,n,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,f,s,e,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -e,k,f,g,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,s,g -p,k,s,n,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,k,s,e,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,f,s,e,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -e,b,f,g,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,n,g -p,f,y,n,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,f,s,n,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -e,f,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,n,c,l -e,x,y,p,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,y,p -p,f,s,n,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,k,y,e,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,f,s,n,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -e,b,s,w,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,n,g -p,x,y,e,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,f,y,e,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,f,s,e,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,k,y,n,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -e,x,f,w,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,s,g -e,b,s,w,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,n,g -p,k,s,e,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,f,y,n,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -e,f,y,n,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,v,p -p,k,s,n,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,k,s,n,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,f,y,n,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,f,y,e,f,m,a,c,b,w,e,c,k,y,c,c,p,w,n,n,w,c,d -p,x,s,e,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d 
-p,k,y,e,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,x,s,n,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,k,s,e,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,k,y,n,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,f,y,e,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -e,x,f,w,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,s,g -p,k,s,n,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,f,s,n,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,k,s,e,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,k,s,e,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,k,y,n,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -e,k,f,w,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,s,g -p,k,s,n,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,k,y,n,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,k,s,n,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,k,s,e,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,x,s,n,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,x,y,e,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,f,s,e,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,k,y,n,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -e,b,s,n,f,n,f,c,b,w,e,b,y,y,n,n,p,w,t,p,w,y,d -p,f,s,n,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,k,y,e,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -e,x,s,n,f,n,f,c,b,w,e,b,y,y,n,n,p,w,t,p,w,y,d -p,f,s,n,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,k,s,n,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,f,y,n,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,x,s,n,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -e,k,f,w,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,s,g -p,f,y,e,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,x,s,e,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,x,s,n,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,f,s,n,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,k,s,n,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,k,s,e,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,k,s,e,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,x,s,e,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -e,b,s,n,f,n,f,c,b,w,e,b,y,y,n,n,p,w,t,p,w,y,p -p,k,s,e,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,x,s,n,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,x,y,e,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,f,s,e,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -e,b,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,n,c,l -e,k,s,g,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,n,g -p,k,y,e,f,m,f,c,b,y,e,c,k,y,c,c,p,w,n,n,w,c,d -p,k,y,e,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,f,s,e,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -e,k,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,o,c,l -p,f,y,e,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,k,y,e,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -e,k,f,w,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,s,g -e,x,s,g,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,s,g -e,k,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,o,v,l -p,k,y,e,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,k,y,n,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,k,s,e,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -e,x,s,w,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,s,g -e,k,s,w,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,n,g -e,k,s,w,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,s,g -p,k,s,e,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,x,s,e,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -e,k,s,g,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,s,g -e,x,f,w,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,s,g -p,k,y,e,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -e,b,s,w,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,s,g -p,k,s,e,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,f,s,n,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,f,y,n,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -e,k,s,g,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,s,g -e,x,s,w,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,n,g -p,f,y,e,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,x,s,e,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -e,x,f,w,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,s,g -e,k,f,w,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,n,g -p,f,s,e,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d 
-e,f,s,g,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,v,p -p,k,s,e,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,x,s,n,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -e,k,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,b,c,l -e,x,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,n,c,l -p,k,y,e,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,x,s,e,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,x,s,n,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,k,s,e,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,x,s,n,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -e,k,f,w,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,n,g -e,k,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,n,c,l -p,k,y,c,f,m,f,c,b,w,e,c,k,y,c,c,p,w,n,n,w,c,d -e,x,f,g,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,s,g -e,x,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,o,v,l -e,x,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,b,v,l -p,k,y,e,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -e,k,f,w,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,n,g -e,x,f,g,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,s,g -e,f,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,n,v,l -p,k,y,n,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,k,y,n,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,f,y,n,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,f,y,e,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -e,k,f,g,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,n,g -p,f,y,e,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,f,y,n,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,k,s,e,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -e,k,s,g,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,s,g -e,b,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,y,c,l -p,k,y,n,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,x,s,n,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -e,b,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,o,v,l -p,k,s,n,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,k,s,n,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,k,s,e,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,f,s,e,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,x,s,e,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,x,s,e,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,x,s,n,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,k,y,n,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -e,x,s,p,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,y,p -p,f,s,e,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,x,y,n,f,m,a,c,b,w,e,c,k,y,c,c,p,w,n,n,w,c,d -p,k,s,n,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -e,k,s,g,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,s,g -p,k,s,n,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,f,y,n,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,k,s,n,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,k,y,e,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,f,y,e,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -e,f,s,c,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,y,p -p,f,y,n,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,k,s,e,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,x,s,e,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,f,y,e,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,x,s,n,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -e,f,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,n,c,l -p,f,s,e,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,x,y,e,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,x,y,e,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,x,s,e,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,k,s,e,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,x,s,e,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -e,b,f,g,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,s,g -p,k,y,e,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,k,s,e,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -e,k,f,g,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,n,g -e,f,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,n,v,l -p,f,y,n,f,m,f,c,b,w,e,c,k,y,c,c,p,w,n,n,w,c,d -p,k,s,e,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,f,s,n,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,k,y,n,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,k,s,e,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,k,y,n,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p 
-p,k,y,n,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -e,x,f,g,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,n,g -p,k,y,e,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,x,y,c,f,m,a,c,b,y,e,c,k,y,c,c,p,w,n,n,w,c,d -p,k,y,e,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,k,y,e,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -e,b,s,w,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,s,g -p,k,s,n,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,x,s,e,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,f,y,n,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,f,s,e,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,k,y,e,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,k,y,e,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -e,b,f,w,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,n,g -p,k,y,n,f,m,a,c,b,w,e,c,k,y,c,c,p,w,n,n,w,c,d -p,f,y,e,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,k,s,n,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,k,y,n,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,x,s,e,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -e,x,y,g,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,v,p -p,f,s,n,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,k,y,e,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -e,k,f,w,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,n,g -p,x,s,n,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,k,y,n,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,x,s,e,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -e,k,s,w,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,n,g -p,f,y,n,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,k,s,e,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,k,s,e,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,f,y,e,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -e,f,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,y,v,l -p,f,y,n,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,k,y,e,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,k,s,e,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,k,s,e,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -e,b,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,b,v,l -e,k,s,g,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,s,g -e,x,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,n,v,l -e,x,f,g,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,n,g -p,k,s,e,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,f,y,e,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,k,s,n,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,k,s,n,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -e,b,s,w,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,n,g -p,f,s,n,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -e,b,f,g,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,n,g -p,x,s,n,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,x,y,e,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,f,y,c,f,m,f,c,b,w,e,c,k,y,c,c,p,w,n,n,w,c,d -p,f,s,n,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,x,y,e,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,f,s,n,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,f,y,e,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,f,s,e,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,x,s,e,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -e,k,s,g,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,s,g -p,x,s,e,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,k,s,n,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,k,y,e,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,x,s,n,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -e,b,s,g,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,n,g -p,x,s,n,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,f,s,e,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,k,s,n,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -e,b,s,g,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,s,g -p,x,s,n,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,f,y,e,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,f,s,e,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,f,y,n,f,m,f,c,b,y,e,c,k,y,c,c,p,w,n,n,w,c,d -e,b,f,g,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,s,g -p,f,s,e,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,k,s,n,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -e,k,s,g,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,n,g -p,k,y,n,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l 
-p,x,s,e,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -e,b,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,y,c,l -p,k,y,n,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,f,y,n,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -e,x,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,b,c,l -p,k,s,n,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,k,s,n,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,k,s,n,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,k,s,n,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -e,f,y,c,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,y,p -e,f,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,n,c,l -e,b,s,w,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,n,g -p,k,s,e,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -e,b,s,g,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,n,g -p,k,y,e,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,k,y,n,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -e,x,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,y,c,l -p,x,s,e,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,x,s,n,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,f,s,e,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,f,y,e,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,x,y,e,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,k,s,e,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -e,k,y,n,f,n,f,c,b,w,e,b,y,y,n,n,p,w,t,p,w,y,d -p,f,s,e,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,k,s,n,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,f,y,n,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,f,s,e,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -e,k,f,g,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,n,g -p,k,s,n,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -e,b,f,w,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,s,g -p,k,s,e,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -e,x,s,g,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,n,g -e,k,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,y,c,l -p,k,s,e,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,k,y,n,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -e,k,f,g,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,n,g -p,x,s,n,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,k,y,n,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -e,x,f,w,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,n,g -p,k,y,e,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,k,s,e,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,k,s,e,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,k,y,e,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,k,y,n,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -e,k,s,g,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,n,g -p,k,y,n,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,k,s,e,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -e,x,s,g,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,n,g -e,f,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,y,v,l -e,k,s,g,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,n,g -p,x,s,e,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,k,s,n,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,k,s,n,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -e,x,s,g,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,s,g -p,k,y,e,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -e,f,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,y,v,l -e,k,s,w,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,n,g -p,f,y,e,f,m,f,c,b,y,e,c,k,y,c,c,p,w,n,n,w,c,d -e,b,s,g,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,s,g -p,f,y,n,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -e,b,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,b,c,l -e,b,s,g,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,n,g -e,k,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,y,c,l -e,b,s,g,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,n,g -p,k,s,n,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,k,y,n,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -e,f,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,o,c,l -p,k,s,e,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,f,s,e,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,k,s,n,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,k,s,n,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -e,b,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,y,v,l -e,k,f,g,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,s,g -e,b,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,o,c,l 
-p,k,s,n,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,k,y,e,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,k,y,e,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,k,s,n,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -e,f,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,n,c,l -p,k,s,e,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -e,x,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,o,v,l -p,k,s,n,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -e,k,s,w,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,s,g -p,k,y,e,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,k,y,n,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -e,k,f,g,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,s,g -p,f,s,n,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -e,x,s,g,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,n,g -p,k,y,n,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,f,s,e,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -e,x,f,w,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,n,g -p,k,y,n,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,f,y,n,f,m,a,c,b,w,e,c,k,y,c,c,p,w,n,n,w,c,d -e,b,f,g,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,n,g -p,k,y,e,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,x,y,e,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,k,s,e,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -e,k,f,g,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,n,g -e,k,f,w,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,s,g -p,k,y,e,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -e,k,s,g,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,n,g -p,k,s,n,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,k,y,n,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -e,k,s,w,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,s,g -p,k,y,e,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -e,b,s,w,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,s,g -p,k,s,n,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -e,x,s,w,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,n,g -e,b,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,n,v,l -p,k,s,n,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,k,y,e,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -e,x,f,w,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,n,g -p,x,y,e,f,m,a,c,b,y,e,c,k,y,c,c,p,w,n,n,w,c,d -p,k,s,e,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,k,s,n,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -p,k,y,n,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,k,s,e,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -e,k,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,b,c,l -p,k,y,n,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -e,f,y,n,f,n,f,c,b,w,e,b,y,y,n,n,p,w,t,p,w,y,d -e,k,s,g,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,n,g -p,f,y,e,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,k,y,y,f,n,f,w,n,y,e,c,y,y,y,y,p,y,o,e,w,c,l -p,k,y,e,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,k,s,e,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,k,y,e,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -e,b,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,n,c,l -e,x,s,g,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,y,p -e,k,s,g,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,n,g -p,f,y,e,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,k,s,e,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,k,y,e,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -e,b,f,w,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,n,g -e,f,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,o,c,l -p,k,s,n,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,k,s,e,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -e,x,f,w,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,s,g -p,k,y,e,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -e,x,f,w,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,s,g -p,k,y,e,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,k,y,n,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -e,x,s,g,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,n,g -e,x,s,w,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,n,g -p,k,y,n,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,k,s,n,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,k,s,e,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -e,b,f,w,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,n,g -p,f,s,n,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,k,s,e,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d 
-p,k,s,n,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,x,y,n,f,m,f,c,b,y,e,c,k,y,c,c,p,w,n,n,w,c,d -p,k,y,n,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -e,f,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,y,c,l -p,x,y,e,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -e,f,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,y,v,l -e,b,s,w,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,n,g -e,b,s,g,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,n,g -p,k,y,e,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,k,s,n,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,k,s,e,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,k,s,e,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -e,b,f,w,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,s,g -e,k,s,g,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,n,g -p,f,y,e,f,m,f,c,b,w,e,c,k,y,c,c,p,w,n,n,w,c,d -p,k,y,n,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,k,y,n,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,f,s,e,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,x,s,n,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,k,y,n,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,k,y,c,f,m,f,c,b,y,e,c,k,y,c,c,p,w,n,n,w,c,d -p,k,y,n,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -e,k,s,w,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,n,g -p,f,y,n,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -e,b,f,g,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,s,g -p,f,s,e,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,k,s,e,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -e,k,s,g,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,s,g -e,x,s,g,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,s,g -p,k,s,e,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -e,k,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,y,c,l -e,f,y,p,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,y,p -e,b,s,w,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,n,g -p,f,y,n,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,f,s,e,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,k,s,e,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -e,k,f,g,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,n,g -e,x,f,w,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,s,g -e,x,s,c,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,y,p -e,k,f,g,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,s,g -p,k,s,e,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,k,y,e,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,k,s,n,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,x,y,e,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -e,k,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,o,c,l -p,f,y,y,f,n,f,w,n,w,e,c,y,y,y,y,p,y,o,e,w,c,l -p,x,y,c,f,m,a,c,b,w,e,c,k,y,c,c,p,w,n,n,w,c,d -p,x,s,n,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -e,x,y,p,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,v,p -p,k,y,n,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -e,k,s,g,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,n,g -e,b,s,g,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,s,g -p,k,s,e,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -e,x,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,b,v,l -e,b,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,b,v,l -e,f,s,n,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,y,p -e,k,s,g,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,s,g -p,f,s,n,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,k,s,n,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,f,y,n,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,k,y,e,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,k,s,n,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,k,s,e,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -e,b,s,w,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,n,g -p,k,y,n,f,m,f,c,b,y,e,c,k,y,c,c,p,w,n,n,w,c,d -p,k,y,e,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -e,k,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,y,v,l -p,k,s,e,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -e,b,s,w,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,s,g -e,x,s,g,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,s,g -p,k,s,n,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,k,y,n,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,k,s,n,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,k,y,e,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,k,s,e,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l 
-p,k,y,n,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -e,b,s,g,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,s,g -p,k,y,e,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -e,b,f,w,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,s,g -p,c,y,y,f,n,f,w,n,y,e,c,y,y,y,y,p,y,o,e,w,c,l -p,x,s,n,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,k,y,e,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -e,x,s,g,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,s,g -e,b,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,y,v,l -p,k,y,e,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -e,b,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,n,c,l -p,k,y,e,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,k,y,e,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,k,s,e,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -e,b,s,g,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,s,g -e,k,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,n,c,l -e,b,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,n,v,l -p,x,s,n,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -e,x,s,g,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,v,p -e,x,f,g,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,s,g -p,k,s,n,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -e,k,f,g,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,n,g -p,k,s,n,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,k,y,e,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,k,y,n,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -e,f,s,p,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,y,p -p,k,y,n,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,k,y,e,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -e,f,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,b,c,l -p,k,y,e,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,k,y,n,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,k,y,n,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,f,y,n,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,k,s,e,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,k,y,e,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -e,x,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,o,c,l -p,f,s,e,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -e,k,s,g,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,s,g -p,k,s,e,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,k,s,e,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -e,f,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,o,v,l -p,k,s,n,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -e,x,f,g,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,s,g -e,k,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,o,v,l -p,f,s,e,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,k,s,n,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -e,f,y,n,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,y,p -e,x,f,g,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,n,g -e,k,s,w,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,n,g -e,b,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,b,c,l -p,k,y,e,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,k,y,e,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -e,k,y,n,f,n,f,c,b,w,e,b,y,y,n,n,p,w,t,p,w,y,p -e,x,f,g,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,s,g -p,k,s,n,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,k,y,e,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,k,y,n,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,f,y,n,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -e,f,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,b,v,l -p,f,y,e,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,k,s,n,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,k,s,n,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -e,b,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,y,c,l -e,x,f,g,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,n,g -p,k,s,n,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,k,y,n,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -e,x,s,g,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,s,g -p,k,y,n,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,k,s,e,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,k,y,e,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -e,b,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,o,c,l -e,b,s,g,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,s,g -p,x,y,e,f,m,a,c,b,w,e,c,k,y,c,c,p,w,n,n,w,c,d -p,k,s,n,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,k,y,e,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d 
-p,k,y,e,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,k,s,n,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,k,y,e,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,k,s,e,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,x,s,e,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,k,s,e,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,k,s,n,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -e,b,f,g,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,s,g -p,k,y,n,f,m,a,c,b,y,e,c,k,y,c,c,p,w,n,n,w,c,d -e,f,s,n,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,v,p -p,k,s,n,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,f,y,y,f,n,f,w,n,y,e,c,y,y,y,y,p,y,o,e,w,c,l -p,k,s,n,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,f,y,e,f,m,a,c,b,y,e,c,k,y,c,c,p,w,n,n,w,c,d -e,x,f,g,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,n,g -p,k,y,n,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,k,s,n,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,f,y,n,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -e,f,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,y,c,l -p,k,y,n,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,f,y,n,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,k,s,n,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,f,y,e,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,k,s,n,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,x,y,e,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,k,y,n,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -e,x,s,w,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,s,g -e,x,s,g,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,s,g -e,x,f,w,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,s,g -p,k,s,n,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,k,s,e,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,k,y,e,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,k,s,e,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,k,s,n,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,k,y,n,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,k,y,e,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,k,s,n,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,k,s,e,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -e,f,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,o,c,l -p,f,s,n,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,k,s,n,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -e,x,s,g,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,n,g -e,x,f,w,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,n,g -p,k,y,n,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -e,x,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,o,c,l -p,k,y,n,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,k,y,n,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,k,y,n,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,k,y,e,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,k,s,n,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -e,b,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,b,c,l -e,f,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,n,c,l -e,b,s,w,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,s,g -p,k,s,n,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -e,x,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,n,c,l -p,k,y,n,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -e,b,f,g,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,s,g -e,b,s,g,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,n,g -p,x,s,n,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -e,x,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,b,c,l -e,x,s,w,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,n,g -e,x,s,n,f,n,f,c,b,w,e,b,y,y,n,n,p,w,t,p,w,y,p -e,k,s,g,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,s,g -e,x,s,w,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,n,g -p,f,y,c,f,m,f,c,b,y,e,c,k,y,c,c,p,w,n,n,w,c,d -e,x,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,y,c,l -p,k,s,e,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,k,y,e,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -e,x,s,w,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,n,g -e,k,f,w,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,s,g -e,b,f,g,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,s,g -e,k,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,b,v,l -p,k,y,n,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,f,y,e,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,k,s,e,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l 
-e,k,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,n,v,l -e,f,s,p,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,v,p -p,k,y,n,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -e,b,s,w,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,s,g -p,k,s,e,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -e,k,s,w,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,s,g -p,k,y,n,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,k,s,n,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -e,f,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,b,c,l -p,k,s,n,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -p,k,y,e,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -e,x,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,y,v,l -e,x,f,w,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,n,g -e,x,f,g,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,n,g -e,x,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,b,v,l -p,k,s,n,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,k,y,e,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -e,x,s,w,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,s,g -p,k,y,e,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -e,x,f,g,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,s,g -p,k,s,n,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,f,y,n,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -p,k,s,e,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -e,k,s,n,f,n,f,c,b,w,e,b,y,y,n,n,p,w,t,p,w,y,p -p,k,s,e,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,k,s,n,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -e,b,s,g,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,s,g -p,k,y,n,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,k,s,n,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -e,k,f,g,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,n,g -p,k,y,n,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,k,s,e,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,k,y,n,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,k,s,e,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -e,k,f,w,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,n,g -p,k,y,e,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,k,y,e,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -e,b,f,w,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,s,g -e,x,s,g,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,n,g -e,b,s,g,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,s,g -p,k,y,e,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,k,s,n,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,k,y,e,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -e,b,s,w,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,s,g -p,f,s,e,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,k,s,n,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -e,k,s,w,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,s,g -p,k,y,e,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,k,s,e,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,k,s,n,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -e,x,s,n,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,v,p -p,k,s,n,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,k,y,e,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,b,y,y,f,n,f,w,n,w,e,c,y,y,y,y,p,y,o,e,w,c,l -p,k,y,e,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -e,x,s,n,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,y,p -p,k,s,e,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -e,k,f,g,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,s,g -p,k,s,e,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,k,s,e,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,k,s,n,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,k,y,n,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -e,x,f,g,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,n,g -e,b,s,w,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,n,g -p,k,y,e,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -e,b,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,n,c,l -p,k,y,e,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,k,s,e,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -e,b,s,g,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,s,g -p,k,s,e,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -e,b,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,o,v,l -p,f,s,n,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,k,s,e,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,k,s,n,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,k,y,n,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l 
-e,b,f,g,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,n,g -e,k,s,w,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,s,g -p,k,s,n,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,f,y,n,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -e,x,s,w,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,s,g -e,x,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,b,c,l -p,k,s,n,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,k,y,n,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -e,k,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,b,v,l -e,b,f,w,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,s,g -e,x,s,w,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,n,g -p,f,y,e,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,k,y,e,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,x,y,e,f,m,f,c,b,y,e,c,k,y,c,c,p,w,n,n,w,c,d -e,f,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,y,c,l -p,k,s,n,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,x,s,n,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,k,y,e,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -e,b,s,w,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,s,g -e,b,s,g,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,s,g -e,k,f,g,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,n,g -e,k,f,g,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,n,g -e,x,f,w,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,n,g -p,k,y,e,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,f,y,e,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,k,y,e,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,f,s,n,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -e,b,s,w,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,s,g -e,k,s,w,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,s,g -e,b,f,w,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,n,g -e,f,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,b,c,l -p,k,y,e,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,k,s,n,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,k,s,n,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,f,y,n,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,x,s,e,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -e,f,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,o,v,l -e,k,f,w,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,s,g -p,k,s,n,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,x,s,n,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -e,b,f,g,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,s,g -p,k,y,n,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -e,b,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,y,c,l -e,f,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,n,v,l -p,k,y,e,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,k,s,e,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,k,y,n,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -p,k,y,e,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,k,y,n,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -e,x,s,g,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,s,g -p,k,y,n,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -e,f,y,c,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,v,p -e,b,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,n,v,l -p,k,s,e,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,k,s,n,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,k,y,n,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -e,b,f,g,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,s,g -p,k,y,n,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -e,b,f,w,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,s,g -p,k,s,e,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -e,x,s,w,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,s,g -p,k,y,e,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,k,y,e,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -e,x,s,w,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,n,g -p,k,y,e,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,k,y,n,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -e,k,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,o,v,l -e,k,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,n,v,l -p,k,s,e,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,k,s,e,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -e,k,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,n,c,l -p,k,s,n,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,k,s,e,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,k,y,e,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,k,y,e,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d 
-p,k,y,e,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -e,k,f,w,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,n,g -e,b,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,n,v,l -p,k,s,e,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -e,f,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,b,v,l -e,f,y,n,f,n,f,c,b,w,e,b,y,y,n,n,p,w,t,p,w,y,p -p,k,y,e,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -e,f,s,g,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,y,p -e,x,s,p,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,v,p -p,c,y,y,f,n,f,w,n,w,e,c,y,y,y,y,p,y,o,e,w,c,l -p,k,s,e,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -p,k,y,e,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,k,y,e,f,m,a,c,b,y,e,c,k,y,c,c,p,w,n,n,w,c,d -e,b,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,o,v,l -p,k,s,n,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -e,x,s,w,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,s,g -p,x,s,e,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,x,y,c,f,m,f,c,b,w,e,c,k,y,c,c,p,w,n,n,w,c,d -p,f,s,n,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -e,k,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,b,c,l -e,b,y,n,f,n,f,c,b,w,e,b,y,y,n,n,p,w,t,p,w,y,d -p,f,s,n,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,k,s,n,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -e,b,s,g,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,s,g -e,x,y,n,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,v,p -p,f,s,n,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -e,k,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,y,c,l -e,b,f,w,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,n,g -e,x,f,w,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,n,g -p,k,s,n,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,x,y,n,f,m,a,c,b,y,e,c,k,y,c,c,p,w,n,n,w,c,d -p,k,y,e,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -e,x,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,b,c,l -p,k,s,e,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,x,s,e,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,k,s,n,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,k,y,e,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,k,y,n,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -p,k,s,e,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -e,b,f,g,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,n,g -e,k,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,y,v,l -p,k,y,n,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,k,y,y,f,n,f,w,n,w,e,c,y,y,y,y,p,y,o,e,w,c,l -e,k,s,w,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,n,g -e,x,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,y,c,l -p,k,s,n,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -e,k,f,g,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,s,g -e,k,f,w,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,n,g -e,f,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,b,c,l -e,x,s,w,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,n,g -e,b,s,g,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,n,g -e,x,f,w,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,n,g -e,k,f,w,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,n,g -p,k,s,n,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,k,y,e,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -e,k,f,g,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,s,g -e,x,s,w,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,s,g -p,k,s,n,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,k,y,n,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,k,y,n,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,k,s,e,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -p,k,s,e,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -e,b,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,n,v,l -p,k,s,e,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -e,x,s,g,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,n,g -e,x,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,y,v,l -e,b,s,w,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,n,g -p,k,s,e,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -e,x,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,n,v,l -p,k,s,n,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,k,s,e,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,k,y,e,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -e,b,f,w,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,s,g -p,k,y,n,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,k,s,e,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d 
-e,b,s,g,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,s,g -p,k,s,n,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,x,s,n,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,k,y,e,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -e,x,s,w,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,n,g -e,x,s,w,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,s,g -p,k,y,n,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -e,k,s,n,f,n,f,c,b,w,e,b,y,y,n,n,p,w,t,p,w,y,d -p,k,s,e,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,k,y,e,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -e,b,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,o,v,l -e,b,f,g,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,n,g -p,k,s,e,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,k,s,n,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -p,f,y,n,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,f,y,n,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -e,f,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,y,c,l -p,k,y,n,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -e,k,s,w,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,s,g -e,b,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,b,c,l -p,k,y,n,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -p,k,s,n,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,k,s,e,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -e,k,s,w,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,n,g -p,k,y,e,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -e,x,s,w,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,s,g -p,k,y,e,f,s,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,k,y,e,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,k,y,n,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,f,y,c,f,m,a,c,b,w,e,c,k,y,c,c,p,w,n,n,w,c,d -p,x,s,n,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -e,k,f,g,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,s,g -p,k,s,e,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,x,y,n,f,m,f,c,b,w,e,c,k,y,c,c,p,w,n,n,w,c,d -p,f,y,e,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -e,k,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,b,c,l -p,k,s,n,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -e,b,f,g,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,n,g -e,b,f,w,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,n,g -e,x,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,y,v,l -p,k,y,n,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -e,f,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,o,c,l -e,x,s,w,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,n,g -p,k,s,e,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,k,s,e,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,p -e,x,s,g,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,n,g -e,b,s,g,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,n,g -p,k,y,n,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -p,k,y,e,f,m,f,c,b,w,e,c,k,y,c,c,p,w,n,n,w,c,d -p,k,y,e,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,k,y,n,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -e,x,f,w,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,s,g -p,k,y,e,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -e,b,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,b,v,l -p,k,y,n,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,k,s,n,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -e,b,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,y,v,l -e,f,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,n,v,l -e,k,s,g,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,n,g -e,f,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,y,c,l -p,k,y,e,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -e,b,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,b,v,l -e,k,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,n,c,l -p,k,y,e,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,d -e,f,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,n,c,l -p,k,s,e,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -e,b,f,g,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,n,g -p,k,y,n,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -e,f,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,b,v,l -p,k,s,n,f,s,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,l -e,x,s,g,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,s,g -e,x,s,g,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,s,g -e,k,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,b,v,l -e,x,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,b,c,l -p,k,y,e,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d 
-e,x,f,g,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,n,g -p,k,s,n,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,k,y,e,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,k,y,n,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -p,k,s,e,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,k,y,n,f,s,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,k,y,e,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -e,x,s,w,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,n,g -p,k,s,n,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,k,s,e,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -p,k,s,e,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -e,x,f,g,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,n,g -e,x,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,o,c,l -e,b,s,w,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,n,g -p,k,s,n,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,k,y,n,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,k,s,n,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,x,y,e,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,k,y,e,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -e,x,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,b,v,l -e,x,s,g,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,s,g -p,k,s,e,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,k,y,e,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -e,k,s,w,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,n,g -e,b,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,b,v,l -e,k,s,w,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,n,g -p,k,s,e,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,k,y,n,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,k,s,e,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -p,k,s,n,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,x,s,n,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -e,k,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,n,c,l -p,k,s,n,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,k,s,n,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -p,k,y,e,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -e,x,f,w,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,s,g -p,f,s,e,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,k,s,n,f,y,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,k,s,e,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -e,f,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,b,v,l -e,x,y,n,f,n,f,c,b,w,e,b,y,y,n,n,p,w,t,p,w,y,d -p,k,y,n,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,k,s,e,f,s,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,l -e,b,s,w,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,s,g -p,k,y,n,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -p,k,s,n,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,p -e,x,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,n,c,l -e,x,s,g,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,s,g -e,x,s,g,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,n,g -p,f,y,n,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,f,y,n,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -e,f,y,g,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,v,p -p,k,s,e,f,y,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -e,x,f,w,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,s,g -p,k,y,n,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,k,s,n,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -p,x,s,e,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -e,x,f,g,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,s,g -e,f,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,n,v,l -p,x,y,e,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -e,b,f,w,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,s,g -p,k,s,e,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -e,x,f,g,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,s,g -p,f,y,n,f,m,a,c,b,y,e,c,k,y,c,c,p,w,n,n,w,c,d -e,x,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,o,v,l -e,b,s,g,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,n,g -e,b,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,o,v,l -p,k,s,e,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -e,f,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,o,v,l -e,x,f,g,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,n,g -p,k,s,n,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -e,x,f,w,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,n,g -e,f,y,p,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,v,p -p,k,y,e,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -e,k,f,g,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,n,g 
-e,k,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,y,c,l -p,k,s,n,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -p,k,s,n,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -p,k,s,e,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,k,y,n,f,s,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,d -e,k,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,o,c,l -p,k,y,n,f,s,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -e,k,f,w,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,s,g -p,k,y,e,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -e,f,s,n,f,n,f,c,b,w,e,b,y,y,n,n,p,w,t,p,w,y,p -p,k,s,e,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -e,x,f,g,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,s,g -p,k,y,e,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,d -p,k,y,e,f,y,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,k,s,n,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,l -e,b,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,o,c,l -e,k,s,w,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,n,g -e,b,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,o,c,l -e,f,y,g,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,y,p -p,k,y,e,f,m,a,c,b,w,e,c,k,y,c,c,p,w,n,n,w,c,d -p,x,y,e,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,k,y,n,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -e,k,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,y,v,l -e,k,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,o,c,l -e,x,y,c,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,v,p -p,k,s,e,f,s,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -p,k,y,n,f,y,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,l -p,k,s,e,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -p,k,y,n,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,k,y,e,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -e,f,s,c,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,v,p -e,x,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,n,c,l -e,b,f,w,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,n,g -e,k,f,g,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,s,g -e,f,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,o,c,l -e,k,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,o,c,l -e,k,f,w,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,n,g -p,k,y,e,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,l -e,b,f,w,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,s,g -p,k,y,n,f,y,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,k,y,e,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,k,y,n,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -e,x,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,y,v,l -e,x,y,n,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,y,p -e,b,s,w,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,n,g -p,f,y,n,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,d -p,k,s,n,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,k,s,e,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -e,x,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,y,c,l -e,x,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,n,v,l -e,b,s,w,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,s,g -p,x,y,e,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -e,b,f,w,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,s,g -e,b,f,w,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,n,g -e,b,f,w,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,n,g -e,k,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,o,v,l -p,k,y,e,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,p -e,k,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,o,v,l -p,k,s,e,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,d -p,k,y,c,f,m,a,c,b,y,e,c,k,y,c,c,p,w,n,n,w,c,d -p,k,s,n,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -e,x,s,g,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,n,g -e,f,s,n,f,n,f,c,b,w,e,b,y,y,n,n,p,w,t,p,w,y,d -e,k,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,y,c,l -e,b,y,n,f,n,f,c,b,w,e,b,y,y,n,n,p,w,t,p,w,y,p -p,x,s,n,f,f,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -e,b,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,b,c,l -p,f,s,n,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -p,k,y,n,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -e,k,s,g,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,n,g -p,k,y,n,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -e,k,f,g,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,s,g -e,x,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,o,c,l -p,k,s,e,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -e,f,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,y,v,l 
-p,k,s,n,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,k,y,n,f,s,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -e,k,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,n,v,l -e,b,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,n,c,l -e,x,y,n,f,n,f,c,b,w,e,b,y,y,n,n,p,w,t,p,w,y,p -e,x,s,w,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,s,g -e,f,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,o,v,l -p,f,s,n,f,f,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,x,y,e,f,s,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,d -p,k,y,e,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,k,s,e,f,f,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,k,s,e,f,f,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,p -e,b,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,n,v,l -e,x,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,y,v,l -e,f,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,y,v,l -p,k,s,n,f,s,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -e,b,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,b,v,l -e,k,s,g,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,s,g -e,k,f,w,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,s,g -p,k,s,e,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,p -e,b,f,w,f,n,f,w,b,g,e,?,k,k,w,w,p,w,t,p,w,n,g -e,x,s,w,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,s,g -e,b,f,w,f,n,f,w,b,p,e,?,s,k,w,w,p,w,t,p,w,s,g -p,k,y,n,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,d -p,f,s,n,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -p,f,y,e,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,k,s,e,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,p -p,k,y,n,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -e,x,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,y,c,l -e,f,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,n,v,l -p,k,y,n,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,l -p,k,s,n,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -e,x,f,g,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,n,g -e,k,s,w,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,s,g -p,k,y,e,f,f,f,c,n,b,t,?,s,k,p,p,p,w,o,e,w,v,p -p,x,s,n,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,d -p,k,y,n,f,f,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,l -e,k,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,b,c,l -e,b,f,w,f,n,f,w,b,p,e,?,k,s,w,w,p,w,t,p,w,n,g -e,k,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,b,v,l -p,k,y,e,f,y,f,c,n,b,t,?,k,k,p,w,p,w,o,e,w,v,d -e,x,y,g,t,n,f,c,b,w,e,b,s,s,w,w,p,w,t,p,w,y,p -p,k,s,e,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -e,b,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,y,v,l -p,k,y,e,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,p -e,x,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,b,c,l -e,f,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,b,v,l -e,f,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,y,c,l -p,k,y,e,f,y,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -e,b,f,g,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,n,g -p,k,s,e,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,k,s,n,f,y,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -p,k,y,n,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,l -p,k,s,n,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,d -e,x,f,w,f,n,f,w,b,w,e,?,s,k,w,w,p,w,t,p,w,s,g -e,f,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,o,v,l -p,k,y,n,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -e,x,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,n,v,l -e,b,f,g,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,n,g -e,b,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,y,v,l -e,x,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,o,v,l -p,k,s,e,f,s,f,c,n,b,t,?,s,k,w,p,p,w,o,e,w,v,l -p,k,s,n,f,f,f,c,n,b,t,?,s,s,w,p,p,w,o,e,w,v,p -p,k,s,n,f,s,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,k,s,n,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,k,y,n,f,y,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -e,k,f,w,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,s,g -p,k,y,e,f,f,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,l -e,k,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,b,v,l -p,k,s,e,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -e,b,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,b,c,l -e,f,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,b,c,l -e,k,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,n,c,l -p,k,y,e,f,s,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,l -e,k,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,y,v,l 
-p,k,y,e,f,f,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,p -e,k,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,y,v,l -e,b,f,g,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,n,g -e,f,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,o,c,l -e,b,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,y,c,l -e,f,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,o,v,l -e,b,f,g,f,n,f,w,b,g,e,?,s,s,w,w,p,w,t,p,w,s,g -p,k,y,e,f,f,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -p,k,s,n,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,d -p,k,y,n,f,f,f,c,n,b,t,?,k,s,w,w,p,w,o,e,w,v,p -p,k,s,e,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,p -p,k,y,n,f,y,f,c,n,b,t,?,s,s,w,w,p,w,o,e,w,v,l -e,b,f,g,f,n,f,w,b,p,e,?,k,k,w,w,p,w,t,p,w,s,g -e,k,f,w,f,n,f,w,b,g,e,?,s,k,w,w,p,w,t,p,w,s,g -e,k,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,n,o,p,o,v,l -p,x,s,e,f,f,f,c,n,b,t,?,k,s,w,p,p,w,o,e,w,v,p -e,k,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,n,v,l -p,k,y,e,f,f,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -p,k,s,n,f,f,f,c,n,b,t,?,k,s,p,p,p,w,o,e,w,v,d -p,k,y,e,f,f,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,p -p,k,y,e,f,y,f,c,n,b,t,?,s,s,p,p,p,w,o,e,w,v,p -p,x,s,n,f,y,f,c,n,b,t,?,k,k,w,w,p,w,o,e,w,v,d -e,b,s,g,f,n,f,w,b,g,e,?,k,s,w,w,p,w,t,p,w,n,g -p,x,y,c,f,m,f,c,b,y,e,c,k,y,c,c,p,w,n,n,w,c,d -e,k,f,w,f,n,f,w,b,w,e,?,k,s,w,w,p,w,t,p,w,n,g -p,k,y,n,f,s,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,l -p,k,s,e,f,y,f,c,n,b,t,?,k,k,w,p,p,w,o,e,w,v,d -e,k,f,w,f,n,f,w,b,w,e,?,k,k,w,w,p,w,t,p,w,s,g -e,f,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,b,v,l -p,k,s,e,f,s,f,c,n,b,t,?,s,s,p,w,p,w,o,e,w,v,p -e,x,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,n,c,l -e,k,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,o,c,l -e,k,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,n,v,l -e,k,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,y,v,l -e,k,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,n,v,l -e,x,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,n,c,l -p,k,y,e,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,l -e,b,s,w,f,n,f,w,b,w,e,?,s,s,w,w,p,w,t,p,w,n,g -e,x,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,o,o,p,n,v,l -e,k,s,w,f,n,f,w,b,p,e,?,s,s,w,w,p,w,t,p,w,n,g -e,k,s,n,f,n,a,c,b,o,e,?,s,s,o,o,p,n,o,p,b,v,l -p,k,y,e,f,y,f,c,n,b,t,?,k,k,p,p,p,w,o,e,w,v,d -p,f,y,c,f,m,a,c,b,y,e,c,k,y,c,c,p,w,n,n,w,c,d -e,x,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,o,v,l -p,k,y,n,f,s,f,c,n,b,t,?,s,k,p,w,p,w,o,e,w,v,l -p,k,s,e,f,y,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -p,k,y,n,f,f,f,c,n,b,t,?,k,s,p,w,p,w,o,e,w,v,d -e,k,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,b,c,l -e,x,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,n,o,p,b,v,l -e,f,s,n,f,n,a,c,b,n,e,?,s,s,o,o,p,o,o,p,b,c,l -p,k,y,n,f,y,f,c,n,b,t,?,s,k,w,w,p,w,o,e,w,v,l -e,x,s,n,f,n,a,c,b,y,e,?,s,s,o,o,p,o,o,p,o,c,l diff --git a/ml-xgboost/demo/binary_classification/agaricus-lepiota.fmap b/ml-xgboost/demo/binary_classification/agaricus-lepiota.fmap deleted file mode 100644 index e1efc28..0000000 --- a/ml-xgboost/demo/binary_classification/agaricus-lepiota.fmap +++ /dev/null @@ -1,32 +0,0 @@ - 1. cap-shape: bell=b,conical=c,convex=x,flat=f,knobbed=k,sunken=s - 2. cap-surface: fibrous=f,grooves=g,scaly=y,smooth=s - 3. cap-color: brown=n,buff=b,cinnamon=c,gray=g,green=r,pink=p,purple=u,red=e,white=w,yellow=y - 4. bruises?: bruises=t,no=f - 5. odor: almond=a,anise=l,creosote=c,fishy=y,foul=f, - musty=m,none=n,pungent=p,spicy=s - 6. gill-attachment: attached=a,descending=d,free=f,notched=n - 7. gill-spacing: close=c,crowded=w,distant=d - 8. gill-size: broad=b,narrow=n - 9. gill-color: black=k,brown=n,buff=b,chocolate=h,gray=g, - green=r,orange=o,pink=p,purple=u,red=e, - white=w,yellow=y - 10. stalk-shape: enlarging=e,tapering=t - 11. stalk-root: bulbous=b,club=c,cup=u,equal=e, - rhizomorphs=z,rooted=r,missing=? - 12. stalk-surface-above-ring: fibrous=f,scaly=y,silky=k,smooth=s - 13. 
stalk-surface-below-ring: fibrous=f,scaly=y,silky=k,smooth=s - 14. stalk-color-above-ring: brown=n,buff=b,cinnamon=c,gray=g,orange=o, - pink=p,red=e,white=w,yellow=y - 15. stalk-color-below-ring: brown=n,buff=b,cinnamon=c,gray=g,orange=o, - pink=p,red=e,white=w,yellow=y - 16. veil-type: partial=p,universal=u - 17. veil-color: brown=n,orange=o,white=w,yellow=y - 18. ring-number: none=n,one=o,two=t - 19. ring-type: cobwebby=c,evanescent=e,flaring=f,large=l, - none=n,pendant=p,sheathing=s,zone=z - 20. spore-print-color: black=k,brown=n,buff=b,chocolate=h,green=r, - orange=o,purple=u,white=w,yellow=y - 21. population: abundant=a,clustered=c,numerous=n, - scattered=s,several=v,solitary=y - 22. habitat: grasses=g,leaves=l,meadows=m,paths=p, - urban=u,waste=w,woods=d diff --git a/ml-xgboost/demo/binary_classification/agaricus-lepiota.names b/ml-xgboost/demo/binary_classification/agaricus-lepiota.names deleted file mode 100644 index 4f1f3b5..0000000 --- a/ml-xgboost/demo/binary_classification/agaricus-lepiota.names +++ /dev/null @@ -1,148 +0,0 @@ -1. Title: Mushroom Database - -2. Sources: - (a) Mushroom records drawn from The Audubon Society Field Guide to North - American Mushrooms (1981). G. H. Lincoff (Pres.), New York: Alfred - A. Knopf - (b) Donor: Jeff Schlimmer (Jeffrey.Schlimmer@a.gp.cs.cmu.edu) - (c) Date: 27 April 1987 - -3. Past Usage: - 1. Schlimmer,J.S. (1987). Concept Acquisition Through Representational - Adjustment (Technical Report 87-19). Doctoral dissertation, Department - of Information and Computer Science, University of California, Irvine. - --- STAGGER: asymptoted to 95% classification accuracy after reviewing - 1000 instances. - 2. Iba,W., Wogulis,J., & Langley,P. (1988). Trading off Simplicity - and Coverage in Incremental Concept Learning. In Proceedings of - the 5th International Conference on Machine Learning, 73-79. - Ann Arbor, Michigan: Morgan Kaufmann. - -- approximately the same results with their HILLARY algorithm - 3. In the following references a set of rules (given below) were - learned for this data set which may serve as a point of - comparison for other researchers. - - Duch W, Adamczak R, Grabczewski K (1996) Extraction of logical rules - from training data using backpropagation networks, in: Proc. of - The 1st Online Workshop on Soft Computing, 19-30.Aug.1996, pp. 25-30, - available on-line at: http://www.bioele.nuee.nagoya-u.ac.jp/wsc1/ - - Duch W, Adamczak R, Grabczewski K, Ishikawa M, Ueda H, Extraction of - crisp logical rules using constrained backpropagation networks - - comparison of two new approaches, in: Proc. of the European Symposium - on Artificial Neural Networks (ESANN'97), Bruge, Belgium 16-18.4.1997, - pp. xx-xx - - Wlodzislaw Duch, Department of Computer Methods, Nicholas Copernicus - University, 87-100 Torun, Grudziadzka 5, Poland - e-mail: duch@phys.uni.torun.pl - WWW http://www.phys.uni.torun.pl/kmk/ - - Date: Mon, 17 Feb 1997 13:47:40 +0100 - From: Wlodzislaw Duch - Organization: Dept. of Computer Methods, UMK - - I have attached a file containing logical rules for mushrooms. - It should be helpful for other people since only in the last year I - have seen about 10 papers analyzing this dataset and obtaining quite - complex rules. We will try to contribute other results later. - - With best regards, Wlodek Duch - ________________________________________________________________ - - Logical rules for the mushroom data sets.
- - Logical rules given below seem to be the simplest possible for the - mushroom dataset and therefore should be treated as benchmark results. - - Disjunctive rules for poisonous mushrooms, from most general - to most specific: - - P_1) odor=NOT(almond.OR.anise.OR.none) - 120 poisonous cases missed, 98.52% accuracy - - P_2) spore-print-color=green - 48 cases missed, 99.41% accuracy - - P_3) odor=none.AND.stalk-surface-below-ring=scaly.AND. - (stalk-color-above-ring=NOT.brown) - 8 cases missed, 99.90% accuracy - - P_4) habitat=leaves.AND.cap-color=white - 100% accuracy - - Rule P_4) may also be - - P_4') population=clustered.AND.cap_color=white - - These rule involve 6 attributes (out of 22). Rules for edible - mushrooms are obtained as negation of the rules given above, for - example the rule: - - odor=(almond.OR.anise.OR.none).AND.spore-print-color=NOT.green - - gives 48 errors, or 99.41% accuracy on the whole dataset. - - Several slightly more complex variations on these rules exist, - involving other attributes, such as gill_size, gill_spacing, - stalk_surface_above_ring, but the rules given above are the simplest - we have found. - - -4. Relevant Information: - This data set includes descriptions of hypothetical samples - corresponding to 23 species of gilled mushrooms in the Agaricus and - Lepiota Family (pp. 500-525). Each species is identified as - definitely edible, definitely poisonous, or of unknown edibility and - not recommended. This latter class was combined with the poisonous - one. The Guide clearly states that there is no simple rule for - determining the edibility of a mushroom; no rule like ``leaflets - three, let it be'' for Poisonous Oak and Ivy. - -5. Number of Instances: 8124 - -6. Number of Attributes: 22 (all nominally valued) - -7. Attribute Information: (classes: edible=e, poisonous=p) - 1. cap-shape: bell=b,conical=c,convex=x,flat=f, - knobbed=k,sunken=s - 2. cap-surface: fibrous=f,grooves=g,scaly=y,smooth=s - 3. cap-color: brown=n,buff=b,cinnamon=c,gray=g,green=r, - pink=p,purple=u,red=e,white=w,yellow=y - 4. bruises?: bruises=t,no=f - 5. odor: almond=a,anise=l,creosote=c,fishy=y,foul=f, - musty=m,none=n,pungent=p,spicy=s - 6. gill-attachment: attached=a,descending=d,free=f,notched=n - 7. gill-spacing: close=c,crowded=w,distant=d - 8. gill-size: broad=b,narrow=n - 9. gill-color: black=k,brown=n,buff=b,chocolate=h,gray=g, - green=r,orange=o,pink=p,purple=u,red=e, - white=w,yellow=y - 10. stalk-shape: enlarging=e,tapering=t - 11. stalk-root: bulbous=b,club=c,cup=u,equal=e, - rhizomorphs=z,rooted=r,missing=? - 12. stalk-surface-above-ring: fibrous=f,scaly=y,silky=k,smooth=s - 13. stalk-surface-below-ring: fibrous=f,scaly=y,silky=k,smooth=s - 14. stalk-color-above-ring: brown=n,buff=b,cinnamon=c,gray=g,orange=o, - pink=p,red=e,white=w,yellow=y - 15. stalk-color-below-ring: brown=n,buff=b,cinnamon=c,gray=g,orange=o, - pink=p,red=e,white=w,yellow=y - 16. veil-type: partial=p,universal=u - 17. veil-color: brown=n,orange=o,white=w,yellow=y - 18. ring-number: none=n,one=o,two=t - 19. ring-type: cobwebby=c,evanescent=e,flaring=f,large=l, - none=n,pendant=p,sheathing=s,zone=z - 20. spore-print-color: black=k,brown=n,buff=b,chocolate=h,green=r, - orange=o,purple=u,white=w,yellow=y - 21. population: abundant=a,clustered=c,numerous=n, - scattered=s,several=v,solitary=y - 22. habitat: grasses=g,leaves=l,meadows=m,paths=p, - urban=u,waste=w,woods=d - -8. Missing Attribute Values: 2480 of them (denoted by "?"), all for - attribute #11. - -9. 
Class Distribution: - -- edible: 4208 (51.8%) - -- poisonous: 3916 (48.2%) - -- total: 8124 instances diff --git a/ml-xgboost/demo/binary_classification/mapfeat.py b/ml-xgboost/demo/binary_classification/mapfeat.py deleted file mode 100644 index 4cb98f6..0000000 --- a/ml-xgboost/demo/binary_classification/mapfeat.py +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/python - -def loadfmap( fname ): - fmap = {} - nmap = {} - - for l in open( fname ): - arr = l.split() - if arr[0].find('.') != -1: - idx = int( arr[0].strip('.') ) - assert idx not in fmap - fmap[ idx ] = {} - ftype = arr[1].strip(':') - content = arr[2] - else: - content = arr[0] - for it in content.split(','): - if it.strip() == '': - continue - k , v = it.split('=') - fmap[ idx ][ v ] = len(nmap) - nmap[ len(nmap) ] = ftype+'='+k - return fmap, nmap - -def write_nmap( fo, nmap ): - for i in range( len(nmap) ): - fo.write('%d\t%s\ti\n' % (i, nmap[i]) ) - -# start here -fmap, nmap = loadfmap( 'agaricus-lepiota.fmap' ) -fo = open( 'featmap.txt', 'w' ) -write_nmap( fo, nmap ) -fo.close() - -fo = open( 'agaricus.txt', 'w' ) -for l in open( 'agaricus-lepiota.data' ): - arr = l.split(',') - if arr[0] == 'p': - fo.write('1') - else: - assert arr[0] == 'e' - fo.write('0') - for i in range( 1,len(arr) ): - fo.write( ' %d:1' % fmap[i][arr[i].strip()] ) - fo.write('\n') - -fo.close() diff --git a/ml-xgboost/demo/binary_classification/mknfold.py b/ml-xgboost/demo/binary_classification/mknfold.py deleted file mode 100644 index a941f86..0000000 --- a/ml-xgboost/demo/binary_classification/mknfold.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/python -import sys -import random - -if len(sys.argv) < 3: - print ('Usage: <filename> <k> [nfold = 5]') - exit(0) - -random.seed( 10 ) - -k = int( sys.argv[2] ) -if len(sys.argv) > 3: - nfold = int( sys.argv[3] ) -else: - nfold = 5 - -fi = open( sys.argv[1], 'r' ) -ftr = open( sys.argv[1]+'.train', 'w' ) -fte = open( sys.argv[1]+'.test', 'w' ) -for l in fi: - if random.randint( 1 , nfold ) == k: - fte.write( l ) - else: - ftr.write( l ) - -fi.close() -ftr.close() -fte.close() - diff --git a/ml-xgboost/demo/binary_classification/mushroom.conf b/ml-xgboost/demo/binary_classification/mushroom.conf deleted file mode 100644 index 435c9bf..0000000 --- a/ml-xgboost/demo/binary_classification/mushroom.conf +++ /dev/null @@ -1,29 +0,0 @@ -# General Parameters, see comment for each definition -# choose the booster, can be gbtree or gblinear -booster = gbtree -# choose logistic regression loss function for binary classification -objective = binary:logistic - -# Tree Booster Parameters -# step size shrinkage -eta = 1.0 -# minimum loss reduction required to make a further partition -gamma = 1.0 -# minimum sum of instance weight(hessian) needed in a child -min_child_weight = 1 -# maximum depth of a tree -max_depth = 3 - -# Task Parameters -# the number of round to do boosting -num_round = 2 -# 0 means do not save any model except the final round model -save_period = 0 -# The path of training data -data = "agaricus.txt.train" -# The path of validation data, used to monitor training process, here [test] sets name of the validation set -eval[test] = "agaricus.txt.test" -# evaluate on training data as well each round -eval_train = 1 -# The path of test data -test:data = "agaricus.txt.test" diff --git a/ml-xgboost/demo/binary_classification/runexp.sh b/ml-xgboost/demo/binary_classification/runexp.sh deleted file mode 100644 index 68c3e6f..0000000 --- a/ml-xgboost/demo/binary_classification/runexp.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash
-# map feature using indicator encoding, also produce featmap.txt -python mapfeat.py -# split train and test -python mknfold.py agaricus.txt 1 -# training and output the models -../../xgboost mushroom.conf -# output prediction task=pred -../../xgboost mushroom.conf task=pred model_in=0002.model -# print the boosters of 0002.model in dump.raw.txt -../../xgboost mushroom.conf task=dump model_in=0002.model name_dump=dump.raw.txt -# use the feature map in printing for better visualization -../../xgboost mushroom.conf task=dump model_in=0002.model fmap=featmap.txt name_dump=dump.nice.txt -cat dump.nice.txt - diff --git a/ml-xgboost/demo/c-api/CMakeLists.txt b/ml-xgboost/demo/c-api/CMakeLists.txt deleted file mode 100644 index 20a8158..0000000 --- a/ml-xgboost/demo/c-api/CMakeLists.txt +++ /dev/null @@ -1,4 +0,0 @@ -cmake_minimum_required(VERSION 3.12) -find_package(xgboost REQUIRED) -add_executable(api-demo c-api-demo.c) -target_link_libraries(api-demo xgboost::xgboost) diff --git a/ml-xgboost/demo/c-api/Makefile b/ml-xgboost/demo/c-api/Makefile deleted file mode 100644 index 345079f..0000000 --- a/ml-xgboost/demo/c-api/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -SRC=c-api-demo.c -TGT=c-api-demo - -cc=cc -CFLAGS ?=-O3 -XGBOOST_ROOT ?=../.. -INCLUDE_DIR=-I$(XGBOOST_ROOT)/include -I$(XGBOOST_ROOT)/dmlc-core/include -I$(XGBOOST_ROOT)/rabit/include -LIB_DIR=-L$(XGBOOST_ROOT)/lib - -build: $(TGT) - -$(TGT): $(SRC) Makefile - $(cc) $(CFLAGS) $(INCLUDE_DIR) $(LIB_DIR) -o $(TGT) $(SRC) -lxgboost - -run: $(TGT) - LD_LIBRARY_PATH=$(XGBOOST_ROOT)/lib ./$(TGT) - -clean: - rm -f $(TGT) diff --git a/ml-xgboost/demo/c-api/README.md b/ml-xgboost/demo/c-api/README.md deleted file mode 100644 index d94f4ac..0000000 --- a/ml-xgboost/demo/c-api/README.md +++ /dev/null @@ -1,30 +0,0 @@ -C-APIs -=== - -**XGBoost** implements a C API originally designed for various language -bindings. For detailed reference, please check xgboost/c_api.h. Here is a -demonstration of using the API. - -# CMake -If you use **CMake** for your project, you can either install **XGBoost** -somewhere in your system and tell CMake to find it by calling -`find_package(xgboost)`, or put **XGBoost** inside your project's source tree -and call **CMake** command: `add_subdirectory(xgboost)`. To use -`find_package()`, put the following in your **CMakeLists.txt**: - -``` CMake -find_package(xgboost REQUIRED) -add_executable(api-demo c-api-demo.c) -target_link_libraries(api-demo xgboost::xgboost) -``` - -If you want to put XGBoost inside your project (like git submodule), use this -instead: -``` CMake -add_subdirectory(xgboost) -add_executable(api-demo c-api-demo.c) -target_link_libraries(api-demo xgboost) -``` - -# make -You can start by modifying the makefile in this directory to fit your needs. \ No newline at end of file diff --git a/ml-xgboost/demo/c-api/c-api-demo.c b/ml-xgboost/demo/c-api/c-api-demo.c deleted file mode 100644 index 2fd212f..0000000 --- a/ml-xgboost/demo/c-api/c-api-demo.c +++ /dev/null @@ -1,88 +0,0 @@ -/*! - * Copyright 2019 XGBoost contributors - * - * \file c-api-demo.c - * \brief A simple example of using xgboost C API.
- */ - -#include <stdio.h> -#include <stdlib.h> -#include <xgboost/c_api.h> - -#define safe_xgboost(call) { \ -int err = (call); \ -if (err != 0) { \ - fprintf(stderr, "%s:%d: error in %s: %s\n", __FILE__, __LINE__, #call, XGBGetLastError()); \ - exit(1); \ -} \ -} - -int main(int argc, char** argv) { - int silent = 0; - int use_gpu = 0; // set to 1 to use the GPU for training - - // load the data - DMatrixHandle dtrain, dtest; - safe_xgboost(XGDMatrixCreateFromFile("../data/agaricus.txt.train", silent, &dtrain)); - safe_xgboost(XGDMatrixCreateFromFile("../data/agaricus.txt.test", silent, &dtest)); - - // create the booster - BoosterHandle booster; - DMatrixHandle eval_dmats[2] = {dtrain, dtest}; - safe_xgboost(XGBoosterCreate(eval_dmats, 2, &booster)); - - // configure the training - // available parameters are described here: - // https://xgboost.readthedocs.io/en/latest/parameter.html - safe_xgboost(XGBoosterSetParam(booster, "tree_method", use_gpu ? "gpu_hist" : "hist")); - if (use_gpu) { - // set the GPU to use; - // this is not necessary, but provided here as an illustration - safe_xgboost(XGBoosterSetParam(booster, "gpu_id", "0")); - } else { - // avoid evaluating objective and metric on a GPU - safe_xgboost(XGBoosterSetParam(booster, "gpu_id", "-1")); - } - - safe_xgboost(XGBoosterSetParam(booster, "objective", "binary:logistic")); - safe_xgboost(XGBoosterSetParam(booster, "min_child_weight", "1")); - safe_xgboost(XGBoosterSetParam(booster, "gamma", "0.1")); - safe_xgboost(XGBoosterSetParam(booster, "max_depth", "3")); - safe_xgboost(XGBoosterSetParam(booster, "verbosity", silent ? "0" : "1")); - - // train and evaluate for 10 iterations - int n_trees = 10; - const char* eval_names[2] = {"train", "test"}; - const char* eval_result = NULL; - for (int i = 0; i < n_trees; ++i) { - safe_xgboost(XGBoosterUpdateOneIter(booster, i, dtrain)); - safe_xgboost(XGBoosterEvalOneIter(booster, i, eval_dmats, eval_names, 2, &eval_result)); - printf("%s\n", eval_result); - } - - // predict - bst_ulong out_len = 0; - const float* out_result = NULL; - int n_print = 10; - - safe_xgboost(XGBoosterPredict(booster, dtest, 0, 0, 0, &out_len, &out_result)); - printf("y_pred: "); - for (int i = 0; i < n_print; ++i) { - printf("%1.4f ", out_result[i]); - } - printf("\n"); - - // print true labels - safe_xgboost(XGDMatrixGetFloatInfo(dtest, "label", &out_len, &out_result)); - printf("y_test: "); - for (int i = 0; i < n_print; ++i) { - printf("%1.4f ", out_result[i]); - } - printf("\n"); - - // free everything - safe_xgboost(XGBoosterFree(booster)); - safe_xgboost(XGDMatrixFree(dtrain)); - safe_xgboost(XGDMatrixFree(dtest)); - return 0; -} diff --git a/ml-xgboost/demo/dask/README.md b/ml-xgboost/demo/dask/README.md deleted file mode 100644 index b70248c..0000000 --- a/ml-xgboost/demo/dask/README.md +++ /dev/null @@ -1,6 +0,0 @@ -Dask -==== - -This directory contains some demonstrations for using `dask` with `XGBoost`. -For an overview, see -https://xgboost.readthedocs.io/en/latest/tutorials/dask.html .
\ No newline at end of file diff --git a/ml-xgboost/demo/dask/cpu_training.py b/ml-xgboost/demo/dask/cpu_training.py deleted file mode 100644 index b86958e..0000000 --- a/ml-xgboost/demo/dask/cpu_training.py +++ /dev/null @@ -1,41 +0,0 @@ -import xgboost as xgb -from xgboost.dask import DaskDMatrix -from dask.distributed import Client -from dask.distributed import LocalCluster -from dask import array as da - - -def main(client): - # generate some random data for demonstration - m = 100000 - n = 100 - X = da.random.random(size=(m, n), chunks=100) - y = da.random.random(size=(m, ), chunks=100) - - # DaskDMatrix acts like normal DMatrix, works as a proxy for local - # DMatrix scatter around workers. - dtrain = DaskDMatrix(client, X, y) - - # Use train method from xgboost.dask instead of xgboost. This - # distributed version of train returns a dictionary containing the - # resulting booster and evaluation history obtained from - # evaluation metrics. - output = xgb.dask.train(client, - {'verbosity': 1, - 'tree_method': 'hist'}, - dtrain, - num_boost_round=4, evals=[(dtrain, 'train')]) - bst = output['booster'] - history = output['history'] - - # you can pass output directly into `predict` too. - prediction = xgb.dask.predict(client, bst, dtrain) - print('Evaluation history:', history) - return prediction - - -if __name__ == '__main__': - # or use other clusters for scaling - with LocalCluster(n_workers=7, threads_per_worker=4) as cluster: - with Client(cluster) as client: - main(client) diff --git a/ml-xgboost/demo/dask/gpu_training.py b/ml-xgboost/demo/dask/gpu_training.py deleted file mode 100644 index 3b3363f..0000000 --- a/ml-xgboost/demo/dask/gpu_training.py +++ /dev/null @@ -1,45 +0,0 @@ -from dask_cuda import LocalCUDACluster -from dask.distributed import Client -from dask import array as da -import xgboost as xgb -from xgboost.dask import DaskDMatrix - - -def main(client): - # generate some random data for demonstration - m = 100000 - n = 100 - X = da.random.random(size=(m, n), chunks=100) - y = da.random.random(size=(m, ), chunks=100) - - # DaskDMatrix acts like normal DMatrix, works as a proxy for local - # DMatrix scatter around workers. - dtrain = DaskDMatrix(client, X, y) - - # Use train method from xgboost.dask instead of xgboost. This - # distributed version of train returns a dictionary containing the - # resulting booster and evaluation history obtained from - # evaluation metrics. - output = xgb.dask.train(client, - {'verbosity': 2, - # Golden line for GPU training - 'tree_method': 'gpu_hist'}, - dtrain, - num_boost_round=4, evals=[(dtrain, 'train')]) - bst = output['booster'] - history = output['history'] - - # you can pass output directly into `predict` too. - prediction = xgb.dask.predict(client, bst, dtrain) - prediction = prediction.compute() - print('Evaluation history:', history) - return prediction - - -if __name__ == '__main__': - # `LocalCUDACluster` is used for assigning GPU to XGBoost processes. Here - # `n_workers` represents the number of GPUs since we use one GPU per worker - # process. 
- with LocalCUDACluster(n_workers=2, threads_per_worker=4) as cluster: - with Client(cluster) as client: - main(client) diff --git a/ml-xgboost/demo/dask/sklearn_cpu_training.py b/ml-xgboost/demo/dask/sklearn_cpu_training.py deleted file mode 100644 index 0549aa3..0000000 --- a/ml-xgboost/demo/dask/sklearn_cpu_training.py +++ /dev/null @@ -1,39 +0,0 @@ -'''Dask interface demo: - -Use scikit-learn regressor interface with CPU histogram tree method.''' -from dask.distributed import Client -from dask.distributed import LocalCluster -from dask import array as da -import xgboost - - -def main(client): - # generate some random data for demonstration - n = 100 - m = 10000 - partition_size = 100 - X = da.random.random((m, n), partition_size) - y = da.random.random(m, partition_size) - - regressor = xgboost.dask.DaskXGBRegressor(verbosity=1, n_estimators=2) - regressor.set_params(tree_method='hist') - # assigning client here is optional - regressor.client = client - - regressor.fit(X, y, eval_set=[(X, y)]) - prediction = regressor.predict(X) - - bst = regressor.get_booster() - history = regressor.evals_result() - - print('Evaluation history:', history) - # returned prediction is always a dask array. - assert isinstance(prediction, da.Array) - return bst # returning the trained model - - -if __name__ == '__main__': - # or use other clusters for scaling - with LocalCluster(n_workers=4, threads_per_worker=1) as cluster: - with Client(cluster) as client: - main(client) diff --git a/ml-xgboost/demo/dask/sklearn_gpu_training.py b/ml-xgboost/demo/dask/sklearn_gpu_training.py deleted file mode 100644 index afba215..0000000 --- a/ml-xgboost/demo/dask/sklearn_gpu_training.py +++ /dev/null @@ -1,42 +0,0 @@ -'''Dask interface demo: - -Use scikit-learn regressor interface with GPU histogram tree method.''' - -from dask.distributed import Client -# It's recommended to use dask_cuda for GPU assignment -from dask_cuda import LocalCUDACluster -from dask import array as da -import xgboost - - -def main(client): - # generate some random data for demonstration - n = 100 - m = 1000000 - partition_size = 10000 - X = da.random.random((m, n), partition_size) - y = da.random.random(m, partition_size) - - regressor = xgboost.dask.DaskXGBRegressor(verbosity=1) - regressor.set_params(tree_method='gpu_hist') - # assigning client here is optional - regressor.client = client - - regressor.fit(X, y, eval_set=[(X, y)]) - prediction = regressor.predict(X) - - bst = regressor.get_booster() - history = regressor.evals_result() - - print('Evaluation history:', history) - # returned prediction is always a dask array. - assert isinstance(prediction, da.Array) - return bst # returning the trained model - - -if __name__ == '__main__': - # With dask cuda, one can scale up XGBoost to arbitrary GPU clusters. - # `LocalCUDACluster` used here is only for demonstration purpose. - with LocalCUDACluster() as cluster: - with Client(cluster) as client: - main(client) diff --git a/ml-xgboost/demo/data/README.md b/ml-xgboost/demo/data/README.md deleted file mode 100644 index d2d63ec..0000000 --- a/ml-xgboost/demo/data/README.md +++ /dev/null @@ -1,2 +0,0 @@ -This folder contains processed example dataset used by the demos. 
-Copyright of the dataset belongs to the original copyright holder diff --git a/ml-xgboost/demo/data/agaricus.txt.test b/ml-xgboost/demo/data/agaricus.txt.test deleted file mode 100644 index 83bdd26..0000000 --- a/ml-xgboost/demo/data/agaricus.txt.test +++ /dev/null @@ -1,1611 +0,0 @@ -0 1:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -1 3:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 1:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 3:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 124:1 -0 3:1 10:1 20:1 21:1 23:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 124:1 -0 1:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -1 3:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 120:1 -0 4:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 123:1 -1 3:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 1:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 1:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 4:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 1:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -1 3:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 6:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 3:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 1:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 4:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 1:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 120:1 -1 3:1 10:1 19:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 120:1 -0 4:1 10:1 19:1 21:1 23:1 34:1 37:1 40:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 1:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 
41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 4:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 1:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 3:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 120:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 45:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 124:1 -0 1:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 1:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 4:1 10:1 19:1 21:1 23:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 10:1 19:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 1:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 1:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 124:1 -0 3:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 3:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 1:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 1:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 3:1 7:1 19:1 21:1 24:1 34:1 37:1 40:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 123:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 37:1 40:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 6:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 45:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 1:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 4:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 3:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -1 4:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 124:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 4:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 123:1 -0 3:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 3:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 3:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 1:1 9:1 19:1 
21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 45:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 124:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -1 3:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -1 3:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 120:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -1 3:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 1:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 3:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 1:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 3:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 120:1 -0 3:1 7:1 20:1 21:1 24:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 4:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 45:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 124:1 -0 1:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 123:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 120:1 -0 6:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 124:1 -0 3:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 6:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 124:1 -0 1:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -1 3:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 124:1 -1 3:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 3:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 123:1 -0 
4:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 1:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 3:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 3:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 1:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 1:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 124:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 124:1 -0 1:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 4:1 10:1 19:1 21:1 24:1 34:1 37:1 40:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 4:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 120:1 -0 4:1 7:1 20:1 21:1 23:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 4:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 3:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 1:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 3:1 7:1 19:1 21:1 23:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 4:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 123:1 -0 1:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -1 3:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 1:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 3:1 7:1 20:1 21:1 24:1 34:1 37:1 40:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 124:1 -0 3:1 7:1 19:1 21:1 24:1 34:1 37:1 40:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 
105:1 116:1 120:1 -0 3:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 120:1 -0 3:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 6:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 45:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 6:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 1:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 4:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 120:1 -0 3:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 1:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 4:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 123:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 45:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 124:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 1:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 3:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -1 3:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 120:1 -0 1:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 1:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 123:1 -0 3:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -1 3:1 10:1 19:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 4:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 123:1 -0 4:1 10:1 20:1 21:1 23:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 3:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 
88:1 92:1 95:1 102:1 105:1 119:1 123:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 37:1 40:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 1:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 1:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 3:1 10:1 20:1 21:1 23:1 34:1 37:1 40:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 120:1 -0 6:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 45:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 124:1 -0 1:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 3:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -1 3:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 120:1 -1 3:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 3:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 3:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 123:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 3:1 10:1 20:1 21:1 24:1 34:1 37:1 40:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 3:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 4:1 7:1 19:1 21:1 24:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 19:1 21:1 23:1 34:1 37:1 40:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 123:1 -0 4:1 10:1 20:1 21:1 23:1 34:1 37:1 40:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -1 3:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 120:1 -0 4:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 123:1 -0 4:1 10:1 20:1 21:1 24:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -1 3:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 120:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 3:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 3:1 10:1 19:1 21:1 23:1 34:1 37:1 40:1 48:1 54:1 
55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 1:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -1 3:1 10:1 19:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 4:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -1 4:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -1 3:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 124:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -1 3:1 10:1 19:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 4:1 7:1 19:1 21:1 24:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 4:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 3:1 10:1 19:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 120:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 1:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 4:1 10:1 20:1 21:1 24:1 34:1 37:1 40:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 123:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 
48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 123:1 -0 1:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 6:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 124:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -1 3:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -1 3:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -1 4:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 3:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 4:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 124:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 123:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -1 3:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 3:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 
41:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 4:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -1 4:1 10:1 19:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 124:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 4:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 4:1 10:1 19:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 120:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 1:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -1 3:1 10:1 19:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 4:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -1 4:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 120:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 4:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 4:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 4:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -1 4:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 120:1 -0 4:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 
37:1 39:1 41:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -1 4:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 4:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 120:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 1:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 4:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -1 4:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -1 3:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -1 4:1 10:1 19:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 124:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 4:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -1 4:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 124:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 10:1 14:1 22:1 29:1 
-1 4:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1
[Several hundred further LIBSVM-format records deleted by this hunk are elided; each removed line has the form `<label> <index>:1 ...` with a 0/1 class label and sparse binary feature indices up to 126, from one of the deleted ml-xgboost demo data files. The original one-record-per-line layout of the diff was lost in extraction and is summarized here rather than reconstructed in full.]
39:1 48:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -0 4:1 9:1 12:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 7:1 20:1 22:1 27:1 
34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 10:1 19:1 22:1 25:1 34:1 36:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 4:1 10:1 12:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 1:1 9:1 19:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 122:1 -1 3:1 7:1 14:1 22:1 25:1 34:1 36:1 40:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 5:1 10:1 16:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 7:1 14:1 
22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -0 5:1 10:1 12:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 3:1 10:1 14:1 22:1 25:1 34:1 36:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -0 5:1 9:1 12:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -0 5:1 7:1 13:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 62:1 69:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 
4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -0 4:1 9:1 16:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 3:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 3:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -0 5:1 10:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 3:1 10:1 16:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 4:1 10:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -0 3:1 9:1 19:1 22:1 29:1 34:1 36:1 40:1 44:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 118:1 126:1 -0 5:1 10:1 16:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 16:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 122:1 -0 5:1 9:1 12:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 5:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 5:1 9:1 16:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 3:1 10:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -0 5:1 7:1 11:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 62:1 66:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 20:1 22:1 29:1 34:1 36:1 40:1 51:1 53:1 61:1 64:1 67:1 77:1 87:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 
123:1 -1 3:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 1:1 9:1 12:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 122:1 -1 4:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -1 3:1 7:1 20:1 22:1 29:1 34:1 36:1 40:1 51:1 53:1 61:1 64:1 67:1 77:1 87:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -0 4:1 10:1 18:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 3:1 10:1 11:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -0 3:1 7:1 13:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 65:1 69:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 1:1 9:1 19:1 21:1 29:1 34:1 36:1 39:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 122:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -1 3:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 5:1 9:1 20:1 22:1 29:1 34:1 36:1 40:1 51:1 53:1 61:1 64:1 67:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 3:1 9:1 13:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 62:1 69:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 62:1 69:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 20:1 22:1 29:1 34:1 36:1 40:1 51:1 53:1 61:1 64:1 67:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 3:1 9:1 16:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 4:1 9:1 12:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 3:1 7:1 13:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 62:1 66:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 5:1 10:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 1:1 9:1 16:1 21:1 29:1 34:1 36:1 39:1 46:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -0 4:1 10:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 
112:1 115:1 125:1 -1 3:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -1 3:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 3:1 9:1 16:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 5:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 77:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -0 5:1 10:1 16:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -1 4:1 9:1 16:1 21:1 29:1 34:1 36:1 39:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 122:1 -1 4:1 9:1 19:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 122:1 -1 3:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -1 3:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -0 4:1 9:1 19:1 22:1 29:1 34:1 36:1 40:1 44:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 119:1 126:1 -0 5:1 7:1 11:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 65:1 66:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 10:1 12:1 21:1 29:1 34:1 36:1 39:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 122:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 77:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -0 3:1 10:1 16:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 
95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 5:1 9:1 11:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 65:1 66:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 19:1 21:1 29:1 34:1 36:1 39:1 46:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -0 5:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 3:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 3:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 51:1 53:1 61:1 64:1 67:1 77:1 87:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -0 5:1 7:1 11:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 62:1 69:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 5:1 9:1 13:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 62:1 66:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -0 4:1 9:1 16:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 3:1 9:1 16:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 
92:1 95:1 102:1 108:1 117:1 120:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 62:1 69:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 3:1 10:1 12:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -0 5:1 10:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -1 1:1 10:1 16:1 21:1 29:1 34:1 36:1 39:1 46:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -1 4:1 10:1 16:1 21:1 29:1 34:1 36:1 39:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -0 4:1 9:1 15:1 22:1 29:1 34:1 36:1 40:1 49:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 119:1 126:1 -1 3:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 3:1 9:1 19:1 22:1 29:1 34:1 36:1 40:1 51:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 118:1 126:1 -1 3:1 9:1 20:1 22:1 29:1 34:1 36:1 40:1 51:1 53:1 61:1 64:1 67:1 77:1 87:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 4:1 9:1 19:1 21:1 29:1 34:1 36:1 39:1 46:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 3:1 9:1 19:1 22:1 29:1 34:1 36:1 40:1 44:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 119:1 126:1 -0 3:1 9:1 17:1 22:1 29:1 34:1 36:1 40:1 51:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 118:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 4:1 9:1 15:1 22:1 29:1 34:1 36:1 40:1 49:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 118:1 126:1 -0 5:1 10:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 10:1 12:1 21:1 29:1 34:1 36:1 39:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -1 1:1 10:1 19:1 21:1 29:1 34:1 36:1 39:1 46:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -1 4:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 4:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 
86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 19:1 21:1 29:1 34:1 36:1 39:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -1 3:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -0 3:1 10:1 12:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 1:1 10:1 16:1 21:1 29:1 34:1 36:1 39:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 122:1 -1 1:1 10:1 19:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -1 3:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -0 4:1 9:1 16:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 9:1 12:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -0 4:1 9:1 19:1 22:1 29:1 34:1 36:1 40:1 48:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 119:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -0 3:1 9:1 15:1 22:1 29:1 34:1 36:1 40:1 49:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 119:1 126:1 -0 5:1 10:1 18:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 1:1 9:1 12:1 21:1 29:1 34:1 36:1 39:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -0 3:1 9:1 19:1 22:1 29:1 34:1 36:1 40:1 48:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 119:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 
69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 
68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 
69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 1:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 113:1 115:1 121:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 
68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 107:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 
69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 1:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 113:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 
65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 5:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 113:1 118:1 121:1 -0 3:1 9:1 13:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 119:1 123:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 
[... remaining deleted rows of the preceding demo data file omitted; each row is one LIBSVM-format sample: a 0/1 label followed by sparse feature_index:value pairs ...]
diff --git a/ml-xgboost/demo/data/agaricus.txt.train b/ml-xgboost/demo/data/agaricus.txt.train
deleted file mode 100644
index 10c7902..0000000
--- a/ml-xgboost/demo/data/agaricus.txt.train
+++ /dev/null
@@ -1,6513 +0,0 @@
[... 6513 deleted LIBSVM-format training rows omitted ...]
95:1 102:1 105:1 117:1 120:1 -0 1:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -1 3:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 124:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 3:1 10:1 19:1 21:1 23:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 123:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 3:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 120:1 -0 1:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 1:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 4:1 10:1 19:1 21:1 24:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 1:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 3:1 7:1 20:1 21:1 23:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 1:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 3:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 123:1 -0 3:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 3:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 123:1 -0 3:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 3:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 4:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 1:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 1:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 1:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 3:1 7:1 20:1 21:1 24:1 34:1 37:1 40:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 1:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 4:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 
67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 123:1 -0 4:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 123:1 -0 3:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 4:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 1:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 4:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 120:1 -0 3:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 3:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 1:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 4:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 123:1 -0 3:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 4:1 7:1 19:1 21:1 23:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 6:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 124:1 -0 4:1 10:1 20:1 21:1 24:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 10:1 19:1 21:1 23:1 34:1 37:1 40:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 1:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 1:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 1:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 45:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 124:1 -0 4:1 10:1 19:1 21:1 23:1 34:1 37:1 40:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 6:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 124:1 -0 3:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 123:1 -0 3:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 123:1 -1 4:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -1 3:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 4:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 123:1 -0 1:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 4:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 
42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 1:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 1:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 1:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -1 3:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 120:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 124:1 -0 1:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 3:1 7:1 19:1 21:1 23:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 6:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 123:1 -0 1:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 3:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 1:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 4:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 123:1 -1 3:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 124:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 124:1 -0 1:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 4:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 123:1 -0 3:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 124:1 -0 3:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 123:1 -0 3:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -1 3:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 4:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 123:1 -1 4:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 3:1 7:1 19:1 22:1 
29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 6:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 4:1 10:1 19:1 21:1 23:1 34:1 37:1 40:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 3:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 45:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 3:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 123:1 -0 4:1 10:1 19:1 21:1 24:1 34:1 37:1 40:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 4:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 123:1 -1 3:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 1:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 3:1 10:1 20:1 21:1 23:1 34:1 37:1 40:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 3:1 10:1 20:1 21:1 23:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 1:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -1 3:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 120:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 4:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -1 3:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 1:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 3:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 4:1 7:1 20:1 21:1 24:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 1:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 3:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 120:1 -0 3:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 
-0 3:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 3:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -1 3:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 3:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 3:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 120:1 -0 4:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 3:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 124:1 -0 3:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -1 3:1 10:1 19:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 124:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -1 3:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 3:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -1 3:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 1:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 1:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -1 3:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -1 3:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 124:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 120:1 -0 4:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 4:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 123:1 -0 1:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 1:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 4:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 120:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 4:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 
98:1 105:1 114:1 120:1 -1 3:1 10:1 19:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 124:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 1:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 1:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 1:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 6:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 124:1 -0 4:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 1:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 3:1 7:1 19:1 21:1 24:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 4:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 123:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 124:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 4:1 7:1 20:1 21:1 24:1 34:1 37:1 40:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 6:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 124:1 -0 1:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 1:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 3:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 1:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 4:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 1:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 3:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 120:1 -0 4:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 123:1 -0 4:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 123:1 -0 3:1 10:1 20:1 21:1 24:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 
88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 1:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 1:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 4:1 7:1 19:1 21:1 24:1 34:1 37:1 40:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 4:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 120:1 -1 3:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -1 3:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 3:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 4:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 1:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 123:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 124:1 -0 1:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -1 3:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -1 3:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 1:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 1:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 4:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 120:1 -0 3:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 3:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 7:1 20:1 21:1 24:1 34:1 37:1 40:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 4:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 123:1 -0 3:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 120:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 123:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 20:1 21:1 23:1 34:1 37:1 40:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 1:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 
65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 1:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 3:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 1:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 1:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 3:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 123:1 -0 4:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -1 3:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 1:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 1:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -1 3:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -1 3:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 123:1 -0 4:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 123:1 -0 4:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 123:1 -0 1:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 6:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 124:1 -0 3:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 4:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 123:1 -0 1:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 1:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 1:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 1:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 3:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 6:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 45:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 4:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 4:1 10:1 20:1 21:1 24:1 34:1 37:1 40:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 20:1 21:1 23:1 34:1 36:1 
39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -1 3:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 120:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 1:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 3:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 4:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 120:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -1 3:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 124:1 -0 3:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 1:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 3:1 7:1 19:1 21:1 23:1 34:1 37:1 40:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 124:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 1:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 3:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 1:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 3:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 1:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 1:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -1 3:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 3:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 123:1 -0 1:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 4:1 10:1 19:1 21:1 23:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 1:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 1:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 1:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 4:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 120:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 1:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 1:1 10:1 19:1 
21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 4:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 120:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 45:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 3:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 123:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 45:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 124:1 -0 1:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 3:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -1 3:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 124:1 -0 3:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 1:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 123:1 -0 1:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 4:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 123:1 -0 3:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -1 3:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 120:1 -1 3:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 124:1 -0 1:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -1 3:1 10:1 19:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 120:1 -0 1:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -1 3:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 1:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 
-0 1:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 4:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 1:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 120:1 -0 1:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 1:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 1:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -1 3:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 120:1 -1 3:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 3:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 120:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 120:1 -0 3:1 10:1 19:1 21:1 23:1 34:1 37:1 40:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 1:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 1:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 1:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 3:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 123:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 4:1 10:1 20:1 21:1 24:1 34:1 37:1 40:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 6:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 4:1 7:1 20:1 21:1 23:1 34:1 37:1 40:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 120:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -1 3:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 120:1 -0 3:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -1 3:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 124:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 3:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 1:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 6:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 
102:1 105:1 119:1 124:1 -0 3:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 1:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 1:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 37:1 40:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 1:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 4:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 6:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 45:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 124:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 6:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 1:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 1:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 4:1 9:1 11:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 123:1 -0 3:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 4:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 3:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 4:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 123:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 124:1 -0 4:1 9:1 11:1 21:1 23:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 123:1 -0 3:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 3:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 123:1 -0 3:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 4:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 120:1 -0 1:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 4:1 7:1 19:1 21:1 23:1 34:1 37:1 40:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 
86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 4:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 3:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 4:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 120:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 123:1 -0 3:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 4:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 120:1 -0 3:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 1:1 10:1 20:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 3:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 1:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 3:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 3:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 4:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 123:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 4:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -1 4:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -1 4:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -1 3:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 120:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 3:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 
58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 120:1 -0 3:1 10:1 20:1 21:1 24:1 34:1 37:1 40:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 4:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -1 3:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 124:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 124:1 -0 1:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 122:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -1 4:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 120:1 -0 3:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 124:1 -0 4:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 1:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 1:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 51:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 1:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 4:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 1:1 10:1 19:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 
[… diff hunk condensed: several hundred deleted data records in sparse LibSVM format — each original diff line a `-`-prefixed record consisting of a binary label (0/1) followed by `feature:value` pairs with feature indices up to 126 — from a demo dataset file removed together with the ml-xgboost directory; the record-per-line structure was lost in extraction and is summarized here rather than reproduced …]
19:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 4:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 123:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -1 4:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -1 3:1 10:1 19:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 124:1 -0 4:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 120:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -1 4:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 124:1 -1 4:1 10:1 19:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -1 4:1 10:1 19:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -1 4:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 124:1 -0 3:1 
10:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -1 4:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 41:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 120:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 4:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 4:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -1 4:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 120:1 -0 3:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -1 3:1 10:1 19:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 116:1 120:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 
-0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 7:1 20:1 21:1 23:1 34:1 37:1 40:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 
120:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 19:1 21:1 24:1 34:1 37:1 40:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -1 4:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 4:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 4:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 
117:1 120:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 4:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 120:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 4:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -1 4:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 124:1 -0 1:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 4:1 10:1 19:1 21:1 24:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 111:1 118:1 126:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 
118:1 126:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 10:1 19:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 120:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 4:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -1 3:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -1 4:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 4:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 
106:1 117:1 124:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 4:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 120:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 4:1 9:1 19:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 3:1 9:1 20:1 21:1 24:1 34:1 36:1 39:1 42:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 120:1 -1 4:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 4:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -1 4:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 4:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 4:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 10:1 20:1 21:1 23:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 19:1 21:1 23:1 34:1 37:1 40:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 
102:1 106:1 118:1 126:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 1:1 9:1 19:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 114:1 120:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 9:1 20:1 21:1 23:1 34:1 36:1 39:1 48:1 53:1 60:1 65:1 67:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 123:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 9:1 19:1 21:1 23:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 122:1 -0 4:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 42:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 4:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -1 4:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 120:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 
92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 4:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 124:1 -0 4:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 10:1 20:1 21:1 24:1 34:1 36:1 39:1 41:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 116:1 122:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 4:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 42:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 4:1 9:1 11:1 21:1 30:1 34:1 36:1 40:1 48:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 124:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 1:1 10:1 19:1 21:1 23:1 34:1 36:1 39:1 45:1 53:1 56:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 122:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -1 3:1 10:1 19:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 69:1 77:1 
86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 10:1 19:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 120:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 
69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 4:1 10:1 11:1 21:1 30:1 34:1 36:1 40:1 42:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 124:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 10:1 19:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 120:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 
53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 114:1 120:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 10:1 16:1 22:1 25:1 34:1 37:1 40:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 126:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 
36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 41:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 105:1 117:1 120:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 4:1 10:1 19:1 21:1 30:1 34:1 36:1 40:1 51:1 53:1 58:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 120:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 44:1 54:1 58:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 98:1 106:1 117:1 120:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 7:1 14:1 21:1 
[deleted data hunk: several hundred removed records of LIBSVM sparse-format demo/test data, one record per deleted line in the form "<label> <index>:1 <index>:1 ...", with binary labels 0/1 and one-hot feature indices up to 126; the hunk is line-wrapped mid-record in this copy and cut off partway through a record at the end.]
34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 16:1 22:1 25:1 34:1 37:1 40:1 49:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 25:1 34:1 36:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 10:1 19:1 22:1 25:1 34:1 36:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 
16:1 22:1 25:1 34:1 36:1 40:1 42:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 10:1 19:1 22:1 25:1 34:1 37:1 40:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 3:1 10:1 19:1 22:1 25:1 34:1 36:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 10:1 14:1 22:1 25:1 34:1 37:1 40:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 
126:1 -1 3:1 10:1 16:1 22:1 25:1 34:1 36:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 7:1 14:1 22:1 25:1 34:1 36:1 40:1 49:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 10:1 19:1 22:1 25:1 34:1 37:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 
106:1 118:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 19:1 22:1 25:1 34:1 37:1 40:1 42:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 126:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 3:1 10:1 19:1 22:1 25:1 34:1 37:1 40:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 7:1 16:1 22:1 25:1 34:1 37:1 40:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 7:1 19:1 22:1 25:1 34:1 36:1 40:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 10:1 14:1 22:1 25:1 34:1 37:1 40:1 49:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 16:1 22:1 25:1 34:1 37:1 40:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 
92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 16:1 22:1 25:1 34:1 36:1 40:1 49:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 10:1 16:1 22:1 25:1 34:1 36:1 40:1 49:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 7:1 19:1 22:1 25:1 34:1 36:1 40:1 49:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 
77:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 10:1 14:1 22:1 25:1 34:1 37:1 40:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 10:1 19:1 22:1 25:1 34:1 37:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 10:1 19:1 22:1 25:1 34:1 37:1 40:1 49:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 126:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 10:1 19:1 22:1 25:1 34:1 36:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 
55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 3:1 10:1 14:1 22:1 25:1 34:1 37:1 40:1 42:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 16:1 22:1 25:1 34:1 36:1 40:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 10:1 14:1 22:1 25:1 34:1 36:1 40:1 49:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 7:1 16:1 22:1 25:1 34:1 37:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 3:1 7:1 19:1 22:1 25:1 34:1 37:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 
39:1 48:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 10:1 16:1 22:1 25:1 34:1 37:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 3:1 7:1 16:1 22:1 25:1 34:1 37:1 40:1 42:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 10:1 19:1 22:1 25:1 34:1 37:1 40:1 42:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 19:1 22:1 25:1 34:1 36:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 14:1 21:1 
29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 16:1 22:1 25:1 34:1 36:1 40:1 49:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 10:1 19:1 22:1 25:1 34:1 36:1 40:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 126:1 -1 3:1 7:1 16:1 22:1 25:1 34:1 37:1 40:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 10:1 16:1 22:1 25:1 34:1 37:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 7:1 19:1 22:1 25:1 34:1 37:1 40:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 25:1 34:1 37:1 40:1 49:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 10:1 14:1 22:1 25:1 34:1 37:1 40:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 
9:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 7:1 14:1 22:1 25:1 34:1 37:1 40:1 49:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 10:1 14:1 22:1 25:1 34:1 36:1 40:1 49:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 10:1 16:1 22:1 25:1 34:1 37:1 40:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 3:1 10:1 16:1 22:1 25:1 34:1 37:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 10:1 19:1 22:1 25:1 34:1 37:1 40:1 42:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 
119:1 126:1 -1 3:1 7:1 16:1 22:1 25:1 34:1 36:1 40:1 42:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 25:1 34:1 36:1 40:1 42:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 3:1 10:1 19:1 22:1 25:1 34:1 37:1 40:1 42:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 10:1 16:1 22:1 25:1 34:1 36:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 10:1 14:1 22:1 25:1 34:1 37:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 126:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 10:1 14:1 22:1 25:1 34:1 36:1 40:1 42:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 10:1 14:1 22:1 25:1 34:1 36:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 25:1 34:1 37:1 40:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 
95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 25:1 34:1 37:1 40:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 16:1 22:1 25:1 34:1 36:1 40:1 42:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 10:1 19:1 22:1 25:1 34:1 36:1 40:1 49:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 7:1 19:1 22:1 25:1 34:1 37:1 40:1 49:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 117:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 3:1 7:1 19:1 22:1 25:1 34:1 37:1 40:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 126:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 25:1 34:1 37:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 10:1 16:1 22:1 25:1 34:1 36:1 40:1 42:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 
84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 25:1 34:1 36:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 3:1 10:1 19:1 22:1 25:1 34:1 36:1 40:1 49:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 19:1 22:1 25:1 34:1 37:1 40:1 49:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 
65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -0 3:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 7:1 16:1 22:1 25:1 34:1 36:1 40:1 49:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 3:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 10:1 19:1 22:1 25:1 34:1 37:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 10:1 16:1 22:1 25:1 34:1 36:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -0 4:1 7:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 
[Omitted: several hundred contiguous deleted data lines from a LIBSVM-format sparse data file bundled with ml-xgboost (likely demo data). Each removed record has the form "label index:1 index:1 ..." with a binary 0/1 label and ascending 1-based feature indices up to 126; the extraction flattened many diff lines into single blobs. No other information is carried by the raw dump.]
92:1 95:1 100:1 108:1 119:1 120:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -0 3:1 9:1 12:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 77:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 
79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 3:1 10:1 12:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 10:1 14:1 22:1 25:1 34:1 36:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 82:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 
64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 5:1 9:1 12:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 
53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 4:1 7:1 13:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 65:1 66:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 7:1 20:1 22:1 29:1 34:1 36:1 40:1 51:1 53:1 61:1 64:1 67:1 77:1 87:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -0 4:1 10:1 11:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 14:1 22:1 25:1 34:1 37:1 40:1 49:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -0 3:1 10:1 12:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 
39:1 45:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 3:1 9:1 17:1 22:1 29:1 34:1 36:1 40:1 44:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 118:1 126:1 -0 4:1 9:1 12:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 4:1 10:1 19:1 21:1 29:1 34:1 36:1 39:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 122:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 73:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -0 3:1 9:1 17:1 22:1 29:1 
34:1 36:1 40:1 49:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 118:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 7:1 19:1 22:1 25:1 34:1 36:1 40:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 7:1 20:1 
22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 
7:1 14:1 22:1 25:1 34:1 36:1 40:1 49:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 126:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 4:1 9:1 17:1 22:1 29:1 34:1 36:1 40:1 44:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 118:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 
124:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 3:1 9:1 17:1 22:1 29:1 34:1 36:1 40:1 51:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 119:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 4:1 10:1 16:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 82:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 7:1 19:1 22:1 25:1 34:1 36:1 40:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 126:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 
108:1 117:1 124:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 10:1 16:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 4:1 9:1 17:1 22:1 29:1 34:1 36:1 40:1 49:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 118:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 73:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 119:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 
95:1 100:1 108:1 119:1 123:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 7:1 16:1 22:1 25:1 34:1 37:1 40:1 48:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 117:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 
88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 3:1 10:1 16:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 10:1 16:1 22:1 25:1 34:1 37:1 40:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 105:1 118:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -1 3:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 77:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 
75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 5:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 51:1 53:1 61:1 64:1 67:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 1:1 7:1 20:1 22:1 29:1 34:1 36:1 40:1 51:1 53:1 61:1 64:1 67:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -0 5:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 5:1 9:1 11:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 62:1 69:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 1:1 10:1 12:1 21:1 29:1 34:1 36:1 39:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -1 3:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 102:1 106:1 119:1 126:1 -1 1:1 10:1 19:1 21:1 29:1 34:1 36:1 39:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 122:1 -0 4:1 9:1 16:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -0 5:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 
64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 5:1 10:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 4:1 7:1 14:1 21:1 29:1 34:1 36:1 39:1 49:1 54:1 55:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 4:1 7:1 18:1 21:1 29:1 34:1 36:1 39:1 42:1 54:1 55:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 102:1 106:1 118:1 126:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -0 4:1 9:1 16:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 
[deleted data hunk: LIBSVM-format sample records, one per removed diff line of the form `-<label> index:1 index:1 ...`, with binary labels 0/1 and sparse one-hot features indexed up to 126 — consistent with the agaricus demo data bundled under ml-xgboost, which this patch removes wholesale. The original one-record-per-line layout was lost in extraction, with records fused onto and split across physical lines; only this summary of the hunk is kept here.]
115:1 125:1 -0 3:1 9:1 12:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 77:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 3:1 9:1 11:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 65:1 69:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 1:1 9:1 12:1 21:1 29:1 34:1 36:1 39:1 46:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 9:1 19:1 21:1 29:1 34:1 36:1 39:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -0 5:1 10:1 18:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 77:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 5:1 10:1 11:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 77:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 9:1 20:1 22:1 29:1 34:1 36:1 40:1 51:1 53:1 61:1 64:1 67:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -0 4:1 10:1 12:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 4:1 9:1 16:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -0 5:1 10:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 
100:1 108:1 119:1 123:1 -0 4:1 9:1 16:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 4:1 9:1 15:1 22:1 29:1 34:1 36:1 40:1 51:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 119:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 7:1 20:1 22:1 29:1 34:1 36:1 40:1 51:1 53:1 61:1 64:1 67:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 4:1 9:1 17:1 22:1 29:1 34:1 36:1 40:1 51:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 118:1 126:1 -1 3:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 3:1 10:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -0 4:1 10:1 12:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 5:1 7:1 13:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 65:1 69:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 5:1 9:1 16:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 4:1 10:1 11:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 4:1 10:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -0 4:1 7:1 13:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 62:1 69:1 77:1 79:1 88:1 92:1 
95:1 98:1 112:1 118:1 121:1 -1 1:1 9:1 12:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -0 5:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -0 4:1 10:1 16:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 4:1 9:1 19:1 22:1 29:1 34:1 36:1 40:1 49:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 118:1 126:1 -1 3:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 5:1 8:1 19:1 21:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 112:1 115:1 121:1 -0 5:1 10:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -0 4:1 10:1 18:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 77:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 1:1 10:1 19:1 21:1 29:1 34:1 36:1 39:1 46:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 122:1 -1 4:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 3:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 3:1 9:1 15:1 22:1 29:1 34:1 36:1 40:1 49:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 118:1 126:1 -0 5:1 10:1 12:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 3:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 3:1 9:1 12:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 9:1 19:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 
86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 4:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -0 5:1 9:1 16:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 9:1 12:1 21:1 29:1 34:1 36:1 39:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 122:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 5:1 7:1 13:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 65:1 66:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 1:1 10:1 12:1 21:1 29:1 34:1 36:1 39:1 46:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 122:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -0 3:1 10:1 12:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -1 3:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -0 3:1 9:1 19:1 22:1 29:1 34:1 36:1 40:1 49:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 118:1 126:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 5:1 9:1 12:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -0 4:1 9:1 15:1 22:1 29:1 34:1 36:1 40:1 48:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 119:1 126:1 -1 1:1 10:1 16:1 21:1 29:1 34:1 36:1 39:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -1 3:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -0 4:1 9:1 13:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 62:1 66:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 3:1 9:1 17:1 22:1 29:1 34:1 36:1 40:1 48:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 118:1 126:1 -1 3:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 
66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 3:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 3:1 10:1 16:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 10:1 16:1 21:1 29:1 34:1 36:1 39:1 46:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 122:1 -1 4:1 9:1 11:1 22:1 29:1 34:1 36:1 40:1 51:1 53:1 61:1 64:1 67:1 77:1 87:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 10:1 12:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -1 1:1 10:1 16:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 122:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 1:1 9:1 19:1 21:1 29:1 34:1 36:1 39:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -0 4:1 9:1 11:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 65:1 66:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 4:1 7:1 11:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 65:1 69:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -0 4:1 10:1 18:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 3:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 48:1 
54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 4:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -0 4:1 7:1 13:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 65:1 69:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 5:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 77:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 3:1 10:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -0 4:1 10:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -0 4:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 5:1 10:1 11:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 4:1 9:1 19:1 22:1 29:1 34:1 36:1 40:1 51:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 118:1 126:1 -1 3:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -1 4:1 10:1 19:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -1 3:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 126:1 -1 4:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 1:1 10:1 19:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 122:1 -1 3:1 9:1 11:1 22:1 27:1 34:1 
36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 1:1 10:1 12:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 122:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 3:1 9:1 18:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 5:1 10:1 12:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 3:1 10:1 18:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 10:1 12:1 21:1 29:1 34:1 36:1 39:1 46:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 122:1 -0 4:1 10:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 4:1 9:1 12:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 3:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -0 3:1 9:1 16:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 3:1 9:1 17:1 22:1 29:1 34:1 36:1 40:1 49:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 119:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 3:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -0 5:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -0 3:1 9:1 12:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 10:1 12:1 21:1 27:1 
34:1 36:1 39:1 48:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -0 5:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 3:1 10:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 3:1 9:1 19:1 22:1 29:1 34:1 36:1 40:1 48:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 118:1 126:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -0 4:1 10:1 18:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 120:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 65:1 69:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -0 3:1 10:1 16:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 4:1 7:1 14:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 3:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -0 5:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 3:1 9:1 16:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 4:1 9:1 12:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 3:1 10:1 18:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 4:1 7:1 
14:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 3:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 3:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -1 1:1 9:1 20:1 22:1 29:1 34:1 36:1 40:1 51:1 53:1 61:1 64:1 67:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 5:1 10:1 16:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 4:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 1:1 9:1 16:1 21:1 29:1 34:1 36:1 39:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 122:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 62:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 1:1 10:1 12:1 21:1 29:1 34:1 36:1 39:1 46:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -0 3:1 9:1 11:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 62:1 69:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 48:1 53:1 55:1 64:1 68:1 70:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 123:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -0 5:1 10:1 18:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 76:1 85:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 7:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 123:1 -1 1:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 51:1 53:1 61:1 64:1 67:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 70:1 80:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -1 3:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 61:1 65:1 69:1 76:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -0 4:1 9:1 11:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 62:1 66:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 12:1 21:1 29:1 34:1 36:1 39:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -1 5:1 7:1 11:1 22:1 29:1 34:1 36:1 40:1 51:1 53:1 61:1 64:1 67:1 77:1 87:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 79:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 
7:1 14:1 22:1 27:1 34:1 36:1 39:1 44:1 53:1 55:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 100:1 108:1 119:1 126:1 -1 3:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 16:1 21:1 29:1 34:1 36:1 39:1 45:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 122:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 120:1 -1 3:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -0 3:1 7:1 11:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 55:1 62:1 66:1 77:1 79:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 4:1 9:1 19:1 22:1 29:1 34:1 36:1 40:1 51:1 53:1 61:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 99:1 108:1 119:1 126:1 -1 4:1 10:1 19:1 21:1 27:1 34:1 36:1 39:1 51:1 54:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 124:1 -1 4:1 10:1 16:1 21:1 29:1 34:1 36:1 39:1 46:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 109:1 118:1 120:1 -1 4:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -1 3:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 80:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 4:1 10:1 14:1 21:1 27:1 34:1 36:1 39:1 44:1 54:1 55:1 62:1 69:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 117:1 120:1 -1 3:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 20:1 22:1 27:1 34:1 36:1 39:1 45:1 53:1 55:1 64:1 68:1 71:1 84:1 88:1 92:1 95:1 100:1 108:1 118:1 120:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 5:1 9:1 12:1 21:1 29:1 34:1 36:1 39:1 50:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 98:1 112:1 115:1 125:1 -1 4:1 10:1 12:1 21:1 27:1 34:1 36:1 39:1 48:1 54:1 55:1 65:1 66:1 77:1 86:1 88:1 92:1 95:1 102:1 108:1 118:1 124:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 
3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 1:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 106:1 115:1 121:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 106:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 
3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 
4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 3:1 10:1 13:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 
9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 1:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 5:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 
4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 1:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 5:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 
9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 1:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 5:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 5:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 
18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 5:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 1:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 
9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 9:1 
11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 
11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 5:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 1:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 110:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 
18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 5:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 28:1 34:1 36:1 39:1 51:1 53:1 56:1 64:1 67:1 72:1 81:1 88:1 92:1 94:1 101:1 112:1 115:1 126:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 110:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 
11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 5:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 
18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 
18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 5:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 5:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 
22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 
22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 
11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 1:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 13:1 22:1 28:1 32:1 36:1 39:1 51:1 53:1 56:1 64:1 67:1 72:1 81:1 88:1 92:1 94:1 101:1 112:1 115:1 126:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 
11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 
18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 1:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 
18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 5:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -0 1:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 110:1 115:1 121:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 
18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 10:1 
-[... deleted data hunk: several hundred LIBSVM-format records, one per line, each of the form "<label> <index>:1 <index>:1 ..." with a binary label (0 or 1) and sparse one-hot features (indices 1-126) ...]
22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 106:1 118:1 121:1 -1 5:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 5:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 1:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 106:1 118:1 121:1 -1 5:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 1:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 5:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 1:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 5:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 110:1 118:1 121:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 106:1 118:1 121:1 -1 5:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 106:1 115:1 121:1 -1 5:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 5:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -0 1:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 106:1 118:1 121:1 -1 5:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 4:1 
10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 107:1 118:1 121:1 -0 4:1 9:1 11:1 22:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 63:1 67:1 70:1 79:1 88:1 92:1 96:1 102:1 112:1 119:1 123:1 -1 5:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 4:1 10:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 119:1 123:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 18:1 22:1 28:1 32:1 36:1 39:1 52:1 53:1 56:1 64:1 67:1 72:1 81:1 88:1 92:1 94:1 101:1 112:1 115:1 126:1 -0 1:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 110:1 118:1 121:1 -1 5:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 3:1 9:1 13:1 22:1 28:1 34:1 36:1 39:1 51:1 53:1 56:1 64:1 67:1 72:1 81:1 88:1 92:1 94:1 101:1 112:1 115:1 126:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 107:1 115:1 121:1 -0 1:1 9:1 11:1 22:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 63:1 67:1 70:1 79:1 88:1 92:1 96:1 102:1 112:1 119:1 126:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 1:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 4:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 3:1 9:1 11:1 22:1 28:1 32:1 36:1 39:1 52:1 53:1 56:1 64:1 67:1 72:1 81:1 88:1 92:1 94:1 101:1 112:1 115:1 126:1 -1 5:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 107:1 115:1 121:1 -1 3:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 113:1 118:1 121:1 -1 5:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 9:1 20:1 22:1 29:1 34:1 37:1 40:1 51:1 53:1 56:1 63:1 67:1 78:1 87:1 88:1 93:1 95:1 98:1 112:1 115:1 121:1 -0 5:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 113:1 115:1 121:1 -1 5:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 
118:1 123:1 -0 5:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -0 5:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -0 4:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 107:1 115:1 121:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 5:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 5:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 113:1 118:1 121:1 -0 1:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 5:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 106:1 118:1 121:1 -1 5:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 1:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 5:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 1:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 5:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -0 5:1 10:1 11:1 22:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 63:1 67:1 70:1 79:1 88:1 92:1 96:1 102:1 112:1 119:1 126:1 -1 5:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 
95:1 98:1 112:1 118:1 121:1 -0 1:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 110:1 118:1 121:1 -0 1:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 5:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 4:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 113:1 115:1 121:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 1:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 107:1 115:1 121:1 -1 5:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 5:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 5:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 5:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 9:1 13:1 22:1 28:1 32:1 36:1 39:1 51:1 53:1 56:1 64:1 67:1 72:1 81:1 88:1 92:1 94:1 101:1 112:1 115:1 126:1 -1 3:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 5:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 3:1 9:1 11:1 22:1 28:1 34:1 36:1 39:1 51:1 53:1 56:1 64:1 67:1 72:1 81:1 88:1 92:1 94:1 101:1 112:1 115:1 126:1 -1 4:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 107:1 115:1 121:1 -1 5:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 1:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -0 1:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 113:1 118:1 121:1 -1 5:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 64:1 68:1 77:1 86:1 
88:1 92:1 96:1 102:1 112:1 116:1 120:1 -0 1:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 5:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 4:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 106:1 118:1 121:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 106:1 115:1 121:1 -1 5:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 4:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 106:1 115:1 121:1 -1 5:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 1:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 4:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 107:1 118:1 121:1 -1 5:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 107:1 118:1 121:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 107:1 115:1 121:1 -1 5:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 5:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 65:1 69:1 
77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -0 1:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 5:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 107:1 118:1 121:1 -1 5:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 5:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -0 1:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 107:1 118:1 121:1 -0 5:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 5:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 106:1 115:1 121:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 4:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 4:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 107:1 118:1 121:1 -1 5:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 1:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 5:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 106:1 115:1 121:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -0 3:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 4:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 4:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 
61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 4:1 9:1 14:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 118:1 123:1 -1 5:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 5:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -0 4:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 106:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 1:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 5:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 4:1 9:1 11:1 22:1 28:1 32:1 36:1 39:1 52:1 53:1 56:1 64:1 67:1 72:1 81:1 88:1 92:1 94:1 101:1 112:1 115:1 126:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 110:1 118:1 121:1 -0 1:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -0 1:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 110:1 118:1 121:1 -0 4:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 110:1 118:1 121:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 5:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -0 4:1 9:1 16:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 118:1 123:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 5:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 113:1 115:1 121:1 -1 5:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 5:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 4:1 10:1 11:1 22:1 29:1 34:1 36:1 39:1 
51:1 53:1 55:1 63:1 67:1 70:1 79:1 88:1 92:1 96:1 102:1 112:1 119:1 123:1 -1 5:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 3:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 1:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 110:1 115:1 121:1 -0 5:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -0 1:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 110:1 115:1 121:1 -1 5:1 9:1 18:1 22:1 28:1 32:1 36:1 39:1 51:1 53:1 56:1 64:1 67:1 72:1 81:1 88:1 92:1 94:1 101:1 112:1 115:1 126:1 -1 5:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 113:1 118:1 121:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 110:1 115:1 121:1 -1 5:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 4:1 10:1 13:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 118:1 123:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 106:1 115:1 121:1 -0 5:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -0 4:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 110:1 115:1 121:1 -0 5:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 3:1 9:1 11:1 21:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 119:1 123:1 -0 1:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 4:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 113:1 115:1 121:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 106:1 118:1 121:1 -0 1:1 10:1 19:1 22:1 29:1 
34:1 37:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 3:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 1:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -0 1:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -0 1:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 5:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 110:1 118:1 121:1 -1 5:1 9:1 13:1 22:1 28:1 32:1 36:1 39:1 52:1 53:1 56:1 64:1 67:1 72:1 81:1 88:1 92:1 94:1 101:1 112:1 115:1 126:1 -1 5:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 1:1 9:1 11:1 22:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 63:1 67:1 70:1 79:1 88:1 92:1 96:1 102:1 112:1 119:1 123:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 1:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 107:1 115:1 121:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 5:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 5:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 5:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 110:1 115:1 121:1 -1 5:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 4:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 113:1 118:1 121:1 -1 5:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 106:1 118:1 121:1 -0 1:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 106:1 115:1 121:1 -0 3:1 9:1 11:1 22:1 29:1 34:1 36:1 39:1 51:1 53:1 55:1 63:1 67:1 70:1 79:1 88:1 92:1 96:1 102:1 112:1 119:1 123:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -0 4:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 110:1 118:1 121:1 -1 3:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 113:1 118:1 121:1 -0 4:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 113:1 118:1 121:1 -0 1:1 
10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 107:1 118:1 121:1 -0 5:1 10:1 14:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -0 5:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 5:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 1:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -0 3:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -0 1:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 4:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 113:1 115:1 121:1 -0 4:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 106:1 118:1 121:1 -1 5:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 5:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 107:1 115:1 121:1 -0 1:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 107:1 118:1 121:1 -1 5:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 1:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 113:1 118:1 121:1 -1 5:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 107:1 115:1 121:1 -0 4:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 107:1 118:1 121:1 -0 4:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 113:1 115:1 121:1 -1 5:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 1:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -1 5:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 
118:1 126:1 -1 5:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 3:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -0 4:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 110:1 118:1 121:1 -1 5:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 106:1 118:1 121:1 -0 1:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 110:1 118:1 121:1 -1 5:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 5:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 107:1 118:1 121:1 -0 1:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 107:1 115:1 121:1 -0 4:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 107:1 115:1 121:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 106:1 115:1 121:1 -1 5:1 9:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 4:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 110:1 115:1 121:1 -0 1:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 113:1 115:1 121:1 -0 4:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 110:1 118:1 121:1 -0 1:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -1 5:1 10:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 9:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 1:1 7:1 14:1 22:1 29:1 34:1 37:1 39:1 48:1 53:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 117:1 120:1 -0 5:1 7:1 19:1 22:1 29:1 34:1 37:1 39:1 45:1 53:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 96:1 
102:1 112:1 117:1 120:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 110:1 118:1 121:1 -1 3:1 10:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 106:1 118:1 121:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 10:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 18:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 5:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -1 3:1 10:1 11:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 3:1 9:1 13:1 22:1 28:1 34:1 36:1 39:1 52:1 53:1 56:1 64:1 67:1 72:1 81:1 88:1 92:1 94:1 101:1 112:1 115:1 126:1 -1 5:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 75:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 68:1 77:1 84:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 4:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 107:1 118:1 121:1 -1 5:1 10:1 18:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 123:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 106:1 115:1 121:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 110:1 115:1 121:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 106:1 118:1 121:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 113:1 118:1 121:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 106:1 118:1 121:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 106:1 115:1 121:1 -1 5:1 9:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -0 1:1 10:1 19:1 22:1 29:1 34:1 37:1 39:1 51:1 53:1 61:1 65:1 69:1 77:1 86:1 88:1 92:1 96:1 102:1 112:1 116:1 120:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 106:1 118:1 121:1 -0 5:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 47:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 107:1 118:1 121:1 -1 4:1 9:1 13:1 22:1 28:1 32:1 36:1 39:1 52:1 53:1 56:1 64:1 67:1 72:1 81:1 88:1 92:1 94:1 101:1 112:1 115:1 126:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 110:1 118:1 121:1 -1 5:1 9:1 11:1 22:1 31:1 34:1 36:1 40:1 43:1 54:1 61:1 65:1 68:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 121:1 -1 5:1 10:1 18:1 22:1 26:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -1 5:1 9:1 11:1 22:1 27:1 34:1 36:1 40:1 43:1 54:1 61:1 64:1 69:1 75:1 86:1 88:1 92:1 95:1 98:1 112:1 118:1 126:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 90:1 95:1 102:1 107:1 118:1 121:1 -0 4:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 42:1 53:1 61:1 65:1 69:1 74:1 83:1 88:1 91:1 95:1 102:1 107:1 115:1 121:1 -0 3:1 10:1 11:1 22:1 29:1 32:1 36:1 39:1 52:1 53:1 61:1 65:1 69:1 74:1 
83:1 88:1 91:1 95:1 102:1 110:1 115:1 121:1 diff --git a/ml-xgboost/demo/data/dermatology.data.test b/ml-xgboost/demo/data/dermatology.data.test deleted file mode 100644 index 7619d99..0000000 --- a/ml-xgboost/demo/data/dermatology.data.test +++ /dev/null @@ -1,110 +0,0 @@ -3 0:2 1:1 2:1 3:0 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:1 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -3 0:3 1:2 2:1 3:1 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:1 13:0 14:0 15:3 16:2 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:1 26:0 27:1 28:0 29:0 30:0 31:2 32:0 -3 0:1 1:2 2:2 3:0 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:1 14:0 15:2 16:2 17:2 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:1 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -3 0:2 1:2 2:2 3:0 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -3 0:1 1:2 2:2 3:0 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:3 16:2 17:2 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:1 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -3 0:2 1:2 2:2 3:1 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:2 18:2 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:1 28:0 29:0 30:0 31:2 32:0 -4 0:2 1:1 2:0 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:2 15:1 16:3 17:2 18:2 19:1 20:3 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:3 32:0 -4 0:1 1:1 2:1 3:3 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:3 15:2 16:3 17:1 18:2 19:2 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:2 32:0 -4 0:1 1:1 2:0 3:2 4:0 5:0 6:0 7:0 8:1 9:0 10:0 11:0 12:0 13:0 14:3 15:2 16:3 17:2 18:1 19:1 20:3 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:3 32:0 -4 0:1 1:1 2:0 3:3 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:2 15:1 16:3 17:1 18:1 19:0 20:2 21:0 22:1 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:3 32:0 -5 0:2 1:2 2:1 3:1 4:0 5:0 6:2 7:0 8:2 9:0 10:1 11:0 12:0 13:0 14:0 15:1 16:1 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:1 30:2 31:2 32:0 -5 0:3 1:2 2:0 3:1 4:0 5:0 6:2 7:0 8:1 9:0 10:1 11:0 12:0 13:0 14:0 15:1 16:2 17:1 18:2 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:1 30:2 31:2 32:0 -5 0:2 1:2 2:1 3:1 4:0 5:0 6:2 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:1 16:1 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:1 30:2 31:2 32:0 -3 0:3 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:3 16:2 17:1 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:1 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -3 0:2 1:2 2:1 3:0 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:1 26:0 27:3 28:0 29:0 30:0 31:2 32:0 -3 0:2 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:3 16:1 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:1 26:0 27:2 28:0 29:0 30:0 31:3 32:0 -0 0:3 1:3 2:2 3:0 4:1 5:0 6:0 7:0 8:2 9:2 10:1 11:0 12:0 13:0 14:0 15:0 16:2 17:1 18:3 19:3 20:3 21:2 22:1 23:0 24:0 25:1 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:2 2:3 3:1 4:0 5:0 6:0 7:0 8:3 9:2 10:1 11:0 12:0 13:0 14:0 15:1 16:2 17:0 18:3 19:3 20:3 21:2 22:1 23:1 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:3 32:0 -0 0:1 1:2 2:2 3:1 4:1 5:0 6:0 7:0 8:2 9:2 10:1 11:0 12:0 13:1 14:0 15:0 16:2 17:0 18:2 19:3 20:3 21:3 22:2 23:1 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:2 2:3 3:1 4:0 5:0 6:0 7:0 8:1 9:2 10:0 11:0 12:0 13:1 14:0 15:0 16:2 17:0 18:2 19:3 20:3 21:2 22:1 23:1 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:3 1:2 2:2 3:2 4:0 5:0 6:0 7:0 8:2 9:1 10:0 11:0 12:0 13:1 14:0 15:0 16:2 17:0 18:3 19:2 20:3 21:2 22:2 23:1 24:0 
25:2 26:0 27:0 28:0 29:0 30:0 31:3 32:0 -1 0:2 1:2 2:2 3:1 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:1 18:2 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:2 32:0 -1 0:3 1:2 2:2 3:1 4:0 5:0 6:0 7:0 8:1 9:0 10:1 11:0 12:0 13:1 14:0 15:3 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:3 32:0 -3 0:2 1:2 2:1 3:0 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:1 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -3 0:1 1:2 2:2 3:0 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:1 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:1 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -3 0:2 1:2 2:1 3:0 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -0 0:3 1:3 2:2 3:0 4:0 5:0 6:0 7:0 8:2 9:2 10:0 11:0 12:0 13:0 14:0 15:0 16:2 17:1 18:2 19:2 20:3 21:2 22:1 23:0 24:0 25:1 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:3 2:3 3:1 4:0 5:0 6:0 7:0 8:2 9:1 10:0 11:0 12:0 13:0 14:0 15:0 16:3 17:0 18:2 19:2 20:2 21:3 22:0 23:1 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -1 0:2 1:2 2:2 3:1 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:1 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -1 0:3 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:1 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:3 32:0 -1 0:2 1:3 2:2 3:1 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:3 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:3 32:0 -2 0:2 1:1 2:2 3:1 4:0 5:2 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:2 27:0 28:2 29:0 30:0 31:3 32:3 -2 0:2 1:2 2:3 3:1 4:0 5:3 6:0 7:2 8:0 9:0 10:0 11:3 12:0 13:0 14:0 15:3 16:3 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:1 25:0 26:3 27:2 28:3 29:0 30:0 31:3 32:3 -2 0:3 1:2 2:2 3:1 4:0 5:2 6:0 7:3 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:2 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:3 27:1 28:2 29:0 30:0 31:3 32:3 -2 0:2 1:2 2:3 3:2 4:1 5:2 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:3 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:2 27:2 28:2 29:0 30:0 31:3 32:3 -0 0:2 1:2 2:2 3:0 4:1 5:0 6:0 7:0 8:1 9:1 10:0 11:0 12:0 13:0 14:0 15:0 16:2 17:1 18:2 19:3 20:3 21:2 22:0 23:2 24:0 25:1 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:3 2:2 3:0 4:1 5:0 6:0 7:0 8:3 9:2 10:1 11:0 12:0 13:1 14:0 15:0 16:3 17:0 18:2 19:2 20:3 21:3 22:0 23:0 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:2 2:3 3:1 4:0 5:0 6:0 7:0 8:2 9:2 10:0 11:0 12:0 13:0 14:0 15:0 16:3 17:1 18:3 19:3 20:2 21:2 22:2 23:1 24:0 25:3 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:1 2:3 3:0 4:1 5:0 6:0 7:0 8:0 9:2 10:0 11:0 12:0 13:0 14:0 15:0 16:3 17:0 18:2 19:2 20:3 21:3 22:0 23:0 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -4 0:2 1:1 2:0 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:3 15:1 16:2 17:1 18:0 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -4 0:2 1:1 2:1 3:3 4:0 5:0 6:1 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:2 15:0 16:2 17:0 18:0 19:0 20:3 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -4 0:1 1:1 2:2 3:2 4:0 5:0 6:0 7:0 8:1 9:0 10:0 11:0 12:1 13:0 14:3 15:0 16:3 17:2 18:0 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:0 32:0 -4 0:2 1:1 2:0 3:3 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:3 15:0 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -4 0:2 1:2 2:1 3:3 4:0 5:0 6:0 7:0 8:0 
9:0 10:0 11:0 12:0 13:0 14:3 15:0 16:2 17:1 18:0 19:0 20:3 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -2 0:2 1:2 2:3 3:3 4:1 5:2 6:0 7:1 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:1 25:0 26:2 27:0 28:2 29:0 30:0 31:2 32:3 -2 0:3 1:2 2:2 3:2 4:0 5:2 6:0 7:2 8:0 9:0 10:0 11:3 12:0 13:0 14:0 15:3 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:1 25:0 26:2 27:0 28:3 29:0 30:0 31:3 32:3 -2 0:2 1:2 2:2 3:3 4:1 5:2 6:0 7:1 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:1 25:0 26:2 27:0 28:3 29:0 30:0 31:1 32:2 -3 0:2 1:1 2:2 3:0 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -0 0:3 1:1 2:2 3:1 4:0 5:0 6:0 7:0 8:2 9:3 10:0 11:0 12:0 13:0 14:0 15:0 16:2 17:0 18:2 19:3 20:2 21:2 22:0 23:3 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:2 2:2 3:0 4:1 5:0 6:0 7:0 8:2 9:1 10:0 11:0 12:0 13:0 14:0 15:0 16:3 17:1 18:1 19:3 20:2 21:2 22:0 23:2 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -3 0:2 1:1 2:1 3:0 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:3 16:1 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:1 26:0 27:3 28:0 29:0 30:0 31:2 32:0 -3 0:2 1:1 2:2 3:1 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:1 26:0 27:3 28:0 29:0 30:0 31:2 32:0 -3 0:2 1:1 2:1 3:0 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:3 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:3 2:2 3:0 4:1 5:0 6:0 7:0 8:0 9:1 10:0 11:0 12:0 13:0 14:0 15:0 16:2 17:0 18:2 19:3 20:3 21:2 22:0 23:2 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:3 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:0 9:1 10:1 11:0 12:0 13:0 14:0 15:0 16:3 17:0 18:2 19:2 20:3 21:2 22:0 23:1 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:2 2:2 3:1 4:0 5:0 6:0 7:0 8:2 9:2 10:0 11:0 12:0 13:0 14:0 15:0 16:2 17:0 18:2 19:2 20:3 21:2 22:0 23:0 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -2 0:2 1:2 2:3 3:3 4:2 5:3 6:0 7:1 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:3 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:3 27:0 28:3 29:0 30:0 31:2 32:3 -2 0:3 1:1 2:2 3:3 4:2 5:2 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:2 27:0 28:2 29:0 30:0 31:3 32:3 -2 0:2 1:2 2:2 3:3 4:2 5:3 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:3 25:0 26:2 27:0 28:2 29:0 30:0 31:2 32:3 -2 0:2 1:1 2:2 3:3 4:3 5:2 6:0 7:2 8:0 9:0 10:0 11:3 12:0 13:0 14:0 15:3 16:1 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:3 27:0 28:2 29:0 30:0 31:2 32:3 -2 0:2 1:2 2:3 3:2 4:2 5:3 6:0 7:1 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:3 16:3 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:3 27:0 28:3 29:0 30:0 31:3 32:2 -0 0:2 1:2 2:2 3:0 4:1 5:0 6:0 7:0 8:2 9:2 10:0 11:0 12:0 13:0 14:0 15:0 16:2 17:1 18:2 19:3 20:2 21:3 22:1 23:2 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:2 2:3 3:1 4:0 5:0 6:0 7:0 8:2 9:1 10:0 11:0 12:0 13:0 14:0 15:0 16:2 17:2 18:3 19:3 20:3 21:2 22:0 23:1 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:3 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:2 9:2 10:0 11:0 12:0 13:0 14:0 15:0 16:3 17:0 18:3 19:2 20:3 21:3 22:0 23:2 24:0 25:3 26:0 27:0 28:0 29:0 30:0 31:3 32:0 -0 0:2 1:2 2:3 3:0 4:0 5:0 6:0 7:0 8:3 9:0 10:0 11:0 12:0 13:0 14:0 15:0 16:3 17:2 18:2 19:2 20:3 21:2 22:0 23:0 24:0 25:1 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -3 0:2 1:2 2:2 3:0 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:1 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 
25:1 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -3 0:2 1:1 2:1 3:0 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:1 13:0 14:0 15:3 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:1 28:0 29:0 30:0 31:1 32:0 -0 0:2 1:3 2:3 3:0 4:0 5:0 6:0 7:0 8:1 9:0 10:1 11:0 12:0 13:1 14:0 15:0 16:2 17:2 18:2 19:2 20:2 21:2 22:2 23:2 24:0 25:1 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:2 2:3 3:0 4:1 5:0 6:0 7:0 8:3 9:0 10:0 11:0 12:0 13:2 14:0 15:0 16:1 17:1 18:2 19:2 20:2 21:3 22:0 23:2 24:0 25:1 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -3 0:3 1:1 2:1 3:0 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -2 0:2 1:1 2:2 3:3 4:2 5:2 6:0 7:3 8:0 9:0 10:0 11:2 12:1 13:0 14:0 15:2 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:2 27:1 28:2 29:0 30:0 31:2 32:3 -2 0:1 1:1 2:2 3:3 4:2 5:3 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:1 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:2 27:0 28:2 29:0 30:0 31:2 32:3 -3 0:2 1:1 2:0 3:1 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:1 14:0 15:2 16:1 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:1 26:0 27:3 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:2 2:2 3:2 4:2 5:0 6:0 7:0 8:0 9:2 10:0 11:0 12:0 13:0 14:0 15:0 16:2 17:0 18:2 19:2 20:3 21:3 22:2 23:2 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:1 2:2 3:0 4:0 5:0 6:0 7:0 8:2 9:0 10:0 11:0 12:0 13:1 14:0 15:0 16:2 17:1 18:2 19:3 20:2 21:3 22:0 23:0 24:0 25:3 26:0 27:0 28:0 29:0 30:0 31:0 32:0 -3 0:1 1:1 2:1 3:0 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:1 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:3 32:0 -3 0:2 1:2 2:2 3:0 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:2 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -4 0:1 1:0 2:0 3:3 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:1 13:0 14:3 15:2 16:3 17:0 18:0 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:3 32:0 -4 0:2 1:0 2:1 3:3 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:2 15:2 16:3 17:0 18:1 19:0 20:3 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:3 32:0 -0 0:3 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:2 9:2 10:0 11:0 12:0 13:0 14:0 15:0 16:3 17:0 18:3 19:2 20:3 21:2 22:0 23:1 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -0 0:2 1:3 2:2 3:1 4:0 5:0 6:0 7:0 8:2 9:2 10:0 11:0 12:0 13:0 14:0 15:0 16:2 17:0 18:2 19:2 20:3 21:2 22:1 23:0 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -4 0:2 1:1 2:0 3:0 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:1 13:0 14:2 15:2 16:3 17:0 18:0 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:1 28:0 29:0 30:0 31:2 32:0 -4 0:3 1:1 2:0 3:1 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:1 15:2 16:2 17:0 18:0 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -2 0:2 1:1 2:2 3:2 4:2 5:3 6:0 7:2 8:0 9:0 10:0 11:2 12:1 13:0 14:0 15:3 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:3 25:0 26:2 27:0 28:2 29:0 30:0 31:2 32:3 -0 0:2 1:2 2:3 3:0 4:0 5:0 6:0 7:0 8:2 9:2 10:0 11:0 12:0 13:1 14:0 15:0 16:2 17:0 18:2 19:3 20:2 21:3 22:0 23:2 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -4 0:1 1:1 2:0 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:3 15:1 16:2 17:0 18:1 19:0 20:3 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -4 0:2 1:0 2:1 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:2 15:2 16:1 17:0 18:0 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -5 0:2 1:2 2:2 3:0 4:0 5:0 6:1 7:0 8:1 9:0 10:1 11:0 12:0 13:0 14:0 15:2 16:2 17:1 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:1 28:0 29:3 30:2 31:2 32:0 -5 0:2 1:2 2:1 3:0 4:0 5:0 6:2 7:0 8:2 
9:0 10:0 11:0 12:0 13:0 14:0 15:3 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:2 30:3 31:1 32:0 -3 0:2 1:2 2:2 3:1 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:1 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -3 0:2 1:2 2:2 3:1 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:1 13:0 14:0 15:0 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:1 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -5 0:2 1:2 2:0 3:1 4:0 5:0 6:2 7:0 8:2 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:1 28:0 29:2 30:2 31:2 32:0 -5 0:3 1:2 2:2 3:0 4:0 5:0 6:3 7:0 8:1 9:0 10:1 11:0 12:0 13:0 14:0 15:1 16:3 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:3 30:2 31:2 32:0 -5 0:2 1:2 2:1 3:0 4:0 5:0 6:2 7:0 8:2 9:0 10:0 11:0 12:0 13:0 14:0 15:3 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:2 30:2 31:2 32:0 -0 0:1 1:2 2:2 3:2 4:0 5:0 6:0 7:0 8:2 9:2 10:0 11:0 12:0 13:1 14:0 15:0 16:2 17:1 18:3 19:3 20:3 21:2 22:0 23:2 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:2 2:2 3:3 4:2 5:0 6:0 7:0 8:2 9:3 10:1 11:0 12:0 13:1 14:0 15:0 16:2 17:2 18:2 19:2 20:2 21:2 22:0 23:2 24:0 25:3 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:3 1:2 2:2 3:3 4:2 5:0 6:0 7:0 8:2 9:3 10:0 11:0 12:0 13:0 14:0 15:0 16:3 17:0 18:2 19:2 20:3 21:2 22:0 23:3 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -4 0:1 1:1 2:1 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:3 15:2 16:2 17:0 18:0 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:1 28:0 29:0 30:0 31:2 32:0 -4 0:2 1:0 2:1 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:3 15:1 16:2 17:0 18:0 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:3 2:2 3:3 4:2 5:0 6:0 7:0 8:3 9:2 10:0 11:0 12:0 13:1 14:0 15:0 16:3 17:2 18:3 19:2 20:2 21:2 22:0 23:3 24:0 25:3 26:0 27:0 28:0 29:0 30:0 31:0 32:0 -0 0:2 1:2 2:2 3:2 4:2 5:0 6:0 7:0 8:3 9:0 10:1 11:0 12:0 13:0 14:0 15:0 16:2 17:2 18:2 19:2 20:3 21:3 22:0 23:2 24:0 25:3 26:0 27:0 28:0 29:0 30:0 31:0 32:0 -0 0:2 1:2 2:2 3:0 4:1 5:0 6:0 7:0 8:0 9:1 10:0 11:0 12:0 13:1 14:0 15:0 16:2 17:0 18:2 19:3 20:2 21:3 22:2 23:1 24:0 25:1 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:3 2:2 3:1 4:0 5:0 6:0 7:0 8:2 9:2 10:0 11:0 12:0 13:1 14:0 15:0 16:2 17:0 18:2 19:2 20:2 21:2 22:0 23:2 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:3 32:0 -1 0:2 1:2 2:0 3:0 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:1 13:0 14:0 15:3 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -1 0:2 1:2 2:1 3:1 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:1 13:1 14:0 15:2 16:2 17:1 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:1 32:0 -3 0:2 1:1 2:1 3:0 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:1 14:0 15:1 16:1 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:1 28:0 29:0 30:0 31:2 32:0 -3 0:3 1:2 2:1 3:0 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:1 26:0 27:1 28:0 29:0 30:0 31:2 32:0 -2 0:3 1:2 2:2 3:2 4:3 5:2 6:0 7:2 8:0 9:0 10:0 11:2 12:2 13:0 14:0 15:3 16:3 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:3 25:0 26:3 27:0 28:3 29:0 30:0 31:2 32:3 -2 0:2 1:1 2:3 3:1 4:2 5:3 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:3 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:3 25:0 26:2 27:0 28:1 29:0 30:0 31:2 32:3 -0 0:3 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:3 9:3 10:0 11:0 12:0 13:1 14:0 15:0 16:2 17:0 18:2 19:3 20:2 21:3 22:0 23:2 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:3 32:0 diff --git a/ml-xgboost/demo/data/dermatology.data.train b/ml-xgboost/demo/data/dermatology.data.train deleted file 
mode 100644 index 846bcab..0000000 --- a/ml-xgboost/demo/data/dermatology.data.train +++ /dev/null @@ -1,256 +0,0 @@ -1 0:2 1:2 2:0 3:3 4:0 5:0 6:0 7:0 8:1 9:0 10:0 11:0 12:0 13:0 14:0 15:3 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:1 32:0 -0 0:3 1:3 2:3 3:2 4:1 5:0 6:0 7:0 8:1 9:1 10:1 11:0 12:0 13:1 14:0 15:1 16:2 17:0 18:2 19:2 20:2 21:2 22:2 23:1 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -2 0:2 1:1 2:2 3:3 4:1 5:3 6:0 7:3 8:0 9:0 10:0 11:1 12:0 13:0 14:0 15:1 16:2 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:2 27:3 28:2 29:0 30:0 31:2 32:3 -0 0:2 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:3 9:2 10:0 11:0 12:0 13:3 14:0 15:0 16:2 17:0 18:3 19:2 20:2 21:2 22:2 23:0 24:0 25:3 26:0 27:0 28:0 29:0 30:0 31:3 32:0 -2 0:2 1:3 2:2 3:2 4:2 5:2 6:0 7:2 8:0 9:0 10:0 11:1 12:0 13:0 14:0 15:1 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:2 25:2 26:3 27:2 28:3 29:0 30:0 31:2 32:3 -1 0:2 1:3 2:2 3:0 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:2 13:1 14:0 15:2 16:2 17:0 18:2 19:0 20:0 21:0 22:1 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -4 0:2 1:1 2:0 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:3 15:1 16:3 17:0 18:0 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -2 0:2 1:2 2:3 3:3 4:3 5:3 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:3 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:2 26:2 27:3 28:2 29:0 30:0 31:3 32:3 -3 0:2 1:2 2:1 3:0 4:2 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:1 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -3 0:2 1:2 2:1 3:0 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:3 16:2 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -0 0:3 1:3 2:2 3:1 4:1 5:0 6:0 7:0 8:2 9:2 10:1 11:0 12:0 13:0 14:0 15:0 16:3 17:2 18:3 19:2 20:2 21:2 22:1 23:1 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -1 0:2 1:2 2:0 3:3 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:2 14:0 15:2 16:2 17:0 18:0 19:0 20:0 21:0 22:1 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:1 32:0 -1 0:3 1:3 2:1 3:2 4:0 5:0 6:0 7:0 8:0 9:1 10:0 11:0 12:0 13:2 14:0 15:3 16:1 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -0 0:2 1:3 2:3 3:0 4:0 5:0 6:0 7:0 8:1 9:1 10:1 11:0 12:0 13:1 14:0 15:0 16:2 17:1 18:2 19:1 20:2 21:3 22:0 23:2 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -2 0:2 1:2 2:3 3:3 4:0 5:3 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:1 16:1 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:3 27:0 28:3 29:0 30:0 31:1 32:3 -3 0:1 1:1 2:0 3:1 4:3 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:1 16:1 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -1 0:2 1:2 2:1 3:3 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:2 14:0 15:2 16:1 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -0 0:3 1:3 2:3 3:0 4:0 5:0 6:0 7:0 8:3 9:3 10:1 11:0 12:0 13:2 14:0 15:0 16:2 17:0 18:2 19:3 20:3 21:3 22:2 23:3 24:0 25:3 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -2 0:2 1:1 2:3 3:3 4:3 5:3 6:0 7:0 8:2 9:0 10:0 11:3 12:0 13:0 14:0 15:3 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:3 25:0 26:2 27:0 28:3 29:0 30:0 31:2 32:3 -4 0:1 1:1 2:0 3:3 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:3 15:0 16:3 17:2 18:2 19:0 20:3 21:0 22:0 23:0 24:0 25:0 26:0 27:1 28:0 29:0 30:0 31:2 32:0 -5 0:2 1:1 2:1 3:2 4:0 5:0 6:3 7:0 8:1 9:2 10:0 11:0 12:0 13:1 14:0 15:0 16:1 17:2 18:2 19:0 20:1 21:0 22:1 23:0 24:0 25:0 26:0 27:0 28:0 29:1 30:2 31:1 32:0 -1 0:3 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:2 14:0 15:2 16:2 17:1 18:2 19:0 20:2 21:1 22:2 23:0 24:0 
25:0 26:0 27:3 28:0 29:0 30:0 31:2 32:0 -4 0:2 1:2 2:0 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:1 15:1 16:3 17:1 18:2 19:0 20:2 21:1 22:0 23:0 24:0 25:0 26:0 27:1 28:0 29:1 30:0 31:2 32:0 -2 0:2 1:2 2:2 3:3 4:2 5:2 6:0 7:2 8:0 9:0 10:0 11:3 12:2 13:0 14:0 15:0 16:2 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:3 25:0 26:3 27:0 28:2 29:0 30:0 31:2 32:3 -4 0:2 1:0 2:0 3:3 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:2 15:2 16:2 17:0 18:0 19:0 20:3 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:1 2:1 3:0 4:1 5:0 6:0 7:0 8:2 9:0 10:0 11:0 12:0 13:0 14:0 15:0 16:2 17:2 18:2 19:2 20:2 21:2 22:1 23:2 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -5 0:1 1:1 2:0 3:1 4:0 5:0 6:3 7:0 8:1 9:0 10:0 11:0 12:0 13:1 14:0 15:0 16:1 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:1 25:0 26:0 27:1 28:0 29:2 30:2 31:1 32:0 -4 0:1 1:2 2:2 3:3 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:1 15:1 16:2 17:1 18:1 19:0 20:3 21:0 22:0 23:0 24:0 25:0 26:0 27:1 28:0 29:0 30:0 31:3 32:0 -1 0:3 1:2 2:2 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:2 14:0 15:3 16:3 17:3 18:2 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:1 30:1 31:2 32:0 -2 0:1 1:1 2:2 3:3 4:2 5:2 6:0 7:3 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:2 17:1 18:2 19:0 20:0 21:0 22:0 23:0 24:3 25:0 26:3 27:0 28:3 29:1 30:0 31:2 32:3 -0 0:3 1:2 2:1 3:2 4:0 5:0 6:0 7:0 8:1 9:2 10:0 11:0 12:0 13:1 14:0 15:0 16:2 17:0 18:3 19:2 20:2 21:2 22:1 23:2 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -1 0:3 1:2 2:0 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:1 13:2 14:0 15:2 16:1 17:1 18:1 19:0 20:0 21:0 22:1 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -0 0:2 1:3 2:3 3:3 4:3 5:0 6:0 7:0 8:3 9:3 10:0 11:0 12:0 13:0 14:0 15:0 16:3 17:2 18:2 19:3 20:3 21:3 22:1 23:3 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -0 0:2 1:2 2:1 3:0 4:0 5:0 6:0 7:0 8:1 9:0 10:1 11:0 12:0 13:2 14:0 15:0 16:2 17:1 18:2 19:2 20:1 21:2 22:0 23:1 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:0 32:0 -3 0:2 1:1 2:0 3:0 4:2 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:1 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:0 32:0 -1 0:2 1:2 2:1 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:2 14:0 15:1 16:0 17:1 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:0 32:0 -2 0:2 1:1 2:2 3:3 4:2 5:3 6:0 7:2 8:0 9:0 10:1 11:1 12:0 13:0 14:0 15:2 16:1 17:1 18:2 19:0 20:0 21:0 22:0 23:0 24:1 25:0 26:2 27:0 28:2 29:0 30:0 31:0 32:3 -1 0:2 1:1 2:1 3:1 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:1 14:0 15:3 16:2 17:1 18:0 19:0 20:0 21:0 22:2 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -2 0:2 1:1 2:2 3:3 4:2 5:1 6:0 7:2 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:2 18:1 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:1 27:0 28:3 29:0 30:0 31:2 32:3 -0 0:3 1:3 2:2 3:0 4:0 5:0 6:0 7:0 8:2 9:2 10:1 11:0 12:0 13:1 14:0 15:0 16:2 17:2 18:3 19:2 20:2 21:1 22:0 23:2 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -1 0:1 1:1 2:1 3:0 4:0 5:0 6:1 7:0 8:0 9:0 10:0 11:0 12:1 13:1 14:0 15:2 16:1 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:1 32:0 -3 0:1 1:1 2:1 3:0 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:1 14:0 15:1 16:1 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -0 0:3 1:2 2:1 3:1 4:0 5:0 6:0 7:0 8:2 9:1 10:0 11:0 12:0 13:0 14:0 15:0 16:2 17:1 18:1 19:1 20:1 21:1 22:0 23:1 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -1 0:2 1:1 2:1 3:0 4:0 5:0 6:0 7:0 8:1 9:0 10:0 11:0 12:0 13:1 14:0 15:1 16:1 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -4 0:0 1:1 2:0 3:3 4:0 5:0 6:0 7:0 8:0 
9:0 10:0 11:0 12:0 13:0 14:2 15:0 16:2 17:2 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -2 0:2 1:1 2:1 3:1 4:1 5:2 6:0 7:1 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:3 16:2 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:2 27:0 28:2 29:0 30:0 31:3 32:3 -3 0:2 1:1 2:1 3:3 4:3 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:1 28:0 29:0 30:0 31:2 32:0 -5 0:2 1:1 2:1 3:1 4:0 5:0 6:2 7:0 8:3 9:2 10:0 11:0 12:0 13:1 14:0 15:1 16:1 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:1 31:1 32:1 -1 0:1 1:2 2:1 3:1 4:0 5:0 6:0 7:0 8:0 9:1 10:0 11:0 12:0 13:0 14:0 15:2 16:1 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:0 32:2 -2 0:2 1:0 2:1 3:0 4:0 5:2 6:0 7:1 8:0 9:0 10:0 11:3 12:0 13:0 14:0 15:2 16:2 17:2 18:2 19:0 20:0 21:0 22:0 23:0 24:1 25:0 26:3 27:0 28:2 29:0 30:0 31:2 32:2 -2 0:3 1:1 2:1 3:2 4:2 5:2 6:0 7:0 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:1 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:1 25:0 26:2 27:0 28:3 29:0 30:0 31:2 32:3 -3 0:1 1:1 2:1 3:0 4:2 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:1 14:0 15:1 16:1 17:1 18:1 19:0 20:0 21:0 22:0 23:1 24:0 25:0 26:0 27:0 28:1 29:0 30:0 31:1 32:0 -0 0:2 1:2 2:1 3:0 4:0 5:0 6:0 7:0 8:2 9:2 10:0 11:0 12:0 13:1 14:0 15:0 16:2 17:1 18:1 19:1 20:2 21:2 22:1 23:2 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -0 0:3 1:1 2:1 3:1 4:1 5:0 6:0 7:0 8:0 9:1 10:1 11:0 12:0 13:0 14:0 15:0 16:1 17:0 18:2 19:2 20:2 21:3 22:0 23:3 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -4 0:0 1:1 2:0 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:3 15:1 16:2 17:1 18:0 19:0 20:1 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:3 32:0 -0 0:2 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:0 9:1 10:0 11:0 12:0 13:0 14:0 15:1 16:2 17:0 18:2 19:1 20:3 21:2 22:0 23:1 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -1 0:1 1:1 2:1 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:1 13:1 14:0 15:3 16:1 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -2 0:2 1:1 2:1 3:3 4:0 5:3 6:0 7:1 8:0 9:0 10:0 11:1 12:0 13:0 14:0 15:2 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:1 25:0 26:3 27:0 28:1 29:0 30:0 31:2 32:2 -3 0:1 1:1 2:1 3:0 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:1 16:1 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:1 26:0 27:1 28:0 29:0 30:0 31:1 32:0 -1 0:3 1:3 2:0 3:3 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:1 13:1 14:0 15:3 16:1 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:2 32:0 -5 0:2 1:2 2:1 3:1 4:0 5:0 6:1 7:0 8:1 9:1 10:1 11:0 12:0 13:0 14:0 15:1 16:2 17:1 18:1 19:1 20:1 21:0 22:0 23:0 24:0 25:0 26:0 27:1 28:0 29:1 30:1 31:1 32:0 -0 0:2 1:1 2:1 3:0 4:0 5:0 6:0 7:0 8:1 9:1 10:1 11:0 12:0 13:1 14:0 15:0 16:2 17:2 18:2 19:2 20:2 21:2 22:0 23:1 24:0 25:1 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -4 0:1 1:1 2:0 3:1 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:2 15:1 16:3 17:0 18:2 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -0 0:3 1:3 2:2 3:1 4:1 5:0 6:0 7:0 8:2 9:2 10:1 11:0 12:0 13:0 14:0 15:0 16:3 17:2 18:3 19:2 20:2 21:2 22:1 23:1 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -1 0:2 1:2 2:0 3:3 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:2 14:0 15:3 16:1 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -2 0:3 1:2 2:2 3:2 4:0 5:2 6:0 7:1 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:1 16:1 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:2 27:0 28:2 29:0 30:0 31:1 32:3 -0 0:2 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:2 9:2 10:0 11:0 12:0 13:1 14:0 15:0 16:2 17:0 18:2 19:3 20:3 21:3 22:1 23:3 24:0 
25:3 26:0 27:0 28:0 29:0 30:0 31:3 32:0 -3 0:1 1:1 2:0 3:1 4:3 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:1 16:1 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -4 0:3 1:3 2:3 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:3 15:2 16:2 17:2 18:2 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:3 32:0 -0 0:3 1:3 2:3 3:0 4:1 5:0 6:0 7:0 8:2 9:0 10:0 11:0 12:0 13:0 14:0 15:1 16:1 17:2 18:2 19:2 20:2 21:2 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -1 0:2 1:2 2:2 3:1 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:1 14:0 15:1 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -5 0:2 1:1 2:2 3:0 4:0 5:0 6:3 7:0 8:3 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:1 18:2 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:3 30:3 31:0 32:0 -2 0:1 1:1 2:1 3:1 4:1 5:0 6:0 7:1 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:3 16:1 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:2 27:0 28:2 29:0 30:0 31:2 32:3 -4 0:1 1:1 2:1 3:1 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:1 15:1 16:1 17:1 18:2 19:0 20:1 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -3 0:2 1:1 2:2 3:2 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:0 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -1 0:3 1:2 2:1 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:2 14:0 15:2 16:1 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -1 0:2 1:2 2:2 3:1 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:2 14:0 15:1 16:2 17:0 18:3 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:0 32:0 -0 0:2 1:1 2:2 3:2 4:2 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:1 13:2 14:0 15:2 16:2 17:2 18:2 19:2 20:2 21:1 22:1 23:1 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:3 32:0 -2 0:0 1:0 2:0 3:0 4:0 5:0 6:0 7:3 8:0 9:0 10:0 11:1 12:0 13:0 14:0 15:3 16:1 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:3 27:3 28:3 29:0 30:0 31:3 32:3 -4 0:2 1:2 2:1 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:2 15:1 16:1 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -0 0:3 1:2 2:2 3:2 4:2 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:1 14:0 15:0 16:2 17:1 18:1 19:1 20:2 21:1 22:1 23:1 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -1 0:3 1:2 2:2 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:3 14:0 15:2 16:2 17:0 18:3 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -1 0:2 1:1 2:1 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:2 14:0 15:3 16:2 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:3 32:0 -1 0:3 1:2 2:2 3:1 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:1 14:0 15:2 16:1 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -4 0:1 1:1 2:2 3:3 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:3 15:1 16:1 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -0 0:2 1:2 2:2 3:3 4:3 5:0 6:0 7:0 8:0 9:2 10:0 11:0 12:1 13:2 14:0 15:1 16:1 17:1 18:1 19:1 20:1 21:1 22:1 23:2 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:3 1:3 2:2 3:2 4:1 5:0 6:0 7:0 8:0 9:1 10:0 11:0 12:2 13:2 14:0 15:1 16:2 17:1 18:1 19:1 20:2 21:1 22:2 23:1 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -2 0:2 1:1 2:2 3:1 4:1 5:0 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:2 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:3 25:0 26:3 27:0 28:3 29:0 30:0 31:3 32:3 -0 0:0 1:1 2:2 3:1 4:1 5:0 6:1 7:0 8:2 9:3 10:0 11:0 12:0 13:1 14:0 15:0 16:3 17:1 18:2 19:3 20:3 21:3 22:1 23:1 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -3 0:1 1:1 2:1 3:0 4:2 5:0 6:0 7:0 8:0 
9:0 10:0 11:0 12:0 13:1 14:0 15:1 16:1 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -1 0:3 1:2 2:1 3:3 4:0 5:0 6:0 7:0 8:0 9:0 10:1 11:0 12:1 13:2 14:0 15:3 16:2 17:0 18:1 19:0 20:1 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:1 32:0 -1 0:2 1:1 2:0 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:1 13:1 14:0 15:2 16:0 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -4 0:1 1:1 2:1 3:3 4:0 5:0 6:1 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:2 15:1 16:3 17:1 18:1 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:1 28:0 29:0 30:0 31:2 32:0 -0 0:3 1:3 2:3 3:3 4:3 5:0 6:0 7:0 8:3 9:3 10:1 11:0 12:0 13:1 14:0 15:0 16:2 17:1 18:2 19:2 20:2 21:2 22:2 23:2 24:0 25:1 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -2 0:2 1:1 2:1 3:3 4:2 5:2 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:3 27:0 28:2 29:0 30:0 31:3 32:3 -3 0:2 1:1 2:0 3:2 4:2 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:3 16:1 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:1 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -1 0:2 1:2 2:1 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:1 14:0 15:3 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:2 32:0 -4 0:1 1:1 2:1 3:0 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:3 15:0 16:2 17:0 18:0 19:0 20:3 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -0 0:2 1:2 2:2 3:2 4:2 5:0 6:0 7:0 8:2 9:1 10:0 11:0 12:0 13:0 14:0 15:2 16:0 17:2 18:2 19:2 20:2 21:2 22:1 23:1 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -5 0:2 1:2 2:1 3:0 4:0 5:0 6:2 7:0 8:2 9:0 10:1 11:0 12:0 13:0 14:0 15:1 16:1 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:1 30:1 31:1 32:0 -1 0:2 1:2 2:1 3:1 4:0 5:0 6:0 7:0 8:1 9:0 10:0 11:0 12:1 13:1 14:0 15:2 16:2 17:1 18:1 19:0 20:1 21:0 22:1 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -4 0:1 1:1 2:2 3:0 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:3 15:0 16:2 17:0 18:0 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -0 0:3 1:3 2:3 3:2 4:2 5:0 6:0 7:0 8:2 9:2 10:0 11:0 12:0 13:1 14:0 15:0 16:1 17:0 18:1 19:1 20:1 21:1 22:1 23:1 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:3 32:0 -1 0:2 1:3 2:1 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:1 13:0 14:0 15:3 16:2 17:0 18:2 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -1 0:1 1:2 2:2 3:1 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:2 13:0 14:0 15:2 16:3 17:0 18:1 19:0 20:1 21:0 22:0 23:0 24:0 25:0 26:0 27:1 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:2 2:2 3:2 4:2 5:0 6:0 7:0 8:2 9:2 10:1 11:0 12:0 13:1 14:0 15:0 16:2 17:1 18:2 19:2 20:2 21:1 22:0 23:2 24:0 25:0 26:1 27:0 28:0 29:0 30:0 31:1 32:0 -3 0:1 1:1 2:0 3:1 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:0 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -0 0:1 1:1 2:1 3:1 4:1 5:0 6:0 7:0 8:1 9:1 10:0 11:0 12:0 13:2 14:0 15:0 16:1 17:2 18:1 19:1 20:1 21:2 22:0 23:3 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -2 0:2 1:2 2:3 3:2 4:1 5:3 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:3 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:3 27:0 28:2 29:0 30:0 31:2 32:3 -0 0:3 1:2 2:1 3:2 4:2 5:0 6:0 7:0 8:2 9:2 10:0 11:0 12:0 13:1 14:0 15:0 16:2 17:3 18:2 19:2 20:2 21:3 22:0 23:3 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:3 32:1 -0 0:2 1:2 2:2 3:3 4:2 5:0 6:0 7:0 8:3 9:3 10:0 11:0 12:0 13:1 14:0 15:0 16:2 17:0 18:2 19:2 20:2 21:2 22:2 23:2 24:0 25:1 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -2 0:2 1:2 2:2 3:3 4:2 5:2 6:0 7:2 8:0 9:0 10:0 11:3 12:0 13:0 14:0 15:3 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:2 
25:0 26:3 27:2 28:3 29:0 30:0 31:3 32:3 -4 0:1 1:1 2:2 3:0 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:3 15:0 16:2 17:0 18:0 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -2 0:2 1:2 2:3 3:2 4:3 5:3 6:0 7:3 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:3 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:3 25:0 26:2 27:2 28:2 29:0 30:0 31:2 32:2 -2 0:1 1:1 2:1 3:2 4:2 5:2 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:2 27:0 28:2 29:0 30:0 31:3 32:3 -4 0:2 1:2 2:2 3:3 4:0 5:0 6:1 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:3 15:1 16:2 17:2 18:2 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -1 0:2 1:2 2:1 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:1 14:0 15:2 16:2 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -2 0:2 1:2 2:3 3:2 4:2 5:2 6:0 7:3 8:0 9:0 10:0 11:2 12:0 13:0 14:2 15:2 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:3 27:3 28:2 29:0 30:0 31:2 32:2 -3 0:2 1:2 2:0 3:2 4:2 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:3 16:0 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:2 32:0 -0 0:1 1:1 2:1 3:1 4:1 5:0 6:0 7:0 8:2 9:2 10:0 11:0 12:0 13:2 14:0 15:0 16:2 17:1 18:1 19:2 20:2 21:1 22:0 23:2 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:0 32:2 -1 0:3 1:3 2:2 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:0 16:2 17:0 18:2 19:0 20:1 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:2 32:0 -4 0:2 1:2 2:2 3:1 4:0 5:0 6:2 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:3 15:1 16:3 17:2 18:2 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -5 0:1 1:1 2:2 3:0 4:0 5:0 6:3 7:0 8:3 9:0 10:1 11:0 12:0 13:0 14:0 15:2 16:1 17:1 18:1 19:1 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:2 30:2 31:2 32:0 -0 0:2 1:3 2:3 3:3 4:3 5:0 6:0 7:0 8:2 9:1 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:1 18:3 19:3 20:3 21:3 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:3 1:2 2:2 3:2 4:2 5:0 6:0 7:0 8:0 9:3 10:0 11:0 12:0 13:2 14:0 15:0 16:3 17:1 18:3 19:3 20:3 21:2 22:0 23:1 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -1 0:2 1:2 2:1 3:1 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:1 14:0 15:2 16:3 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -5 0:2 1:2 2:2 3:0 4:0 5:0 6:2 7:0 8:2 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:2 18:2 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:2 30:2 31:2 32:0 -2 0:2 1:2 2:2 3:2 4:3 5:2 6:0 7:3 8:0 9:0 10:0 11:3 12:0 13:0 14:0 15:3 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:3 27:0 28:3 29:0 30:0 31:3 32:3 -4 0:1 1:1 2:0 3:0 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:2 15:0 16:3 17:0 18:0 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -3 0:2 1:1 2:0 3:2 4:2 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:1 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -0 0:1 1:1 2:2 3:2 4:2 5:0 6:2 7:0 8:1 9:2 10:0 11:0 12:0 13:1 14:0 15:0 16:2 17:1 18:2 19:3 20:3 21:3 22:2 23:2 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:2 2:2 3:3 4:2 5:0 6:0 7:0 8:1 9:1 10:0 11:0 12:0 13:1 14:0 15:0 16:1 17:1 18:1 19:1 20:1 21:1 22:1 23:1 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:3 32:0 -2 0:3 1:2 2:2 3:1 4:3 5:3 6:0 7:3 8:0 9:0 10:0 11:3 12:0 13:0 14:0 15:3 16:2 17:0 18:3 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:3 27:3 28:3 29:0 30:0 31:2 32:2 -4 0:2 1:2 2:1 3:0 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:2 15:0 16:3 17:0 18:0 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:3 32:0 -4 0:3 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:0 
9:0 10:0 11:0 12:0 13:0 14:1 15:0 16:2 17:0 18:0 19:0 20:1 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:1 1:1 2:1 3:1 4:1 5:0 6:1 7:0 8:2 9:3 10:0 11:0 12:0 13:1 14:0 15:0 16:2 17:2 18:1 19:2 20:2 21:2 22:2 23:2 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:3 32:0 -3 0:2 1:2 2:0 3:2 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:0 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:3 32:0 -1 0:3 1:3 2:2 3:1 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:2 13:0 14:0 15:2 16:3 17:0 18:2 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -2 0:2 1:3 2:2 3:3 4:3 5:3 6:0 7:2 8:0 9:0 10:0 11:3 12:0 13:0 14:0 15:3 16:2 17:0 18:3 19:0 20:0 21:0 22:0 23:0 24:3 25:0 26:2 27:2 28:2 29:0 30:0 31:2 32:2 -0 0:3 1:3 2:3 3:3 4:2 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:1 14:0 15:0 16:2 17:1 18:1 19:1 20:2 21:1 22:1 23:2 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -1 0:2 1:2 2:1 3:2 4:2 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:1 14:0 15:1 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:3 2:1 3:2 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:2 14:0 15:0 16:1 17:0 18:0 19:2 20:1 21:2 22:2 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:3 1:2 2:2 3:3 4:2 5:0 6:0 7:0 8:0 9:2 10:0 11:0 12:0 13:2 14:0 15:2 16:2 17:1 18:2 19:3 20:3 21:3 22:3 23:1 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:3 32:0 -2 0:2 1:1 2:1 3:3 4:2 5:2 6:0 7:2 8:0 9:0 10:0 11:0 12:0 13:0 14:2 15:3 16:0 17:2 18:0 19:0 20:0 21:0 22:0 23:3 24:0 25:2 26:0 27:2 28:2 29:0 30:0 31:3 32:2 -2 0:1 1:1 2:2 3:3 4:2 5:3 6:0 7:3 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:2 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:3 27:2 28:3 29:0 30:0 31:2 32:3 -2 0:2 1:2 2:3 3:3 4:1 5:2 6:0 7:2 8:0 9:0 10:0 11:1 12:0 13:0 14:0 15:3 16:3 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:3 25:0 26:2 27:1 28:2 29:0 30:0 31:2 32:3 -1 0:3 1:2 2:0 3:3 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:1 13:1 14:0 15:3 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:2 32:0 -4 0:1 1:1 2:0 3:3 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:1 13:0 14:2 15:0 16:3 17:0 18:1 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -3 0:2 1:2 2:2 3:0 4:2 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -1 0:2 1:3 2:0 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:3 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -1 0:1 1:2 2:0 3:3 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:3 2:2 3:1 4:0 5:0 6:0 7:0 8:1 9:2 10:1 11:0 12:0 13:1 14:0 15:2 16:1 17:0 18:1 19:2 20:2 21:1 22:1 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:3 1:3 2:2 3:2 4:0 5:0 6:0 7:0 8:2 9:0 10:0 11:0 12:0 13:1 14:0 15:2 16:1 17:0 18:2 19:1 20:1 21:1 22:1 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:1 9:0 10:0 11:0 12:0 13:2 14:0 15:1 16:3 17:0 18:1 19:2 20:2 21:1 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -4 0:1 1:0 2:1 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:3 15:0 16:3 17:1 18:0 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -2 0:2 1:1 2:1 3:1 4:0 5:2 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:2 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:1 25:0 26:2 27:2 28:2 29:0 30:0 31:3 32:3 -1 0:2 1:3 2:0 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:3 16:3 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 
25:0 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -2 0:3 1:3 2:2 3:2 4:0 5:3 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:2 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:2 27:3 28:3 29:0 30:0 31:2 32:3 -1 0:2 1:2 2:1 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:1 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -1 0:3 1:2 2:2 3:1 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:1 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:2 32:0 -3 0:2 1:1 2:2 3:0 4:2 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -1 0:2 1:1 2:1 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:1 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:3 32:0 -2 0:3 1:2 2:2 3:2 4:0 5:2 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:2 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:1 27:1 28:1 29:0 30:0 31:2 32:2 -5 0:2 1:2 2:0 3:0 4:0 5:0 6:2 7:0 8:1 9:1 10:1 11:0 12:0 13:0 14:0 15:2 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:1 28:0 29:1 30:2 31:1 32:0 -1 0:3 1:2 2:0 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:1 14:0 15:2 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:0 32:0 -0 0:3 1:2 2:2 3:3 4:1 5:0 6:0 7:0 8:1 9:1 10:1 11:0 12:0 13:2 14:0 15:0 16:2 17:0 18:2 19:2 20:3 21:2 22:1 23:1 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -0 0:2 1:3 2:2 3:2 4:1 5:0 6:0 7:0 8:2 9:2 10:0 11:0 12:0 13:2 14:0 15:0 16:2 17:0 18:3 19:3 20:3 21:2 22:2 23:2 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -2 0:2 1:2 2:3 3:3 4:1 5:2 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:3 16:2 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:2 27:0 28:0 29:0 30:0 31:2 32:3 -3 0:2 1:2 2:2 3:0 4:2 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:1 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:1 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -2 0:2 1:2 2:3 3:3 4:1 5:3 6:0 7:2 8:0 9:0 10:0 11:3 12:0 13:0 14:0 15:2 16:3 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:3 27:2 28:2 29:0 30:0 31:3 32:3 -2 0:1 1:1 2:2 3:3 4:1 5:2 6:0 7:2 8:0 9:0 10:0 11:1 12:0 13:0 14:0 15:1 16:3 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:1 25:0 26:2 27:2 28:3 29:0 30:0 31:2 32:3 -0 0:2 1:3 2:2 3:0 4:0 5:0 6:0 7:0 8:2 9:2 10:0 11:0 12:0 13:2 14:0 15:0 16:2 17:0 18:2 19:3 20:2 21:1 22:3 23:0 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:3 1:2 2:3 3:0 4:0 5:0 6:0 7:0 8:3 9:0 10:0 11:0 12:0 13:3 14:0 15:0 16:1 17:0 18:1 19:2 20:2 21:0 22:2 23:0 24:0 25:3 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -0 0:3 1:3 2:3 3:0 4:0 5:0 6:0 7:0 8:2 9:0 10:0 11:0 12:0 13:2 14:0 15:0 16:2 17:0 18:2 19:2 20:3 21:0 22:3 23:0 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -2 0:3 1:2 2:2 3:2 4:0 5:2 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:3 17:0 18:3 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:2 27:3 28:2 29:0 30:0 31:2 32:3 -0 0:2 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:3 9:2 10:0 11:0 12:0 13:2 14:0 15:0 16:2 17:0 18:2 19:2 20:3 21:2 22:3 23:0 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:3 32:0 -0 0:3 1:2 2:3 3:0 4:0 5:0 6:0 7:0 8:2 9:2 10:0 11:0 12:0 13:2 14:0 15:0 16:3 17:0 18:2 19:2 20:2 21:3 22:3 23:0 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -1 0:2 1:2 2:0 3:3 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:2 14:0 15:3 16:3 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -2 0:1 1:2 2:2 3:2 4:0 5:3 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:3 16:2 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:3 25:0 26:2 27:2 28:2 29:0 30:0 31:2 32:2 -2 0:2 1:2 2:2 3:2 4:0 5:2 6:0 7:3 8:0 
9:0 10:0 11:3 12:0 13:0 14:0 15:3 16:3 17:0 18:3 19:0 20:0 21:0 22:0 23:0 24:3 25:0 26:3 27:3 28:3 29:0 30:0 31:2 32:2 -0 0:2 1:3 2:2 3:0 4:0 5:0 6:0 7:0 8:2 9:0 10:0 11:0 12:0 13:3 14:0 15:0 16:3 17:0 18:2 19:2 20:2 21:0 22:2 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:3 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:2 14:0 15:0 16:2 17:0 18:1 19:0 20:2 21:2 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:1 1:3 2:1 3:0 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:2 14:0 15:0 16:2 17:0 18:2 19:0 20:3 21:3 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:3 32:0 -0 0:2 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:3 14:0 15:0 16:3 17:0 18:3 19:0 20:3 21:3 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -5 0:2 1:2 2:1 3:0 4:0 5:0 6:3 7:0 8:2 9:0 10:1 11:0 12:0 13:0 14:0 15:2 16:2 17:1 18:2 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:3 30:3 31:2 32:0 -1 0:2 1:2 2:0 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:1 14:0 15:3 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:1 32:0 -1 0:3 1:2 2:0 3:3 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:2 14:0 15:2 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -1 0:2 1:3 2:0 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:2 14:0 15:3 16:3 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -1 0:3 1:2 2:0 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:1 11:0 12:0 13:2 14:0 15:2 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:3 32:0 -0 0:2 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:3 9:0 10:0 11:0 12:0 13:2 14:0 15:0 16:2 17:0 18:2 19:2 20:2 21:0 22:2 23:0 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -2 0:2 1:1 2:2 3:3 4:1 5:2 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:1 16:3 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:2 27:0 28:3 29:0 30:0 31:3 32:2 -2 0:2 1:3 2:3 3:3 4:0 5:3 6:0 7:3 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:1 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:2 27:0 28:3 29:0 30:0 31:2 32:2 -2 0:3 1:2 2:2 3:2 4:0 5:2 6:0 7:0 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:3 16:2 17:0 18:3 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:3 27:0 28:2 29:0 30:0 31:3 32:3 -0 0:2 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:2 9:2 10:1 11:0 12:0 13:2 14:0 15:0 16:2 17:0 18:3 19:3 20:2 21:2 22:2 23:1 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:3 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:3 9:3 10:1 11:0 12:0 13:3 14:0 15:0 16:3 17:0 18:2 19:3 20:2 21:3 22:2 23:0 24:0 25:3 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -1 0:2 1:2 2:1 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:1 13:1 14:0 15:2 16:2 17:0 18:1 19:0 20:0 21:0 22:1 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:2 32:0 -2 0:3 1:2 2:3 3:3 4:1 5:2 6:0 7:2 8:0 9:0 10:0 11:3 12:0 13:0 14:0 15:2 16:2 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:1 25:0 26:2 27:0 28:2 29:0 30:0 31:2 32:3 -1 0:3 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:1 13:1 14:0 15:1 16:1 17:0 18:2 19:0 20:0 21:0 22:1 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -1 0:2 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:1 13:2 14:0 15:2 16:0 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -1 0:3 1:2 2:1 3:0 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:1 13:1 14:0 15:2 16:0 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -4 0:1 1:1 2:0 3:2 4:0 5:0 6:1 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:2 15:1 16:2 17:1 18:0 19:0 20:1 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -4 0:1 1:1 2:0 3:1 4:0 5:0 6:2 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:1 15:2 16:1 17:2 18:0 19:0 20:2 21:0 22:0 23:0 24:0 
25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -4 0:2 1:2 2:0 3:2 4:0 5:0 6:1 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:2 15:1 16:2 17:1 18:0 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:3 32:0 -4 0:1 1:1 2:0 3:1 4:0 5:0 6:2 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:2 15:2 16:1 17:2 18:0 19:0 20:1 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -4 0:2 1:2 2:0 3:2 4:0 5:0 6:1 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:1 15:1 16:2 17:2 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:1 -0 0:3 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:2 9:2 10:1 11:0 12:0 13:2 14:0 15:0 16:3 17:2 18:1 19:2 20:2 21:2 22:1 23:1 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:3 2:2 3:0 4:1 5:0 6:0 7:0 8:2 9:3 10:0 11:0 12:0 13:2 14:0 15:0 16:2 17:3 18:1 19:2 20:2 21:2 22:1 23:1 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -0 0:3 1:2 2:2 3:0 4:2 5:0 6:0 7:0 8:3 9:2 10:0 11:0 12:0 13:3 14:0 15:0 16:3 17:2 18:2 19:2 20:2 21:2 22:1 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:3 2:2 3:0 4:0 5:0 6:0 7:0 8:2 9:3 10:0 11:0 12:0 13:2 14:0 15:0 16:2 17:2 18:2 19:2 20:2 21:3 22:2 23:1 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:3 2:2 3:0 4:0 5:0 6:0 7:0 8:3 9:2 10:0 11:0 12:0 13:2 14:0 15:0 16:3 17:2 18:3 19:2 20:2 21:3 22:2 23:1 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:3 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:2 9:2 10:0 11:0 12:0 13:2 14:0 15:0 16:3 17:1 18:2 19:3 20:3 21:2 22:3 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:3 2:2 3:0 4:0 5:0 6:0 7:0 8:3 9:2 10:0 11:0 12:0 13:2 14:0 15:0 16:2 17:1 18:2 19:3 20:3 21:2 22:2 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -2 0:2 1:2 2:2 3:3 4:1 5:2 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:2 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:2 25:2 26:2 27:1 28:3 29:0 30:0 31:2 32:3 -2 0:2 1:2 2:2 3:2 4:1 5:3 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:3 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:3 25:2 26:2 27:2 28:2 29:0 30:0 31:3 32:3 -2 0:3 1:2 2:3 3:2 4:2 5:2 6:0 7:2 8:0 9:0 10:0 11:3 12:0 13:0 14:0 15:2 16:3 17:0 18:1 19:0 20:0 21:0 22:0 23:0 24:2 25:1 26:2 27:2 28:1 29:0 30:0 31:2 32:3 -2 0:2 1:3 2:2 3:3 4:3 5:2 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:3 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:3 25:2 26:2 27:1 28:2 29:0 30:0 31:2 32:2 -2 0:2 1:2 2:3 3:2 4:2 5:2 6:0 7:3 8:0 9:0 10:0 11:3 12:0 13:0 14:0 15:2 16:3 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:2 25:2 26:3 27:2 28:2 29:0 30:0 31:2 32:3 -2 0:3 1:2 2:2 3:3 4:3 5:3 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:3 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:2 25:2 26:2 27:2 28:2 29:0 30:0 31:2 32:2 -3 0:3 1:2 2:1 3:0 4:2 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:2 32:0 -3 0:2 1:2 2:1 3:0 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:3 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:2 32:0 -3 0:3 1:1 2:1 3:0 4:1 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:3 32:0 -3 0:2 1:2 2:2 3:1 4:2 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:0 15:3 16:2 17:0 18:0 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:2 32:0 -4 0:1 1:1 2:1 3:3 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:2 15:1 16:3 17:0 18:1 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -4 0:2 1:1 2:2 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:3 15:0 16:3 17:0 18:2 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:1 32:0 -4 0:2 1:1 2:3 3:3 4:0 5:0 6:0 7:0 8:0 
9:0 10:0 11:0 12:0 13:0 14:2 15:0 16:2 17:0 18:2 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:1 28:0 29:0 30:0 31:1 32:0 -4 0:2 1:1 2:2 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:1 15:0 16:1 17:0 18:1 19:0 20:1 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -4 0:1 1:2 2:1 3:1 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:2 15:0 16:2 17:0 18:2 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -4 0:1 1:1 2:1 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:3 15:0 16:2 17:0 18:2 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -4 0:2 1:1 2:2 3:0 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:0 13:0 14:2 15:0 16:2 17:0 18:2 19:0 20:2 21:0 22:0 23:0 24:0 25:0 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -1 0:2 1:2 2:0 3:2 4:0 5:0 6:0 7:0 8:0 9:0 10:0 11:0 12:1 13:1 14:0 15:2 16:2 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:3 28:0 29:0 30:0 31:2 32:0 -1 0:3 1:2 2:0 3:1 4:0 5:0 6:0 7:0 8:0 9:1 10:0 11:0 12:2 13:2 14:0 15:3 16:2 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:3 32:0 -1 0:2 1:3 2:0 3:3 4:0 5:0 6:0 7:0 8:0 9:2 10:0 11:0 12:2 13:2 14:0 15:2 16:3 17:0 18:3 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:0 30:0 31:2 32:0 -1 0:3 1:2 2:0 3:2 4:0 5:0 6:0 7:0 8:0 9:2 10:0 11:0 12:2 13:2 14:0 15:2 16:2 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:1 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:2 2:2 3:1 4:1 5:0 6:0 7:0 8:2 9:0 10:1 11:0 12:0 13:2 14:0 15:1 16:2 17:1 18:2 19:2 20:2 21:2 22:1 23:1 24:0 25:1 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:3 1:2 2:3 3:0 4:1 5:0 6:0 7:0 8:1 9:2 10:0 11:0 12:0 13:2 14:0 15:2 16:1 17:2 18:1 19:1 20:1 21:1 22:1 23:1 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:2 2:2 3:1 4:0 5:0 6:0 7:0 8:0 9:1 10:1 11:0 12:0 13:1 14:0 15:2 16:1 17:2 18:2 19:2 20:1 21:2 22:0 23:0 24:0 25:1 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:3 1:2 2:3 3:2 4:0 5:0 6:0 7:0 8:0 9:2 10:1 11:0 12:0 13:0 14:0 15:2 16:1 17:3 18:2 19:2 20:2 21:2 22:0 23:0 24:0 25:3 26:0 27:0 28:0 29:0 30:0 31:3 32:0 -0 0:2 1:2 2:1 3:1 4:0 5:0 6:0 7:0 8:0 9:2 10:1 11:0 12:0 13:0 14:0 15:2 16:2 17:2 18:2 19:1 20:2 21:2 22:0 23:0 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:2 2:2 3:2 4:0 5:0 6:0 7:0 8:0 9:1 10:0 11:0 12:0 13:0 14:0 15:2 16:2 17:3 18:2 19:2 20:1 21:1 22:0 23:0 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -5 0:2 1:2 2:2 3:1 4:0 5:0 6:2 7:0 8:2 9:2 10:1 11:0 12:0 13:0 14:0 15:1 16:2 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:2 28:0 29:2 30:2 31:2 32:0 -5 0:3 1:2 2:0 3:0 4:0 5:0 6:2 7:0 8:2 9:2 10:0 11:0 12:0 13:0 14:0 15:2 16:1 17:0 18:2 19:0 20:0 21:0 22:0 23:0 24:0 25:0 26:0 27:1 28:0 29:2 30:3 31:3 32:0 -0 0:2 1:2 2:2 3:3 4:0 5:0 6:0 7:0 8:2 9:2 10:1 11:0 12:0 13:1 14:0 15:0 16:2 17:1 18:2 19:3 20:3 21:3 22:0 23:1 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -0 0:3 1:2 2:2 3:3 4:0 5:0 6:0 7:0 8:2 9:0 10:0 11:0 12:0 13:2 14:0 15:0 16:2 17:2 18:2 19:2 20:2 21:2 22:0 23:2 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:1 32:0 -0 0:2 1:2 2:2 3:2 4:0 5:0 6:0 7:0 8:2 9:2 10:1 11:0 12:0 13:1 14:0 15:0 16:3 17:0 18:3 19:2 20:2 21:2 22:0 23:2 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:1 2:2 3:0 4:0 5:0 6:0 7:0 8:3 9:2 10:1 11:0 12:0 13:2 14:0 15:0 16:2 17:0 18:2 19:3 20:2 21:2 22:0 23:2 24:0 25:3 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:3 1:3 2:3 3:0 4:0 5:0 6:0 7:0 8:2 9:3 10:0 11:0 12:0 13:1 14:0 15:0 16:3 17:0 18:3 19:3 20:3 21:3 22:0 23:1 24:0 25:3 26:0 27:0 28:0 29:0 30:0 31:3 32:0 -0 0:2 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:2 9:3 10:0 11:0 12:0 13:2 14:0 15:0 16:2 17:0 18:2 19:3 20:2 21:2 22:0 23:2 24:0 
25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:2 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:2 9:2 10:0 11:0 12:0 13:2 14:0 15:0 16:2 17:0 18:2 19:2 20:2 21:2 22:0 23:2 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -0 0:3 1:2 2:2 3:0 4:0 5:0 6:0 7:0 8:0 9:1 10:1 11:0 12:0 13:2 14:0 15:0 16:2 17:0 18:1 19:1 20:2 21:2 22:1 23:0 24:0 25:2 26:0 27:0 28:0 29:0 30:0 31:2 32:0 -2 0:2 1:1 2:1 3:2 4:2 5:2 6:0 7:2 8:0 9:0 10:0 11:2 12:1 13:0 14:0 15:3 16:2 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:2 27:2 28:3 29:0 30:0 31:2 32:3 -2 0:2 1:2 2:2 3:2 4:3 5:3 6:0 7:2 8:0 9:0 10:0 11:2 12:0 13:0 14:0 15:2 16:3 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:2 27:3 28:2 29:0 30:0 31:3 32:2 -2 0:2 1:2 2:2 3:2 4:1 5:2 6:0 7:2 8:0 9:0 10:0 11:2 12:1 13:0 14:0 15:3 16:2 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:2 27:2 28:3 29:0 30:0 31:2 32:3 -2 0:2 1:2 2:2 3:2 4:1 5:2 6:0 7:2 8:0 9:0 10:0 11:3 12:1 13:0 14:0 15:2 16:2 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:2 27:3 28:2 29:0 30:0 31:2 32:3 -2 0:2 1:1 2:2 3:2 4:0 5:3 6:0 7:2 8:0 9:0 10:0 11:2 12:1 13:0 14:0 15:3 16:2 17:1 18:1 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:2 27:3 28:2 29:0 30:0 31:3 32:2 -2 0:3 1:1 2:2 3:3 4:0 5:3 6:0 7:0 8:0 9:1 10:0 11:2 12:1 13:0 14:0 15:2 16:3 17:1 18:2 19:0 20:0 21:0 22:0 23:0 24:1 25:0 26:3 27:2 28:3 29:0 30:0 31:2 32:2 -2 0:2 1:1 2:2 3:2 4:0 5:2 6:0 7:0 8:0 9:1 10:0 11:1 12:1 13:0 14:0 15:2 16:3 17:1 18:2 19:0 20:0 21:0 22:0 23:0 24:2 25:0 26:2 27:2 28:2 29:0 30:0 31:2 32:2 diff --git a/ml-xgboost/demo/data/dermatology_process.py b/ml-xgboost/demo/data/dermatology_process.py deleted file mode 100644 index 3fdb012..0000000 --- a/ml-xgboost/demo/data/dermatology_process.py +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/python - -import numpy as np - -# label need to be 0 to num_class -1 -data = np.loadtxt('./dermatology.data', delimiter=',', - converters={33: lambda x:int(x == '?'), 34: lambda x:int(x) - 1}) -sz = data.shape - -train = data[:int(sz[0] * 0.7), :] -test = data[int(sz[0] * 0.7):, :] - -train_X = train[:, :33] -train_Y = train[:, 34] - -test_X = test[:, :33] -test_Y = test[:, 34] - -def process(X, Y): - s = "" - for i in range(len(X)): - s += str(int(Y[i])) - for j in range(len(X[i])): - s += (" %d:%d" % (j, int(X[i][j]))) - s += "\n" - return s - -with open("./dermatology.data.train", 'w') as fp: - fp.write(process(train_X, train_Y)) - -with open("./dermatology.data.test", 'w') as fp: - fp.write(process(test_X, test_Y)) \ No newline at end of file diff --git a/ml-xgboost/demo/data/featmap.txt b/ml-xgboost/demo/data/featmap.txt deleted file mode 100644 index c9e6465..0000000 --- a/ml-xgboost/demo/data/featmap.txt +++ /dev/null @@ -1,126 +0,0 @@ -0 cap-shape=bell i -1 cap-shape=conical i -2 cap-shape=convex i -3 cap-shape=flat i -4 cap-shape=knobbed i -5 cap-shape=sunken i -6 cap-surface=fibrous i -7 cap-surface=grooves i -8 cap-surface=scaly i -9 cap-surface=smooth i -10 cap-color=brown i -11 cap-color=buff i -12 cap-color=cinnamon i -13 cap-color=gray i -14 cap-color=green i -15 cap-color=pink i -16 cap-color=purple i -17 cap-color=red i -18 cap-color=white i -19 cap-color=yellow i -20 bruises?=bruises i -21 bruises?=no i -22 odor=almond i -23 odor=anise i -24 odor=creosote i -25 odor=fishy i -26 odor=foul i -27 odor=musty i -28 odor=none i -29 odor=pungent i -30 odor=spicy i -31 gill-attachment=attached i -32 gill-attachment=descending i -33 gill-attachment=free i -34 gill-attachment=notched i -35 gill-spacing=close i -36 gill-spacing=crowded i -37 gill-spacing=distant i -38 gill-size=broad i -39 gill-size=narrow i 
-40 gill-color=black i -41 gill-color=brown i -42 gill-color=buff i -43 gill-color=chocolate i -44 gill-color=gray i -45 gill-color=green i -46 gill-color=orange i -47 gill-color=pink i -48 gill-color=purple i -49 gill-color=red i -50 gill-color=white i -51 gill-color=yellow i -52 stalk-shape=enlarging i -53 stalk-shape=tapering i -54 stalk-root=bulbous i -55 stalk-root=club i -56 stalk-root=cup i -57 stalk-root=equal i -58 stalk-root=rhizomorphs i -59 stalk-root=rooted i -60 stalk-root=missing i -61 stalk-surface-above-ring=fibrous i -62 stalk-surface-above-ring=scaly i -63 stalk-surface-above-ring=silky i -64 stalk-surface-above-ring=smooth i -65 stalk-surface-below-ring=fibrous i -66 stalk-surface-below-ring=scaly i -67 stalk-surface-below-ring=silky i -68 stalk-surface-below-ring=smooth i -69 stalk-color-above-ring=brown i -70 stalk-color-above-ring=buff i -71 stalk-color-above-ring=cinnamon i -72 stalk-color-above-ring=gray i -73 stalk-color-above-ring=orange i -74 stalk-color-above-ring=pink i -75 stalk-color-above-ring=red i -76 stalk-color-above-ring=white i -77 stalk-color-above-ring=yellow i -78 stalk-color-below-ring=brown i -79 stalk-color-below-ring=buff i -80 stalk-color-below-ring=cinnamon i -81 stalk-color-below-ring=gray i -82 stalk-color-below-ring=orange i -83 stalk-color-below-ring=pink i -84 stalk-color-below-ring=red i -85 stalk-color-below-ring=white i -86 stalk-color-below-ring=yellow i -87 veil-type=partial i -88 veil-type=universal i -89 veil-color=brown i -90 veil-color=orange i -91 veil-color=white i -92 veil-color=yellow i -93 ring-number=none i -94 ring-number=one i -95 ring-number=two i -96 ring-type=cobwebby i -97 ring-type=evanescent i -98 ring-type=flaring i -99 ring-type=large i -100 ring-type=none i -101 ring-type=pendant i -102 ring-type=sheathing i -103 ring-type=zone i -104 spore-print-color=black i -105 spore-print-color=brown i -106 spore-print-color=buff i -107 spore-print-color=chocolate i -108 spore-print-color=green i -109 spore-print-color=orange i -110 spore-print-color=purple i -111 spore-print-color=white i -112 spore-print-color=yellow i -113 population=abundant i -114 population=clustered i -115 population=numerous i -116 population=scattered i -117 population=several i -118 population=solitary i -119 habitat=grasses i -120 habitat=leaves i -121 habitat=meadows i -122 habitat=paths i -123 habitat=urban i -124 habitat=waste i -125 habitat=woods i diff --git a/ml-xgboost/demo/data/gen_autoclaims.R b/ml-xgboost/demo/data/gen_autoclaims.R deleted file mode 100644 index 5465db0..0000000 --- a/ml-xgboost/demo/data/gen_autoclaims.R +++ /dev/null @@ -1,18 +0,0 @@ -site <- 'http://cran.r-project.org' -if (!require('dummies')) - install.packages('dummies', repos=site) -if (!require('insuranceData')) - install.packages('insuranceData', repos=site) - -library(dummies) -library(insuranceData) - -data(AutoClaims) -data = AutoClaims - -data$STATE = as.factor(data$STATE) -data$CLASS = as.factor(data$CLASS) -data$GENDER = as.factor(data$GENDER) - -data.dummy <- dummy.data.frame(data, dummy.class='factor', omit.constants=T); -write.table(data.dummy, 'autoclaims.csv', sep=',', row.names=F, col.names=F, quote=F) diff --git a/ml-xgboost/demo/data/veterans_lung_cancer.csv b/ml-xgboost/demo/data/veterans_lung_cancer.csv deleted file mode 100644 index 24466b5..0000000 --- a/ml-xgboost/demo/data/veterans_lung_cancer.csv +++ /dev/null @@ -1,138 +0,0 @@ 
-Survival_label_lower_bound,Survival_label_upper_bound,Age_in_years,Karnofsky_score,Months_from_Diagnosis,Celltype=adeno,Celltype=large,Celltype=smallcell,Celltype=squamous,Prior_therapy=no,Prior_therapy=yes,Treatment=standard,Treatment=test -72.0,72.0,69.0,60.0,7.0,0,0,0,1,1,0,1,0 -411.0,411.0,64.0,70.0,5.0,0,0,0,1,0,1,1,0 -228.0,228.0,38.0,60.0,3.0,0,0,0,1,1,0,1,0 -126.0,126.0,63.0,60.0,9.0,0,0,0,1,0,1,1,0 -118.0,118.0,65.0,70.0,11.0,0,0,0,1,0,1,1,0 -10.0,10.0,49.0,20.0,5.0,0,0,0,1,1,0,1,0 -82.0,82.0,69.0,40.0,10.0,0,0,0,1,0,1,1,0 -110.0,110.0,68.0,80.0,29.0,0,0,0,1,1,0,1,0 -314.0,314.0,43.0,50.0,18.0,0,0,0,1,1,0,1,0 -100.0,inf,70.0,70.0,6.0,0,0,0,1,1,0,1,0 -42.0,42.0,81.0,60.0,4.0,0,0,0,1,1,0,1,0 -8.0,8.0,63.0,40.0,58.0,0,0,0,1,0,1,1,0 -144.0,144.0,63.0,30.0,4.0,0,0,0,1,1,0,1,0 -25.0,inf,52.0,80.0,9.0,0,0,0,1,0,1,1,0 -11.0,11.0,48.0,70.0,11.0,0,0,0,1,0,1,1,0 -30.0,30.0,61.0,60.0,3.0,0,0,1,0,1,0,1,0 -384.0,384.0,42.0,60.0,9.0,0,0,1,0,1,0,1,0 -4.0,4.0,35.0,40.0,2.0,0,0,1,0,1,0,1,0 -54.0,54.0,63.0,80.0,4.0,0,0,1,0,0,1,1,0 -13.0,13.0,56.0,60.0,4.0,0,0,1,0,1,0,1,0 -123.0,inf,55.0,40.0,3.0,0,0,1,0,1,0,1,0 -97.0,inf,67.0,60.0,5.0,0,0,1,0,1,0,1,0 -153.0,153.0,63.0,60.0,14.0,0,0,1,0,0,1,1,0 -59.0,59.0,65.0,30.0,2.0,0,0,1,0,1,0,1,0 -117.0,117.0,46.0,80.0,3.0,0,0,1,0,1,0,1,0 -16.0,16.0,53.0,30.0,4.0,0,0,1,0,0,1,1,0 -151.0,151.0,69.0,50.0,12.0,0,0,1,0,1,0,1,0 -22.0,22.0,68.0,60.0,4.0,0,0,1,0,1,0,1,0 -56.0,56.0,43.0,80.0,12.0,0,0,1,0,0,1,1,0 -21.0,21.0,55.0,40.0,2.0,0,0,1,0,0,1,1,0 -18.0,18.0,42.0,20.0,15.0,0,0,1,0,1,0,1,0 -139.0,139.0,64.0,80.0,2.0,0,0,1,0,1,0,1,0 -20.0,20.0,65.0,30.0,5.0,0,0,1,0,1,0,1,0 -31.0,31.0,65.0,75.0,3.0,0,0,1,0,1,0,1,0 -52.0,52.0,55.0,70.0,2.0,0,0,1,0,1,0,1,0 -287.0,287.0,66.0,60.0,25.0,0,0,1,0,0,1,1,0 -18.0,18.0,60.0,30.0,4.0,0,0,1,0,1,0,1,0 -51.0,51.0,67.0,60.0,1.0,0,0,1,0,1,0,1,0 -122.0,122.0,53.0,80.0,28.0,0,0,1,0,1,0,1,0 -27.0,27.0,62.0,60.0,8.0,0,0,1,0,1,0,1,0 -54.0,54.0,67.0,70.0,1.0,0,0,1,0,1,0,1,0 -7.0,7.0,72.0,50.0,7.0,0,0,1,0,1,0,1,0 -63.0,63.0,48.0,50.0,11.0,0,0,1,0,1,0,1,0 -392.0,392.0,68.0,40.0,4.0,0,0,1,0,1,0,1,0 -10.0,10.0,67.0,40.0,23.0,0,0,1,0,0,1,1,0 -8.0,8.0,61.0,20.0,19.0,1,0,0,0,0,1,1,0 -92.0,92.0,60.0,70.0,10.0,1,0,0,0,1,0,1,0 -35.0,35.0,62.0,40.0,6.0,1,0,0,0,1,0,1,0 -117.0,117.0,38.0,80.0,2.0,1,0,0,0,1,0,1,0 -132.0,132.0,50.0,80.0,5.0,1,0,0,0,1,0,1,0 -12.0,12.0,63.0,50.0,4.0,1,0,0,0,0,1,1,0 -162.0,162.0,64.0,80.0,5.0,1,0,0,0,1,0,1,0 -3.0,3.0,43.0,30.0,3.0,1,0,0,0,1,0,1,0 -95.0,95.0,34.0,80.0,4.0,1,0,0,0,1,0,1,0 -177.0,177.0,66.0,50.0,16.0,0,1,0,0,0,1,1,0 -162.0,162.0,62.0,80.0,5.0,0,1,0,0,1,0,1,0 -216.0,216.0,52.0,50.0,15.0,0,1,0,0,1,0,1,0 -553.0,553.0,47.0,70.0,2.0,0,1,0,0,1,0,1,0 -278.0,278.0,63.0,60.0,12.0,0,1,0,0,1,0,1,0 -12.0,12.0,68.0,40.0,12.0,0,1,0,0,0,1,1,0 -260.0,260.0,45.0,80.0,5.0,0,1,0,0,1,0,1,0 -200.0,200.0,41.0,80.0,12.0,0,1,0,0,0,1,1,0 -156.0,156.0,66.0,70.0,2.0,0,1,0,0,1,0,1,0 -182.0,inf,62.0,90.0,2.0,0,1,0,0,1,0,1,0 -143.0,143.0,60.0,90.0,8.0,0,1,0,0,1,0,1,0 -105.0,105.0,66.0,80.0,11.0,0,1,0,0,1,0,1,0 -103.0,103.0,38.0,80.0,5.0,0,1,0,0,1,0,1,0 -250.0,250.0,53.0,70.0,8.0,0,1,0,0,0,1,1,0 -100.0,100.0,37.0,60.0,13.0,0,1,0,0,0,1,1,0 -999.0,999.0,54.0,90.0,12.0,0,0,0,1,0,1,0,1 -112.0,112.0,60.0,80.0,6.0,0,0,0,1,1,0,0,1 -87.0,inf,48.0,80.0,3.0,0,0,0,1,1,0,0,1 -231.0,inf,52.0,50.0,8.0,0,0,0,1,0,1,0,1 -242.0,242.0,70.0,50.0,1.0,0,0,0,1,1,0,0,1 -991.0,991.0,50.0,70.0,7.0,0,0,0,1,0,1,0,1 -111.0,111.0,62.0,70.0,3.0,0,0,0,1,1,0,0,1 -1.0,1.0,65.0,20.0,21.0,0,0,0,1,0,1,0,1 -587.0,587.0,58.0,60.0,3.0,0,0,0,1,1,0,0,1 
-389.0,389.0,62.0,90.0,2.0,0,0,0,1,1,0,0,1 -33.0,33.0,64.0,30.0,6.0,0,0,0,1,1,0,0,1 -25.0,25.0,63.0,20.0,36.0,0,0,0,1,1,0,0,1 -357.0,357.0,58.0,70.0,13.0,0,0,0,1,1,0,0,1 -467.0,467.0,64.0,90.0,2.0,0,0,0,1,1,0,0,1 -201.0,201.0,52.0,80.0,28.0,0,0,0,1,0,1,0,1 -1.0,1.0,35.0,50.0,7.0,0,0,0,1,1,0,0,1 -30.0,30.0,63.0,70.0,11.0,0,0,0,1,1,0,0,1 -44.0,44.0,70.0,60.0,13.0,0,0,0,1,0,1,0,1 -283.0,283.0,51.0,90.0,2.0,0,0,0,1,1,0,0,1 -15.0,15.0,40.0,50.0,13.0,0,0,0,1,0,1,0,1 -25.0,25.0,69.0,30.0,2.0,0,0,1,0,1,0,0,1 -103.0,inf,36.0,70.0,22.0,0,0,1,0,0,1,0,1 -21.0,21.0,71.0,20.0,4.0,0,0,1,0,1,0,0,1 -13.0,13.0,62.0,30.0,2.0,0,0,1,0,1,0,0,1 -87.0,87.0,60.0,60.0,2.0,0,0,1,0,1,0,0,1 -2.0,2.0,44.0,40.0,36.0,0,0,1,0,0,1,0,1 -20.0,20.0,54.0,30.0,9.0,0,0,1,0,0,1,0,1 -7.0,7.0,66.0,20.0,11.0,0,0,1,0,1,0,0,1 -24.0,24.0,49.0,60.0,8.0,0,0,1,0,1,0,0,1 -99.0,99.0,72.0,70.0,3.0,0,0,1,0,1,0,0,1 -8.0,8.0,68.0,80.0,2.0,0,0,1,0,1,0,0,1 -99.0,99.0,62.0,85.0,4.0,0,0,1,0,1,0,0,1 -61.0,61.0,71.0,70.0,2.0,0,0,1,0,1,0,0,1 -25.0,25.0,70.0,70.0,2.0,0,0,1,0,1,0,0,1 -95.0,95.0,61.0,70.0,1.0,0,0,1,0,1,0,0,1 -80.0,80.0,71.0,50.0,17.0,0,0,1,0,1,0,0,1 -51.0,51.0,59.0,30.0,87.0,0,0,1,0,0,1,0,1 -29.0,29.0,67.0,40.0,8.0,0,0,1,0,1,0,0,1 -24.0,24.0,60.0,40.0,2.0,1,0,0,0,1,0,0,1 -18.0,18.0,69.0,40.0,5.0,1,0,0,0,0,1,0,1 -83.0,inf,57.0,99.0,3.0,1,0,0,0,1,0,0,1 -31.0,31.0,39.0,80.0,3.0,1,0,0,0,1,0,0,1 -51.0,51.0,62.0,60.0,5.0,1,0,0,0,1,0,0,1 -90.0,90.0,50.0,60.0,22.0,1,0,0,0,0,1,0,1 -52.0,52.0,43.0,60.0,3.0,1,0,0,0,1,0,0,1 -73.0,73.0,70.0,60.0,3.0,1,0,0,0,1,0,0,1 -8.0,8.0,66.0,50.0,5.0,1,0,0,0,1,0,0,1 -36.0,36.0,61.0,70.0,8.0,1,0,0,0,1,0,0,1 -48.0,48.0,81.0,10.0,4.0,1,0,0,0,1,0,0,1 -7.0,7.0,58.0,40.0,4.0,1,0,0,0,1,0,0,1 -140.0,140.0,63.0,70.0,3.0,1,0,0,0,1,0,0,1 -186.0,186.0,60.0,90.0,3.0,1,0,0,0,1,0,0,1 -84.0,84.0,62.0,80.0,4.0,1,0,0,0,0,1,0,1 -19.0,19.0,42.0,50.0,10.0,1,0,0,0,1,0,0,1 -45.0,45.0,69.0,40.0,3.0,1,0,0,0,1,0,0,1 -80.0,80.0,63.0,40.0,4.0,1,0,0,0,1,0,0,1 -52.0,52.0,45.0,60.0,4.0,0,1,0,0,1,0,0,1 -164.0,164.0,68.0,70.0,15.0,0,1,0,0,0,1,0,1 -19.0,19.0,39.0,30.0,4.0,0,1,0,0,0,1,0,1 -53.0,53.0,66.0,60.0,12.0,0,1,0,0,1,0,0,1 -15.0,15.0,63.0,30.0,5.0,0,1,0,0,1,0,0,1 -43.0,43.0,49.0,60.0,11.0,0,1,0,0,0,1,0,1 -340.0,340.0,64.0,80.0,10.0,0,1,0,0,0,1,0,1 -133.0,133.0,65.0,75.0,1.0,0,1,0,0,1,0,0,1 -111.0,111.0,64.0,60.0,5.0,0,1,0,0,1,0,0,1 -231.0,231.0,67.0,70.0,18.0,0,1,0,0,0,1,0,1 -378.0,378.0,65.0,80.0,4.0,0,1,0,0,1,0,0,1 -49.0,49.0,37.0,30.0,3.0,0,1,0,0,1,0,0,1 diff --git a/ml-xgboost/demo/distributed-training/README.md b/ml-xgboost/demo/distributed-training/README.md deleted file mode 100644 index 7a7a019..0000000 --- a/ml-xgboost/demo/distributed-training/README.md +++ /dev/null @@ -1,27 +0,0 @@ -Distributed XGBoost Training -============================ -This is a tutorial on distributed XGBoost training. -Currently xgboost supports distributed training via the CLI program with a configuration file. -There are also plans to add distributed support to the python and other language bindings; please open an issue -if you are interested in contributing. - -Build XGBoost with Distributed Filesystem Support ------------------------------------------------- -To use distributed xgboost, you only need to turn on the corresponding cmake options to build -with distributed filesystem (HDFS, S3, or Azure) support. - -``` -cmake -DUSE_HDFS=ON -DUSE_S3=ON -DUSE_AZURE=ON -``` - - -Step by Step Tutorial on AWS ---------------------------- -Check out [this tutorial](https://xgboost.readthedocs.org/en/latest/tutorials/aws_yarn.html) for running distributed xgboost.
- - -Model Analysis -------------- -XGBoost models are exchangeable across all bindings and platforms. -This means you can use Python or R to analyze the learned model and make predictions. -For example, you can use the [plot_model.ipynb](plot_model.ipynb) to visualize the learned model. diff --git a/ml-xgboost/demo/distributed-training/mushroom.aws.conf b/ml-xgboost/demo/distributed-training/mushroom.aws.conf deleted file mode 100644 index 0428376..0000000 --- a/ml-xgboost/demo/distributed-training/mushroom.aws.conf +++ /dev/null @@ -1,27 +0,0 @@ -# General Parameters, see comment for each definition -# choose the booster, can be gbtree or gblinear -booster = gbtree -# choose logistic regression loss function for binary classification -objective = binary:logistic - -# Tree Booster Parameters -# step size shrinkage -eta = 1.0 -# minimum loss reduction required to make a further partition -gamma = 1.0 -# minimum sum of instance weight (hessian) needed in a child -min_child_weight = 1 -# maximum depth of a tree -max_depth = 3 - -# Task Parameters -# the number of rounds of boosting -num_round = 2 -# 0 means do not save any model except the final round model -save_period = 0 -# The path of training data -data = "s3://mybucket/xgb-demo/train" -# The path of validation data, used to monitor the training process; here [test] sets the name of the validation set -# evaluate on the training data as well each round -eval_train = 1 - diff --git a/ml-xgboost/demo/distributed-training/plot_model.ipynb b/ml-xgboost/demo/distributed-training/plot_model.ipynb deleted file mode 100644 index 227f960..0000000 --- a/ml-xgboost/demo/distributed-training/plot_model.ipynb +++ /dev/null @@ -1,107 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# XGBoost Model Analysis\n", - "\n", - "This notebook can be used to load and analyze models learned from all xgboost bindings, including distributed training.
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import sys\n", - "import os\n", - "%matplotlib inline " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Please change the ```pkg_path``` and ```model_file``` to be correct path" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "pkg_path = '../../python-package/'\n", - "model_file = 's3://my-bucket/xgb-demo/model/0002.model'\n", - "sys.path.insert(0, pkg_path)\n", - "import xgboost as xgb" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Plot the Feature Importance" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# plot the first two trees.\n", - "bst = xgb.Booster(model_file=model_file)\n", - "xgb.plot_importance(bst)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Plot the First Tree" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "tree_id = 0\n", - "xgb.to_graphviz(bst, tree_id)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 2", - "language": "python", - "name": "python2" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 2 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython2", - "version": "2.7.3" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/ml-xgboost/demo/distributed-training/run_aws.sh b/ml-xgboost/demo/distributed-training/run_aws.sh deleted file mode 100644 index 0b7cb17..0000000 --- a/ml-xgboost/demo/distributed-training/run_aws.sh +++ /dev/null @@ -1,11 +0,0 @@ -# This is the example script to run distributed xgboost on AWS. -# Change the following two lines for configuration - -export BUCKET=mybucket - -# submit the job to YARN -../../dmlc-core/tracker/dmlc-submit --cluster=yarn --num-workers=2 --worker-cores=2\ - ../../xgboost mushroom.aws.conf nthread=2\ - data=s3://${BUCKET}/xgb-demo/train\ - eval[test]=s3://${BUCKET}/xgb-demo/test\ - model_dir=s3://${BUCKET}/xgb-demo/model diff --git a/ml-xgboost/demo/gpu_acceleration/README.md b/ml-xgboost/demo/gpu_acceleration/README.md deleted file mode 100644 index f6b0539..0000000 --- a/ml-xgboost/demo/gpu_acceleration/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# GPU Acceleration Demo - -`cover_type.py` shows how to train a model on the [forest cover type](https://archive.ics.uci.edu/ml/datasets/covertype) dataset using GPU acceleration. The forest cover type dataset has 581,012 rows and 54 features, making it time consuming to process. We compare the run-time and accuracy of the GPU and CPU histogram algorithms. - -`memory.py` shows how to repeatedly train xgboost models while freeing memory between iterations. 
diff --git a/ml-xgboost/demo/gpu_acceleration/cover_type.py b/ml-xgboost/demo/gpu_acceleration/cover_type.py deleted file mode 100644 index 5a073eb..0000000 --- a/ml-xgboost/demo/gpu_acceleration/cover_type.py +++ /dev/null @@ -1,40 +0,0 @@ -import xgboost as xgb -import numpy as np -from sklearn.datasets import fetch_covtype -from sklearn.model_selection import train_test_split -import time - -# Fetch dataset using sklearn -cov = fetch_covtype() -X = cov.data -y = cov.target - -# Create 0.75/0.25 train/test split -X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, train_size=0.75, - random_state=42) - -# Specify sufficient boosting iterations to reach a minimum -num_round = 3000 - -# Leave most parameters as default -param = {'objective': 'multi:softmax', # Specify multiclass classification - 'num_class': 8, # Number of possible output classes - 'tree_method': 'gpu_hist' # Use GPU accelerated algorithm - } - -# Convert input data from numpy to XGBoost format -dtrain = xgb.DMatrix(X_train, label=y_train) -dtest = xgb.DMatrix(X_test, label=y_test) - -gpu_res = {} # Store accuracy result -tmp = time.time() -# Train model -xgb.train(param, dtrain, num_round, evals=[(dtest, 'test')], evals_result=gpu_res) -print("GPU Training Time: %s seconds" % (str(time.time() - tmp))) - -# Repeat for CPU algorithm -tmp = time.time() -param['tree_method'] = 'hist' -cpu_res = {} -xgb.train(param, dtrain, num_round, evals=[(dtest, 'test')], evals_result=cpu_res) -print("CPU Training Time: %s seconds" % (str(time.time() - tmp))) diff --git a/ml-xgboost/demo/gpu_acceleration/memory.py b/ml-xgboost/demo/gpu_acceleration/memory.py deleted file mode 100644 index f0f9555..0000000 --- a/ml-xgboost/demo/gpu_acceleration/memory.py +++ /dev/null @@ -1,51 +0,0 @@ -import xgboost as xgb -import numpy as np -import time -import pickle -import GPUtil - -n = 10000 -m = 1000 -X = np.random.random((n, m)) -y = np.random.random(n) - -param = {'objective': 'binary:logistic', - 'tree_method': 'gpu_hist' - } -iterations = 5 -dtrain = xgb.DMatrix(X, label=y) - -# High memory usage -# active bst objects with device memory persist across iterations -boosters = [] -for i in range(iterations): - bst = xgb.train(param, dtrain) - boosters.append(bst) - -print("Example 1") -GPUtil.showUtilization() -del boosters - -# Better memory usage -# The bst object can be destroyed by the python gc, freeing device memory -# The gc may not immediately free the object, so more than one booster can be allocated at a time -boosters = [] -for i in range(iterations): - bst = xgb.train(param, dtrain) - boosters.append(pickle.dumps(bst)) - -print("Example 2") -GPUtil.showUtilization() -del boosters - -# Best memory usage -# The gc explicitly frees the booster before starting the next iteration -boosters = [] -for i in range(iterations): - bst = xgb.train(param, dtrain) - boosters.append(pickle.dumps(bst)) - del bst - -print("Example 3") -GPUtil.showUtilization() -del boosters diff --git a/ml-xgboost/demo/guide-python/README.md b/ml-xgboost/demo/guide-python/README.md deleted file mode 100644 index 58110d6..0000000 --- a/ml-xgboost/demo/guide-python/README.md +++ /dev/null @@ -1,16 +0,0 @@ -XGBoost Python Feature Walkthrough -================================== -* [Basic walkthrough of wrappers](basic_walkthrough.py) -* [Customize loss function, and evaluation metric](custom_objective.py) -* [Re-implement RMSLE as customized metric and objective](custom_rmsle.py) -* [Re-Implement `multi:softmax` objective as customized 
objective](custom_softmax.py) -* [Boosting from existing prediction](boost_from_prediction.py) -* [Predicting using first n trees](predict_first_ntree.py) -* [Generalized Linear Model](generalized_linear_model.py) -* [Cross validation](cross_validation.py) -* [Predicting leaf indices](predict_leaf_indices.py) -* [Sklearn Wrapper](sklearn_examples.py) -* [Sklearn Parallel](sklearn_parallel.py) -* [Sklearn access evals result](sklearn_evals_result.py) -* [Access evals result](evals_result.py) -* [External Memory](external_memory.py) diff --git a/ml-xgboost/demo/guide-python/basic_walkthrough.py b/ml-xgboost/demo/guide-python/basic_walkthrough.py deleted file mode 100644 index ec93968..0000000 --- a/ml-xgboost/demo/guide-python/basic_walkthrough.py +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/env python -import numpy as np -import scipy.sparse -import pickle -import xgboost as xgb -import os - -# Make sure the demo knows where to load the data. -CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) -XGBOOST_ROOT_DIR = os.path.dirname(os.path.dirname(CURRENT_DIR)) -DEMO_DIR = os.path.join(XGBOOST_ROOT_DIR, 'demo') - -# simple example -# load file from text file, also binary buffer generated by xgboost -dtrain = xgb.DMatrix(os.path.join(DEMO_DIR, 'data', 'agaricus.txt.train')) -dtest = xgb.DMatrix(os.path.join(DEMO_DIR, 'data', 'agaricus.txt.test')) - -# specify parameters via map, definition are same as c++ version -param = {'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic'} - -# specify validations set to watch performance -watchlist = [(dtest, 'eval'), (dtrain, 'train')] -num_round = 2 -bst = xgb.train(param, dtrain, num_round, watchlist) - -# this is prediction -preds = bst.predict(dtest) -labels = dtest.get_label() -print('error=%f' % - (sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i]) / - float(len(preds)))) -bst.save_model('0001.model') -# dump model -bst.dump_model('dump.raw.txt') -# dump model with feature map -bst.dump_model('dump.nice.txt', os.path.join(DEMO_DIR, 'data/featmap.txt')) - -# save dmatrix into binary buffer -dtest.save_binary('dtest.buffer') -# save model -bst.save_model('xgb.model') -# load model and data in -bst2 = xgb.Booster(model_file='xgb.model') -dtest2 = xgb.DMatrix('dtest.buffer') -preds2 = bst2.predict(dtest2) -# assert they are the same -assert np.sum(np.abs(preds2 - preds)) == 0 - -# alternatively, you can pickle the booster -pks = pickle.dumps(bst2) -# load model and data in -bst3 = pickle.loads(pks) -preds3 = bst3.predict(dtest2) -# assert they are the same -assert np.sum(np.abs(preds3 - preds)) == 0 - -### -# build dmatrix from scipy.sparse -print('start running example of build DMatrix from scipy.sparse CSR Matrix') -labels = [] -row = [] -col = [] -dat = [] -i = 0 -for l in open(os.path.join(DEMO_DIR, 'data', 'agaricus.txt.train')): - arr = l.split() - labels.append(int(arr[0])) - for it in arr[1:]: - k, v = it.split(':') - row.append(i) - col.append(int(k)) - dat.append(float(v)) - i += 1 -csr = scipy.sparse.csr_matrix((dat, (row, col))) -dtrain = xgb.DMatrix(csr, label=labels) -watchlist = [(dtest, 'eval'), (dtrain, 'train')] -bst = xgb.train(param, dtrain, num_round, watchlist) - -print('start running example of build DMatrix from scipy.sparse CSC Matrix') -# we can also construct from csc matrix -csc = scipy.sparse.csc_matrix((dat, (row, col))) -dtrain = xgb.DMatrix(csc, label=labels) -watchlist = [(dtest, 'eval'), (dtrain, 'train')] -bst = xgb.train(param, dtrain, num_round, watchlist) - -print('start running example of 
build DMatrix from numpy array') -# NOTE: npymat is numpy array, we will convert it into scipy.sparse.csr_matrix -# in internal implementation then convert to DMatrix -npymat = csr.todense() -dtrain = xgb.DMatrix(npymat, label=labels) -watchlist = [(dtest, 'eval'), (dtrain, 'train')] -bst = xgb.train(param, dtrain, num_round, watchlist) diff --git a/ml-xgboost/demo/guide-python/boost_from_prediction.py b/ml-xgboost/demo/guide-python/boost_from_prediction.py deleted file mode 100644 index 948b47a..0000000 --- a/ml-xgboost/demo/guide-python/boost_from_prediction.py +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/python -import xgboost as xgb - -dtrain = xgb.DMatrix('../data/agaricus.txt.train') -dtest = xgb.DMatrix('../data/agaricus.txt.test') -watchlist = [(dtest, 'eval'), (dtrain, 'train')] -### -# advanced: start from an initial base prediction -# -print('start running example to start from an initial prediction') -# specify parameters via map; definitions are the same as the C++ version -param = {'max_depth': 2, 'eta': 1, 'silent': 1, 'objective': 'binary:logistic'} -# train xgboost for 1 round -bst = xgb.train(param, dtrain, 1, watchlist) -# Note: we need the margin value, not the transformed prediction, in -# set_base_margin. -# Predicting with output_margin=True will always give you margin values -# before the logistic transformation -ptrain = bst.predict(dtrain, output_margin=True) -ptest = bst.predict(dtest, output_margin=True) -dtrain.set_base_margin(ptrain) -dtest.set_base_margin(ptest) - -print('this is the result of running from the initial prediction') -bst = xgb.train(param, dtrain, 1, watchlist) diff --git a/ml-xgboost/demo/guide-python/cross_validation.py b/ml-xgboost/demo/guide-python/cross_validation.py deleted file mode 100644 index 948992c..0000000 --- a/ml-xgboost/demo/guide-python/cross_validation.py +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/python -import numpy as np -import xgboost as xgb - -### load data and do training -dtrain = xgb.DMatrix('../data/agaricus.txt.train') -param = {'max_depth':2, 'eta':1, 'silent':1, 'objective':'binary:logistic'} -num_round = 2 - -print('running cross validation') -# do cross validation; this will print results out as -# [iteration] metric_name:mean_value+std_value -# std_value is the standard deviation of the metric -xgb.cv(param, dtrain, num_round, nfold=5, - metrics={'error'}, seed=0, - callbacks=[xgb.callback.print_evaluation(show_stdv=True)]) - -print('running cross validation, disable standard deviation display') -# do cross validation; this will print results out as -# [iteration] metric_name:mean_value -res = xgb.cv(param, dtrain, num_boost_round=10, nfold=5, - metrics={'error'}, seed=0, - callbacks=[xgb.callback.print_evaluation(show_stdv=False), - xgb.callback.early_stop(3)]) -print(res) -print('running cross validation, with preprocessing function') -# define the preprocessing function -# used to return the preprocessed training, test data, and parameter -# we can use this to do weight rescale, etc.
-# as an example, we try to set scale_pos_weight -def fpreproc(dtrain, dtest, param): - label = dtrain.get_label() - ratio = float(np.sum(label == 0)) / np.sum(label == 1) - param['scale_pos_weight'] = ratio - return (dtrain, dtest, param) - -# do cross validation, for each fold -# the dtrain, dtest, param will be passed into fpreproc -# then the return value of fpreproc will be used to generate -# results of that fold -xgb.cv(param, dtrain, num_round, nfold=5, - metrics={'auc'}, seed=0, fpreproc=fpreproc) - -### -# you can also do cross validation with a customized loss function -# See custom_objective.py -## -print('running cross validation, with customized loss function') -def logregobj(preds, dtrain): - labels = dtrain.get_label() - preds = 1.0 / (1.0 + np.exp(-preds)) - grad = preds - labels - hess = preds * (1.0 - preds) - return grad, hess -def evalerror(preds, dtrain): - labels = dtrain.get_label() - return 'error', float(sum(labels != (preds > 0.0))) / len(labels) - -param = {'max_depth':2, 'eta':1, 'silent':1} -# train with customized objective -xgb.cv(param, dtrain, num_round, nfold=5, seed=0, - obj=logregobj, feval=evalerror) diff --git a/ml-xgboost/demo/guide-python/custom_objective.py b/ml-xgboost/demo/guide-python/custom_objective.py deleted file mode 100644 index 5bbcecc..0000000 --- a/ml-xgboost/demo/guide-python/custom_objective.py +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/python -import numpy as np -import xgboost as xgb -### -# advanced: customized loss function -# -print('start running example to use a customized objective function') - -dtrain = xgb.DMatrix('../data/agaricus.txt.train') -dtest = xgb.DMatrix('../data/agaricus.txt.test') - -# note: for a customized objective function, we leave objective as default -# note: what we are getting is the margin value of the prediction -# you must know what you are doing -param = {'max_depth': 2, 'eta': 1, 'silent': 1} -watchlist = [(dtest, 'eval'), (dtrain, 'train')] -num_round = 2 - - -# user-defined objective function: given the prediction, return the gradient -# and second-order gradient. This is log-likelihood loss -def logregobj(preds, dtrain): - labels = dtrain.get_label() - preds = 1.0 / (1.0 + np.exp(-preds)) - grad = preds - labels - hess = preds * (1.0 - preds) - return grad, hess - - -# user-defined evaluation function; returns a pair metric_name, result - -# NOTE: with a customized loss function, the default prediction value is the -# margin, which may keep builtin evaluation metrics from working properly. For -# example, with logistic loss the prediction is the score before the logistic -# transformation, while the builtin evaluation error assumes the input is after -# it. Keep this in mind when you use the customization; you may need to write a -# customized evaluation function -def evalerror(preds, dtrain): - labels = dtrain.get_label() - # return a pair metric_name, result.
The metric name must not contain a - # colon (:) or a space since preds are margin(before logistic - # transformation, cutoff at 0) - return 'my-error', float(sum(labels != (preds > 0.0))) / len(labels) - - -# training with customized objective, we can also do step by step training -# simply look at xgboost.py's implementation of train -bst = xgb.train(param, dtrain, num_round, watchlist, obj=logregobj, - feval=evalerror) diff --git a/ml-xgboost/demo/guide-python/custom_rmsle.py b/ml-xgboost/demo/guide-python/custom_rmsle.py deleted file mode 100644 index 6292636..0000000 --- a/ml-xgboost/demo/guide-python/custom_rmsle.py +++ /dev/null @@ -1,197 +0,0 @@ -'''Demo for defining customized metric and objective. Notice that for -simplicity reason weight is not used in following example. In this -script, we implement the Squared Log Error (SLE) objective and RMSLE metric as customized -functions, then compare it with native implementation in XGBoost. - -See doc/tutorials/custom_metric_obj.rst for a step by step -walkthrough, with other details. - -The `SLE` objective reduces impact of outliers in training dataset, -hence here we also compare its performance with standard squared -error. - -''' -import numpy as np -import xgboost as xgb -from typing import Tuple, Dict, List -from time import time -import argparse -import matplotlib -from matplotlib import pyplot as plt - -# shape of generated data. -kRows = 4096 -kCols = 16 - -kOutlier = 10000 # mean of generated outliers -kNumberOfOutliers = 64 - -kRatio = 0.7 -kSeed = 1994 - -kBoostRound = 20 - -np.random.seed(seed=kSeed) - - -def generate_data() -> Tuple[xgb.DMatrix, xgb.DMatrix]: - '''Generate data containing outliers.''' - x = np.random.randn(kRows, kCols) - y = np.random.randn(kRows) - y += np.abs(np.min(y)) - - # Create outliers - for i in range(0, kNumberOfOutliers): - ind = np.random.randint(0, len(y)-1) - y[ind] += np.random.randint(0, kOutlier) - - train_portion = int(kRows * kRatio) - - # rmsle requires all label be greater than -1. 
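# (np.log1p is undefined for values <= -1; the labels generated above were
# shifted by np.abs(np.min(y)) before the outliers were added, so the
# assertion below should always hold.)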
- assert np.all(y > -1.0) - - train_x: np.ndarray = x[: train_portion] - train_y: np.ndarray = y[: train_portion] - dtrain = xgb.DMatrix(train_x, label=train_y) - - test_x = x[train_portion:] - test_y = y[train_portion:] - dtest = xgb.DMatrix(test_x, label=test_y) - return dtrain, dtest - - -def native_rmse(dtrain: xgb.DMatrix, - dtest: xgb.DMatrix) -> Dict[str, Dict[str, List[float]]]: - '''Train using native implementation of Root Mean Squared Loss.''' - print('Squared Error') - squared_error = { - 'objective': 'reg:squarederror', - 'eval_metric': 'rmse', - 'tree_method': 'hist', - 'seed': kSeed - } - start = time() - results: Dict[str, Dict[str, List[float]]] = {} - xgb.train(squared_error, - dtrain=dtrain, - num_boost_round=kBoostRound, - evals=[(dtrain, 'dtrain'), (dtest, 'dtest')], - evals_result=results) - print('Finished Squared Error in:', time() - start, '\n') - return results - - -def native_rmsle(dtrain: xgb.DMatrix, - dtest: xgb.DMatrix) -> Dict[str, Dict[str, List[float]]]: - '''Train using native implementation of Squared Log Error.''' - print('Squared Log Error') - results: Dict[str, Dict[str, List[float]]] = {} - squared_log_error = { - 'objective': 'reg:squaredlogerror', - 'eval_metric': 'rmsle', - 'tree_method': 'hist', - 'seed': kSeed - } - start = time() - xgb.train(squared_log_error, - dtrain=dtrain, - num_boost_round=kBoostRound, - evals=[(dtrain, 'dtrain'), (dtest, 'dtest')], - evals_result=results) - print('Finished Squared Log Error in:', time() - start) - return results - - -def py_rmsle(dtrain: xgb.DMatrix, dtest: xgb.DMatrix) -> Dict: - '''Train using Python implementation of Squared Log Error.''' - def gradient(predt: np.ndarray, dtrain: xgb.DMatrix) -> np.ndarray: - '''Compute the gradient squared log error.''' - y = dtrain.get_label() - return (np.log1p(predt) - np.log1p(y)) / (predt + 1) - - def hessian(predt: np.ndarray, dtrain: xgb.DMatrix) -> np.ndarray: - '''Compute the hessian for squared log error.''' - y = dtrain.get_label() - return ((-np.log1p(predt) + np.log1p(y) + 1) / - np.power(predt + 1, 2)) - - def squared_log(predt: np.ndarray, - dtrain: xgb.DMatrix) -> Tuple[np.ndarray, np.ndarray]: - '''Squared Log Error objective. A simplified version for RMSLE used as - objective function. - - :math:`\frac{1}{2}[log(pred + 1) - log(label + 1)]^2` - - ''' - predt[predt < -1] = -1 + 1e-6 - grad = gradient(predt, dtrain) - hess = hessian(predt, dtrain) - return grad, hess - - def rmsle(predt: np.ndarray, dtrain: xgb.DMatrix) -> Tuple[str, float]: - ''' Root mean squared log error metric. 
- - :math:`\sqrt{\frac{1}{N}\sum_{i=1}^{N}[\log(pred_i + 1) - \log(label_i + 1)]^2}` - ''' - y = dtrain.get_label() - predt[predt < -1] = -1 + 1e-6 - elements = np.power(np.log1p(y) - np.log1p(predt), 2) - return 'PyRMSLE', float(np.sqrt(np.sum(elements) / len(y))) - - results: Dict[str, Dict[str, List[float]]] = {} - xgb.train({'tree_method': 'hist', 'seed': kSeed, - 'disable_default_eval_metric': 1}, - dtrain=dtrain, - num_boost_round=kBoostRound, - obj=squared_log, - feval=rmsle, - evals=[(dtrain, 'dtrain'), (dtest, 'dtest')], - evals_result=results) - - return results - - -def plot_history(rmse_evals, rmsle_evals, py_rmsle_evals): - fig, axs = plt.subplots(3, 1) - ax0: matplotlib.axes.Axes = axs[0] - ax1: matplotlib.axes.Axes = axs[1] - ax2: matplotlib.axes.Axes = axs[2] - - x = np.arange(0, kBoostRound, 1) - - ax0.plot(x, rmse_evals['dtrain']['rmse'], label='train-RMSE') - ax0.plot(x, rmse_evals['dtest']['rmse'], label='test-RMSE') - ax0.legend() - - ax1.plot(x, rmsle_evals['dtrain']['rmsle'], label='train-native-RMSLE') - ax1.plot(x, rmsle_evals['dtest']['rmsle'], label='test-native-RMSLE') - ax1.legend() - - ax2.plot(x, py_rmsle_evals['dtrain']['PyRMSLE'], label='train-PyRMSLE') - ax2.plot(x, py_rmsle_evals['dtest']['PyRMSLE'], label='test-PyRMSLE') - ax2.legend() - - plt.show() - plt.close() - - -def main(args): - dtrain, dtest = generate_data() - rmse_evals = native_rmse(dtrain, dtest) - rmsle_evals = native_rmsle(dtrain, dtest) - py_rmsle_evals = py_rmsle(dtrain, dtest) - - if args.plot != 0: - plot_history(rmse_evals, rmsle_evals, py_rmsle_evals) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description='Arguments for custom RMSLE objective function demo.') - parser.add_argument( - '--plot', - type=int, - default=1, - help='Set to 0 to disable plotting the evaluation history.') - args = parser.parse_args() - main(args) diff --git a/ml-xgboost/demo/guide-python/custom_softmax.py b/ml-xgboost/demo/guide-python/custom_softmax.py deleted file mode 100644 index b38b23d..0000000 --- a/ml-xgboost/demo/guide-python/custom_softmax.py +++ /dev/null @@ -1,148 +0,0 @@ -'''Demo for creating a customized multi-class objective function. This demo is -only applicable to versions after (and excluding) XGBoost 1.0.0, as before this -version XGBoost returned the transformed prediction for multi-class objective -functions. More details in comments. - -''' - -import numpy as np -import xgboost as xgb -from matplotlib import pyplot as plt -import argparse - -np.random.seed(1994) - -kRows = 100 -kCols = 10 -kClasses = 4 # number of classes - -kRounds = 10 # number of boosting rounds. - -# Generate some random data for demo. -X = np.random.randn(kRows, kCols) -y = np.random.randint(0, 4, size=kRows) - -m = xgb.DMatrix(X, y) - - -def softmax(x): - '''Softmax function with x as input vector.''' - e = np.exp(x) - return e / np.sum(e) - - -def softprob_obj(predt: np.ndarray, data: xgb.DMatrix): - '''Loss function. Computing the gradient and approximated hessian (diagonal). - Reimplements the `multi:softprob` inside XGBoost. - - ''' - labels = data.get_label() - if data.get_weight().size == 0: - # Use 1 as weight if we don't have custom weight. - weights = np.ones((kRows, 1), dtype=float) - else: - weights = data.get_weight() - - # The prediction is of shape (rows, classes), each element in a row - # represents a raw prediction (leaf weight, hasn't gone through softmax - # yet). In XGBoost 1.0.0, the prediction is transformed by a softmax - # function, fixed in later versions.
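# For softmax cross-entropy, the gradient of the loss w.r.t. the raw score of
# class c is p_c - 1{c == target}, and 2 * p_c * (1 - p_c) serves as a diagonal
# Hessian approximation; the loop below computes exactly these quantities.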
- assert predt.shape == (kRows, kClasses) - - grad = np.zeros((kRows, kClasses), dtype=float) - hess = np.zeros((kRows, kClasses), dtype=float) - - eps = 1e-6 - - # compute the gradient and hessian, slow iteration in Python, only - # suitable for a demo. Also the one in native XGBoost core is more robust to - # numeric overflow as we don't do anything to mitigate the `exp` in - # `softmax` here. - for r in range(predt.shape[0]): - target = labels[r] - p = softmax(predt[r, :]) - for c in range(predt.shape[1]): - assert 0 <= target < kClasses - g = p[c] - 1.0 if c == target else p[c] - g = g * weights[r] - h = max((2.0 * p[c] * (1.0 - p[c]) * weights[r]).item(), eps) - grad[r, c] = g - hess[r, c] = h - - # Right now (XGBoost 1.0.0), reshaping is necessary - grad = grad.reshape((kRows * kClasses, 1)) - hess = hess.reshape((kRows * kClasses, 1)) - return grad, hess - - -def predict(booster, X): - '''A customized prediction function that converts raw prediction to - target class. - - ''' - # Output margin means we want to obtain the raw prediction derived from - # the tree leaf weights. - predt = booster.predict(X, output_margin=True) - out = np.zeros(kRows) - for r in range(predt.shape[0]): - # the class with maximum prob (not strictly prob, as it hasn't gone - # through softmax yet so it doesn't sum to 1, but the result is the - # same for argmax). - i = np.argmax(predt[r]) - out[r] = i - return out - - -def plot_history(custom_results, native_results): - fig, axs = plt.subplots(2, 1) - ax0 = axs[0] - ax1 = axs[1] - - x = np.arange(0, kRounds, 1) - ax0.plot(x, custom_results['train']['merror'], label='Custom objective') - ax0.legend() - ax1.plot(x, native_results['train']['merror'], label='multi:softmax') - ax1.legend() - - plt.show() - - -def main(args): - custom_results = {} - # Use our custom objective function - booster_custom = xgb.train({'num_class': kClasses}, - m, - num_boost_round=kRounds, - obj=softprob_obj, - evals_result=custom_results, - evals=[(m, 'train')]) - - predt_custom = predict(booster_custom, m) - - native_results = {} - # Use the same objective function defined in XGBoost. - booster_native = xgb.train({'num_class': kClasses}, - m, - num_boost_round=kRounds, - evals_result=native_results, - evals=[(m, 'train')]) - predt_native = booster_native.predict(m) - - # We are reimplementing the loss function in XGBoost, so it should - # be the same for normal cases.
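# (predict() above takes the argmax of the raw margins, while the native
# booster outputs class indices directly; since softmax preserves the argmax,
# the two predictions are expected to agree.)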
- assert np.all(predt_custom == predt_native) - - if args.plot != 0: - plot_history(custom_results, native_results) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description='Arguments for custom softmax objective function demo.') - parser.add_argument( - '--plot', - type=int, - default=1, - help='Set to 0 to disable plotting the evaluation history.') - args = parser.parse_args() - main(args) diff --git a/ml-xgboost/demo/guide-python/evals_result.py b/ml-xgboost/demo/guide-python/evals_result.py deleted file mode 100644 index 8449b93..0000000 --- a/ml-xgboost/demo/guide-python/evals_result.py +++ /dev/null @@ -1,30 +0,0 @@ -## -# This script demonstrate how to access the eval metrics in xgboost -## - -import xgboost as xgb -dtrain = xgb.DMatrix('../data/agaricus.txt.train', silent=True) -dtest = xgb.DMatrix('../data/agaricus.txt.test', silent=True) - -param = [('max_depth', 2), ('objective', 'binary:logistic'), ('eval_metric', 'logloss'), ('eval_metric', 'error')] - -num_round = 2 -watchlist = [(dtest,'eval'), (dtrain,'train')] - -evals_result = {} -bst = xgb.train(param, dtrain, num_round, watchlist, evals_result=evals_result) - -print('Access logloss metric directly from evals_result:') -print(evals_result['eval']['logloss']) - -print('') -print('Access metrics through a loop:') -for e_name, e_mtrs in evals_result.items(): - print('- {}'.format(e_name)) - for e_mtr_name, e_mtr_vals in e_mtrs.items(): - print(' - {}'.format(e_mtr_name)) - print(' - {}'.format(e_mtr_vals)) - -print('') -print('Access complete dictionary:') -print(evals_result) diff --git a/ml-xgboost/demo/guide-python/external_memory.py b/ml-xgboost/demo/guide-python/external_memory.py deleted file mode 100644 index 97a74b0..0000000 --- a/ml-xgboost/demo/guide-python/external_memory.py +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/python -import numpy as np -import scipy.sparse -import xgboost as xgb - -### simple example for using external memory version - -# this is the only difference, add a # followed by a cache prefix name -# several cache file with the prefix will be generated -# currently only support convert from libsvm file -dtrain = xgb.DMatrix('../data/agaricus.txt.train#dtrain.cache') -dtest = xgb.DMatrix('../data/agaricus.txt.test#dtest.cache') - -# specify validations set to watch performance -param = {'max_depth':2, 'eta':1, 'silent':1, 'objective':'binary:logistic'} - -# performance notice: set nthread to be the number of your real cpu -# some cpu offer two threads per core, for example, a 4 core cpu with 8 threads, in such case set nthread=4 -#param['nthread']=num_real_cpu - -watchlist = [(dtest, 'eval'), (dtrain, 'train')] -num_round = 2 -bst = xgb.train(param, dtrain, num_round, watchlist) - - diff --git a/ml-xgboost/demo/guide-python/gamma_regression.py b/ml-xgboost/demo/guide-python/gamma_regression.py deleted file mode 100644 index af7103b..0000000 --- a/ml-xgboost/demo/guide-python/gamma_regression.py +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/python -import xgboost as xgb -import numpy as np - -# this script demonstrates how to fit gamma regression model (with log link function) -# in xgboost, before running the demo you need to generate the autoclaims dataset -# by running gen_autoclaims.R located in xgboost/demo/data. 
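# The 'test deviance' printed at the end is the (unscaled) gamma deviance,
# D = 2 * sum((y - mu)/mu - log(y/mu)), where mu denotes the predicted mean.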
- -data = np.genfromtxt('../data/autoclaims.csv', delimiter=',') -dtrain = xgb.DMatrix(data[0:4741, 0:34], data[0:4741, 34]) -dtest = xgb.DMatrix(data[4741:6773, 0:34], data[4741:6773, 34]) - -# for gamma regression, we need to set the objective to 'reg:gamma'; it is also suggested -# to set base_score to a value between 1 and 5 if the number of iterations is small -param = {'silent':1, 'objective':'reg:gamma', 'booster':'gbtree', 'base_score':3} - -# the rest of the settings are the same -watchlist = [(dtest, 'eval'), (dtrain, 'train')] -num_round = 30 - -# training and evaluation -bst = xgb.train(param, dtrain, num_round, watchlist) -preds = bst.predict(dtest) -labels = dtest.get_label() -print('test deviance=%f' % (2 * np.sum((labels - preds) / preds - np.log(labels) + np.log(preds)))) diff --git a/ml-xgboost/demo/guide-python/generalized_linear_model.py b/ml-xgboost/demo/guide-python/generalized_linear_model.py deleted file mode 100644 index c85c5ca..0000000 --- a/ml-xgboost/demo/guide-python/generalized_linear_model.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/python -import xgboost as xgb -## -# this script demonstrates how to fit a generalized linear model in xgboost -# basically, we are using a linear model instead of trees for our boosters -## -dtrain = xgb.DMatrix('../data/agaricus.txt.train') -dtest = xgb.DMatrix('../data/agaricus.txt.test') -# change booster to gblinear, so that we are fitting a linear model -# alpha is the L1 regularizer -# lambda is the L2 regularizer -# you can also set lambda_bias, which is the L2 regularizer on the bias term -param = {'silent':1, 'objective':'binary:logistic', 'booster':'gblinear', - 'alpha': 0.0001, 'lambda': 1} - -# normally, you do not need to set eta (step_size) -# XGBoost uses a parallel coordinate descent algorithm (shotgun); -# parallelization can affect convergence in certain cases, and -# setting eta to a smaller value, e.g. 0.5, can make the optimization more stable -# param['eta'] = 1 - -## -# the rest of the settings are the same -## -watchlist = [(dtest, 'eval'), (dtrain, 'train')] -num_round = 4 -bst = xgb.train(param, dtrain, num_round, watchlist) -preds = bst.predict(dtest) -labels = dtest.get_label() -print('error=%f' % (sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i]) / float(len(preds)))) diff --git a/ml-xgboost/demo/guide-python/predict_first_ntree.py b/ml-xgboost/demo/guide-python/predict_first_ntree.py deleted file mode 100644 index 3a8dbbb..0000000 --- a/ml-xgboost/demo/guide-python/predict_first_ntree.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/python -import numpy as np -import xgboost as xgb - -### load data and do training -dtrain = xgb.DMatrix('../data/agaricus.txt.train') -dtest = xgb.DMatrix('../data/agaricus.txt.test') -param = {'max_depth':2, 'eta':1, 'silent':1, 'objective':'binary:logistic'} -watchlist = [(dtest, 'eval'), (dtrain, 'train')] -num_round = 3 -bst = xgb.train(param, dtrain, num_round, watchlist) - -print('start testing prediction from first n trees') -### predict using first 1 tree -label = dtest.get_label() -ypred1 = bst.predict(dtest, ntree_limit=1) -# by default, we predict using all the trees -ypred2 = bst.predict(dtest) -print('error of ypred1=%f' % (np.sum((ypred1 > 0.5) != label) / float(len(label)))) -print('error of ypred2=%f' % (np.sum((ypred2 > 0.5) != label) / float(len(label)))) diff --git a/ml-xgboost/demo/guide-python/predict_leaf_indices.py b/ml-xgboost/demo/guide-python/predict_leaf_indices.py deleted file mode 100644 index 383e8d5..0000000 ---
a/ml-xgboost/demo/guide-python/predict_leaf_indices.py +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/python -import xgboost as xgb - -### load data in do training -dtrain = xgb.DMatrix('../data/agaricus.txt.train') -dtest = xgb.DMatrix('../data/agaricus.txt.test') -param = {'max_depth':2, 'eta':1, 'silent':1, 'objective':'binary:logistic'} -watchlist = [(dtest, 'eval'), (dtrain, 'train')] -num_round = 3 -bst = xgb.train(param, dtrain, num_round, watchlist) - -print ('start testing predict the leaf indices') -### predict using first 2 tree -leafindex = bst.predict(dtest, ntree_limit=2, pred_leaf=True) -print(leafindex.shape) -print(leafindex) -### predict all trees -leafindex = bst.predict(dtest, pred_leaf=True) -print(leafindex.shape) diff --git a/ml-xgboost/demo/guide-python/runall.sh b/ml-xgboost/demo/guide-python/runall.sh deleted file mode 100644 index 9eda92b..0000000 --- a/ml-xgboost/demo/guide-python/runall.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -export PYTHONPATH=$PYTHONPATH:../../python-package -python basic_walkthrough.py -python custom_objective.py -python boost_from_prediction.py -python predict_first_ntree.py -python generalized_linear_model.py -python cross_validation.py -python predict_leaf_indices.py -python sklearn_examples.py -python sklearn_parallel.py -python external_memory.py -rm -rf *~ *.model *.buffer diff --git a/ml-xgboost/demo/guide-python/sklearn_evals_result.py b/ml-xgboost/demo/guide-python/sklearn_evals_result.py deleted file mode 100644 index a72cdfc..0000000 --- a/ml-xgboost/demo/guide-python/sklearn_evals_result.py +++ /dev/null @@ -1,43 +0,0 @@ -## -# This script demonstrate how to access the xgboost eval metrics by using sklearn -## - -import xgboost as xgb -import numpy as np -from sklearn.datasets import make_hastie_10_2 - -X, y = make_hastie_10_2(n_samples=2000, random_state=42) - -# Map labels from {-1, 1} to {0, 1} -labels, y = np.unique(y, return_inverse=True) - -X_train, X_test = X[:1600], X[1600:] -y_train, y_test = y[:1600], y[1600:] - -param_dist = {'objective':'binary:logistic', 'n_estimators':2} - -clf = xgb.XGBModel(**param_dist) -# Or you can use: clf = xgb.XGBClassifier(**param_dist) - -clf.fit(X_train, y_train, - eval_set=[(X_train, y_train), (X_test, y_test)], - eval_metric='logloss', - verbose=True) - -# Load evals result by calling the evals_result() function -evals_result = clf.evals_result() - -print('Access logloss metric directly from validation_0:') -print(evals_result['validation_0']['logloss']) - -print('') -print('Access metrics through a loop:') -for e_name, e_mtrs in evals_result.items(): - print('- {}'.format(e_name)) - for e_mtr_name, e_mtr_vals in e_mtrs.items(): - print(' - {}'.format(e_mtr_name)) - print(' - {}'.format(e_mtr_vals)) - -print('') -print('Access complete dict:') -print(evals_result) diff --git a/ml-xgboost/demo/guide-python/sklearn_examples.py b/ml-xgboost/demo/guide-python/sklearn_examples.py deleted file mode 100644 index d4f9924..0000000 --- a/ml-xgboost/demo/guide-python/sklearn_examples.py +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/python -''' -Created on 1 Apr 2015 - -@author: Jamie Hall -''' -import pickle -import xgboost as xgb - -import numpy as np -from sklearn.model_selection import KFold, train_test_split, GridSearchCV -from sklearn.metrics import confusion_matrix, mean_squared_error -from sklearn.datasets import load_iris, load_digits, load_boston - -rng = np.random.RandomState(31337) - -print("Zeros and Ones from the Digits dataset: binary classification") -digits = load_digits(2) -y = 
digits['target'] -X = digits['data'] -kf = KFold(n_splits=2, shuffle=True, random_state=rng) -for train_index, test_index in kf.split(X): - xgb_model = xgb.XGBClassifier().fit(X[train_index], y[train_index]) - predictions = xgb_model.predict(X[test_index]) - actuals = y[test_index] - print(confusion_matrix(actuals, predictions)) - -print("Iris: multiclass classification") -iris = load_iris() -y = iris['target'] -X = iris['data'] -kf = KFold(n_splits=2, shuffle=True, random_state=rng) -for train_index, test_index in kf.split(X): - xgb_model = xgb.XGBClassifier().fit(X[train_index], y[train_index]) - predictions = xgb_model.predict(X[test_index]) - actuals = y[test_index] - print(confusion_matrix(actuals, predictions)) - -print("Boston Housing: regression") -boston = load_boston() -y = boston['target'] -X = boston['data'] -kf = KFold(n_splits=2, shuffle=True, random_state=rng) -for train_index, test_index in kf.split(X): - xgb_model = xgb.XGBRegressor().fit(X[train_index], y[train_index]) - predictions = xgb_model.predict(X[test_index]) - actuals = y[test_index] - print(mean_squared_error(actuals, predictions)) - -print("Parameter optimization") -y = boston['target'] -X = boston['data'] -xgb_model = xgb.XGBRegressor() -clf = GridSearchCV(xgb_model, - {'max_depth': [2,4,6], - 'n_estimators': [50,100,200]}, verbose=1) -clf.fit(X,y) -print(clf.best_score_) -print(clf.best_params_) - -# The sklearn API models are picklable -print("Pickling sklearn API models") -# must open in binary format to pickle -pickle.dump(clf, open("best_boston.pkl", "wb")) -clf2 = pickle.load(open("best_boston.pkl", "rb")) -print(np.allclose(clf.predict(X), clf2.predict(X))) - -# Early-stopping - -X = digits['data'] -y = digits['target'] -X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) -clf = xgb.XGBClassifier() -clf.fit(X_train, y_train, early_stopping_rounds=10, eval_metric="auc", - eval_set=[(X_test, y_test)]) - diff --git a/ml-xgboost/demo/guide-python/sklearn_parallel.py b/ml-xgboost/demo/guide-python/sklearn_parallel.py deleted file mode 100644 index 04f13f7..0000000 --- a/ml-xgboost/demo/guide-python/sklearn_parallel.py +++ /dev/null @@ -1,35 +0,0 @@ -import os - -if __name__ == "__main__": - # NOTE: on posix systems, this *has* to be here and in the - # `__name__ == "__main__"` clause to run XGBoost in parallel processes - # using fork, if XGBoost was built with OpenMP support. Otherwise, if you - # build XGBoost without OpenMP support, you can use fork, which is the - # default backend for joblib, and omit this. - try: - from multiprocessing import set_start_method - except ImportError: - raise ImportError("Unable to import multiprocessing.set_start_method." 
- " This example only runs on Python 3.4") - set_start_method("forkserver") - - import numpy as np - from sklearn.model_selection import GridSearchCV - from sklearn.datasets import load_boston - import xgboost as xgb - - rng = np.random.RandomState(31337) - - print("Parallel Parameter optimization") - boston = load_boston() - - os.environ["OMP_NUM_THREADS"] = "2" # or to whatever you want - y = boston['target'] - X = boston['data'] - xgb_model = xgb.XGBRegressor() - clf = GridSearchCV(xgb_model, {'max_depth': [2, 4, 6], - 'n_estimators': [50, 100, 200]}, verbose=1, - n_jobs=2) - clf.fit(X, y) - print(clf.best_score_) - print(clf.best_params_) diff --git a/ml-xgboost/demo/json-model/README.md b/ml-xgboost/demo/json-model/README.md deleted file mode 100644 index 065d854..0000000 --- a/ml-xgboost/demo/json-model/README.md +++ /dev/null @@ -1,3 +0,0 @@ -We introduced initial support for saving XGBoost model in JSON format in 1.0.0. Note that -it's still experimental and under development, output schema is subject to change due to -bug fixes or further refactoring. For an overview, see https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html . \ No newline at end of file diff --git a/ml-xgboost/demo/json-model/json_parser.py b/ml-xgboost/demo/json-model/json_parser.py deleted file mode 100644 index eedcbf9..0000000 --- a/ml-xgboost/demo/json-model/json_parser.py +++ /dev/null @@ -1,180 +0,0 @@ -'''Demonstration for parsing JSON tree model file generated by XGBoost. The -support is experimental, output schema is subject to change in the future. -''' -import json -import argparse - - -class Tree: - '''A tree built by XGBoost.''' - # Index into node array - _left = 0 - _right = 1 - _parent = 2 - _ind = 3 - _cond = 4 - _default_left = 5 - # Index into stat array - _loss_chg = 0 - _sum_hess = 1 - _base_weight = 2 - _child_cnt = 3 - - def __init__(self, tree_id: int, nodes, stats): - self.tree_id = tree_id - self.nodes = nodes - self.stats = stats - - def loss_change(self, node_id: int): - '''Loss gain of a node.''' - return self.stats[node_id][self._loss_chg] - - def sum_hessian(self, node_id: int): - '''Sum Hessian of a node.''' - return self.stats[node_id][self._sum_hess] - - def base_weight(self, node_id: int): - '''Base weight of a node.''' - return self.stats[node_id][self._base_weight] - - def num_children(self, node_id: int): - '''Number of children of a node.''' - return self.stats[node_id][self._child_cnt] - - def split_index(self, node_id: int): - '''Split feature index of node.''' - return self.nodes[node_id][self._ind] - - def split_condition(self, node_id: int): - '''Split value of a node.''' - return self.nodes[node_id][self._cond] - - def parent(self, node_id: int): - '''Parent ID of a node.''' - return self.nodes[node_id][self._parent] - - def left_child(self, node_id: int): - '''Left child ID of a node.''' - return self.nodes[node_id][self._left] - - def right_child(self, node_id: int): - '''Right child ID of a node.''' - return self.nodes[node_id][self._right] - - def is_leaf(self, node_id: int): - '''Whether a node is leaf.''' - return self.nodes[node_id][self._left] == -1 - - def is_deleted(self, node_id: int): - '''Whether a node is deleted.''' - # std::numeric_limits::max() - return self.nodes[node_id][self._ind] == 4294967295 - - def __str__(self): - stacks = [0] - nodes = [] - while stacks: - node = {} - nid = stacks.pop() - - node['node id'] = nid - node['gain'] = self.loss_change(nid) - node['cover'] = self.sum_hessian(nid) - nodes.append(node) - - if not 
self.is_leaf(nid) and not self.is_deleted(nid): - left = self.left_child(nid) - right = self.right_child(nid) - stacks.append(left) - stacks.append(right) - - string = '\n'.join(map(lambda x: ' ' + str(x), nodes)) - return string - - -class Model: - '''Gradient boosted tree model.''' - def __init__(self, m: dict): - '''Construct the Model from JSON object. - - Parameters - ---------- - m: A dictionary loaded by json - ''' - # Basic property of a model - self.learner_model_shape = m['learner']['learner_model_param'] - self.num_output_group = int(self.learner_model_shape['num_class']) - self.num_feature = int(self.learner_model_shape['num_feature']) - self.base_score = float(self.learner_model_shape['base_score']) - # A field encoding which output group a tree belongs to - self.tree_info = m['learner']['gradient_booster']['model'][ - 'tree_info'] - - model_shape = m['learner']['gradient_booster']['model'][ - 'gbtree_model_param'] - - # JSON representation of trees - j_trees = m['learner']['gradient_booster']['model']['trees'] - - # Load the trees - self.num_trees = int(model_shape['num_trees']) - self.leaf_size = int(model_shape['size_leaf_vector']) - # Right now XGBoost doesn't support vector leaves yet - assert self.leaf_size == 0, str(self.leaf_size) - - trees = [] - for i in range(self.num_trees): - tree = j_trees[i] - tree_id = int(tree['id']) - assert tree_id == i, (tree_id, i) - # properties - left_children = tree['left_children'] - right_children = tree['right_children'] - parents = tree['parents'] - split_conditions = tree['split_conditions'] - split_indices = tree['split_indices'] - default_left = tree['default_left'] - # stats - base_weights = tree['base_weights'] - loss_changes = tree['loss_changes'] - sum_hessian = tree['sum_hessian'] - leaf_child_counts = tree['leaf_child_counts'] - - stats = [] - nodes = [] - # We mirror the structure used inside XGBoost, which is similar - # to an adjacency list. - for node_id in range(len(left_children)): - nodes.append([ - left_children[node_id], right_children[node_id], - parents[node_id], split_indices[node_id], - split_conditions[node_id], default_left[node_id] - ]) - stats.append([ - loss_changes[node_id], sum_hessian[node_id], - base_weights[node_id], leaf_child_counts[node_id] - ]) - - tree = Tree(tree_id, nodes, stats) - trees.append(tree) - - self.trees = trees - - def print_model(self): - for i, tree in enumerate(self.trees): - print('tree_id:', i) - print(tree) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description='Demonstration for loading and printing XGBoost model.') - parser.add_argument('--model', - type=str, - required=True, - help='Path to JSON model file.') - args = parser.parse_args() - with open(args.model, 'r') as fd: - model = json.load(fd) - model = Model(model) - model.print_model() diff --git a/ml-xgboost/demo/kaggle-higgs/README.md b/ml-xgboost/demo/kaggle-higgs/README.md deleted file mode 100644 index d202a99..0000000 --- a/ml-xgboost/demo/kaggle-higgs/README.md +++ /dev/null @@ -1,31 +0,0 @@ -Highlights -===== -The Higgs challenge ended recently, and xgboost was used by many players.
This list highlights some of the players' xgboost solutions: -* Blog post by phunther: [Winning solution of Kaggle Higgs competition: what a single model can do](http://no2147483647.wordpress.com/2014/09/17/winning-solution-of-kaggle-higgs-competition-what-a-single-model-can-do/) -* The solution by Tianqi Chen and Tong He [Link](https://github.com/hetong007/higgsml) - -Guide for Kaggle Higgs Challenge -===== - -This folder gives an example of how to use the XGBoost Python module to run the Kaggle Higgs competition. - -This script achieves an AMS score of about 3.600 on the public leaderboard. To get started, follow these steps: - -1. Compile the XGBoost python lib -```bash -cd ../.. -make -``` - -2. Put training.csv and test.csv in the folder './data' (you can create a symbolic link) - -3. Run ./run.sh - -Speed -===== -speedtest.py compares xgboost's speed on this dataset with sklearn's GBM - - -Using R module -===== -* Alternatively, you can run it using R: higgs-train.R and higgs-pred.R. diff --git a/ml-xgboost/demo/kaggle-higgs/higgs-cv.py b/ml-xgboost/demo/kaggle-higgs/higgs-cv.py deleted file mode 100644 index d5bbc39..0000000 --- a/ml-xgboost/demo/kaggle-higgs/higgs-cv.py +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/python -import numpy as np -import xgboost as xgb - -### load data and do training -train = np.loadtxt('./data/training.csv', delimiter=',', skiprows=1, converters={32: lambda x:int(x=='s'.encode('utf-8')) } ) -label = train[:,32] -data = train[:,1:31] -weight = train[:,31] -dtrain = xgb.DMatrix( data, label=label, missing = -999.0, weight=weight ) -param = {'max_depth':6, 'eta':0.1, 'silent':1, 'objective':'binary:logitraw', 'nthread':4} -num_round = 120 - -print ('running cross validation, with preprocessing function') -# define the preprocessing function -# used to return the preprocessed training, test data, and parameter -# we can use this to do weight rescale, etc.
-# as a example, we try to set scale_pos_weight -def fpreproc(dtrain, dtest, param): - label = dtrain.get_label() - ratio = float(np.sum(label == 0)) / np.sum(label==1) - param['scale_pos_weight'] = ratio - wtrain = dtrain.get_weight() - wtest = dtest.get_weight() - sum_weight = sum(wtrain) + sum(wtest) - wtrain *= sum_weight / sum(wtrain) - wtest *= sum_weight / sum(wtest) - dtrain.set_weight(wtrain) - dtest.set_weight(wtest) - return (dtrain, dtest, param) - -# do cross validation, for each fold -# the dtrain, dtest, param will be passed into fpreproc -# then the return value of fpreproc will be used to generate -# results of that fold -xgb.cv(param, dtrain, num_round, nfold=5, - metrics={'ams@0.15', 'auc'}, seed = 0, fpreproc = fpreproc) diff --git a/ml-xgboost/demo/kaggle-higgs/higgs-numpy.py b/ml-xgboost/demo/kaggle-higgs/higgs-numpy.py deleted file mode 100644 index 004dd55..0000000 --- a/ml-xgboost/demo/kaggle-higgs/higgs-numpy.py +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/python -# this is the example script to use xgboost to train -import numpy as np - -import xgboost as xgb - -test_size = 550000 - -# path to where the data lies -dpath = 'data' - -# load in training data, directly use numpy -dtrain = np.loadtxt( dpath+'/training.csv', delimiter=',', skiprows=1, converters={32: lambda x:int(x=='s'.encode('utf-8')) } ) -print ('finish loading from csv ') - -label = dtrain[:,32] -data = dtrain[:,1:31] -# rescale weight to make it same as test set -weight = dtrain[:,31] * float(test_size) / len(label) - -sum_wpos = sum( weight[i] for i in range(len(label)) if label[i] == 1.0 ) -sum_wneg = sum( weight[i] for i in range(len(label)) if label[i] == 0.0 ) - -# print weight statistics -print ('weight statistics: wpos=%g, wneg=%g, ratio=%g' % ( sum_wpos, sum_wneg, sum_wneg/sum_wpos )) - -# construct xgboost.DMatrix from numpy array, treat -999.0 as missing value -xgmat = xgb.DMatrix( data, label=label, missing = -999.0, weight=weight ) - -# setup parameters for xgboost -param = {} -# use logistic regression loss, use raw prediction before logistic transformation -# since we only need the rank -param['objective'] = 'binary:logitraw' -# scale weight of positive examples -param['scale_pos_weight'] = sum_wneg/sum_wpos -param['eta'] = 0.1 -param['max_depth'] = 6 -param['eval_metric'] = 'auc' -param['silent'] = 1 -param['nthread'] = 16 - -# you can directly throw param in, though we want to watch multiple metrics here -plst = list(param.items())+[('eval_metric', 'ams@0.15')] - -watchlist = [ (xgmat,'train') ] -# boost 120 trees -num_round = 120 -print ('loading data end, start to boost trees') -bst = xgb.train( plst, xgmat, num_round, watchlist ); -# save out model -bst.save_model('higgs.model') - -print ('finish training') diff --git a/ml-xgboost/demo/kaggle-higgs/higgs-pred.R b/ml-xgboost/demo/kaggle-higgs/higgs-pred.R deleted file mode 100644 index 5136223..0000000 --- a/ml-xgboost/demo/kaggle-higgs/higgs-pred.R +++ /dev/null @@ -1,24 +0,0 @@ -# install xgboost package, see R-package in root folder -require(xgboost) -require(methods) - -modelfile <- "higgs.model" -outfile <- "higgs.pred.csv" -dtest <- read.csv("data/test.csv", header=TRUE) -data <- as.matrix(dtest[2:31]) -idx <- dtest[[1]] - -xgmat <- xgb.DMatrix(data, missing = -999.0) -bst <- xgb.load(modelfile=modelfile) -ypred <- predict(bst, xgmat) - -rorder <- rank(ypred, ties.method="first") - -threshold <- 0.15 -# to be completed -ntop <- length(rorder) - as.integer(threshold*length(rorder)) -plabel <- ifelse(rorder > ntop, "s", "b") 
-outdata <- list("EventId" = idx, - "RankOrder" = rorder, - "Class" = plabel) -write.csv(outdata, file = outfile, quote=FALSE, row.names=FALSE) diff --git a/ml-xgboost/demo/kaggle-higgs/higgs-pred.py b/ml-xgboost/demo/kaggle-higgs/higgs-pred.py deleted file mode 100644 index bc669f5..0000000 --- a/ml-xgboost/demo/kaggle-higgs/higgs-pred.py +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/python -# make prediction -import numpy as np -import xgboost as xgb - -# path to where the data lies -dpath = 'data' - -modelfile = 'higgs.model' -outfile = 'higgs.pred.csv' -# make top 15% as positive -threshold_ratio = 0.15 - -# load in training data, directly use numpy -dtest = np.loadtxt( dpath+'/test.csv', delimiter=',', skiprows=1 ) -data = dtest[:,1:31] -idx = dtest[:,0] - -print ('finish loading from csv ') -xgmat = xgb.DMatrix( data, missing = -999.0 ) -bst = xgb.Booster({'nthread':16}, model_file = modelfile) -ypred = bst.predict( xgmat ) - -res = [ ( int(idx[i]), ypred[i] ) for i in range(len(ypred)) ] - -rorder = {} -for k, v in sorted( res, key = lambda x:-x[1] ): - rorder[ k ] = len(rorder) + 1 - -# write out predictions -ntop = int( threshold_ratio * len(rorder ) ) -fo = open(outfile, 'w') -nhit = 0 -ntot = 0 -fo.write('EventId,RankOrder,Class\n') -for k, v in res: - if rorder[k] <= ntop: - lb = 's' - nhit += 1 - else: - lb = 'b' - # change output rank order to follow Kaggle convention - fo.write('%s,%d,%s\n' % ( k, len(rorder)+1-rorder[k], lb ) ) - ntot += 1 -fo.close() - -print ('finished writing into prediction file') - - - diff --git a/ml-xgboost/demo/kaggle-higgs/higgs-train.R b/ml-xgboost/demo/kaggle-higgs/higgs-train.R deleted file mode 100644 index 426d1f6..0000000 --- a/ml-xgboost/demo/kaggle-higgs/higgs-train.R +++ /dev/null @@ -1,33 +0,0 @@ -# install xgboost package, see R-package in root folder -require(xgboost) -require(methods) - -testsize <- 550000 - -dtrain <- read.csv("data/training.csv", header=TRUE) -dtrain[33] <- dtrain[33] == "s" -label <- as.numeric(dtrain[[33]]) -data <- as.matrix(dtrain[2:31]) -weight <- as.numeric(dtrain[[32]]) * testsize / length(label) - -sumwpos <- sum(weight * (label==1.0)) -sumwneg <- sum(weight * (label==0.0)) -print(paste("weight statistics: wpos=", sumwpos, "wneg=", sumwneg, "ratio=", sumwneg / sumwpos)) - -xgmat <- xgb.DMatrix(data, label = label, weight = weight, missing = -999.0) -param <- list("objective" = "binary:logitraw", - "scale_pos_weight" = sumwneg / sumwpos, - "bst:eta" = 0.1, - "bst:max_depth" = 6, - "eval_metric" = "auc", - "eval_metric" = "ams@0.15", - "silent" = 1, - "nthread" = 16) -watchlist <- list("train" = xgmat) -nrounds = 120 -print ("loading data end, start to boost trees") -bst = xgb.train(param, xgmat, nrounds, watchlist ); -# save out model -xgb.save(bst, "higgs.model") -print ('finish training') - diff --git a/ml-xgboost/demo/kaggle-higgs/run.sh b/ml-xgboost/demo/kaggle-higgs/run.sh deleted file mode 100644 index 23cde39..0000000 --- a/ml-xgboost/demo/kaggle-higgs/run.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -python -u higgs-numpy.py -ret=$? -if [[ $ret != 0 ]]; then - echo "ERROR in higgs-numpy.py" - exit $ret -fi -python -u higgs-pred.py -ret=$? 
-if [[ $ret != 0 ]]; then - echo "ERROR in higgs-pred.py" - exit $ret -fi diff --git a/ml-xgboost/demo/kaggle-higgs/speedtest.R b/ml-xgboost/demo/kaggle-higgs/speedtest.R deleted file mode 100644 index d17d8a1..0000000 --- a/ml-xgboost/demo/kaggle-higgs/speedtest.R +++ /dev/null @@ -1,71 +0,0 @@ -# install the xgboost package, see R-package in the root folder -require(xgboost) -require(gbm) -require(methods) - -testsize <- 550000 - -dtrain <- read.csv("data/training.csv", header=TRUE, nrows=350001) -dtrain$Label = as.numeric(dtrain$Label=='s') -# gbm.time = system.time({ -# gbm.model <- gbm(Label ~ ., data = dtrain[, -c(1,32)], n.trees = 120, -# interaction.depth = 6, shrinkage = 0.1, bag.fraction = 1, -# verbose = TRUE) -# }) -# print(gbm.time) -# Test result: 761.48 secs - -# dtrain[33] <- dtrain[33] == "s" -# label <- as.numeric(dtrain[[33]]) -# the commented lines above never run, so bind the label explicitly -label <- dtrain$Label -data <- as.matrix(dtrain[2:31]) -weight <- as.numeric(dtrain[[32]]) * testsize / length(label) - -sumwpos <- sum(weight * (label==1.0)) -sumwneg <- sum(weight * (label==0.0)) -print(paste("weight statistics: wpos=", sumwpos, "wneg=", sumwneg, "ratio=", sumwneg / sumwpos)) - -xgboost.time = list() -threads = c(1,2,4,8,16) -for (i in 1:length(threads)){ - thread = threads[i] - xgboost.time[[i]] = system.time({ - xgmat <- xgb.DMatrix(data, label = label, weight = weight, missing = -999.0) - param <- list("objective" = "binary:logitraw", - "scale_pos_weight" = sumwneg / sumwpos, - "bst:eta" = 0.1, - "bst:max_depth" = 6, - "eval_metric" = "auc", - "eval_metric" = "ams@0.15", - "silent" = 1, - "nthread" = thread) - watchlist <- list("train" = xgmat) - nrounds = 120 - print ("loading data end, start to boost trees") - bst = xgb.train(param, xgmat, nrounds, watchlist) - # save out model - xgb.save(bst, "higgs.model") - print ('finish training') - }) -} - -xgboost.time -# [[1]] -# user system elapsed -# 99.015 0.051 98.982 -# -# [[2]] -# user system elapsed -# 100.268 0.317 55.473 -# -# [[3]] -# user system elapsed -# 111.682 0.777 35.963 -# -# [[4]] -# user system elapsed -# 149.396 1.851 32.661 -# -# [[5]] -# user system elapsed -# 157.390 5.988 40.949 - diff --git a/ml-xgboost/demo/kaggle-higgs/speedtest.py b/ml-xgboost/demo/kaggle-higgs/speedtest.py deleted file mode 100644 index 93672de..0000000 --- a/ml-xgboost/demo/kaggle-higgs/speedtest.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/python -# this is the example script showing how to use xgboost to train -import numpy as np -import xgboost as xgb -from sklearn.ensemble import GradientBoostingClassifier -import time -test_size = 550000 - -# path to where the data lies -dpath = 'data' - -# load in training data, directly use numpy -dtrain = np.loadtxt( dpath+'/training.csv', delimiter=',', skiprows=1, converters={32: lambda x:int(x=='s'.encode('utf-8')) } ) -print ('finish loading from csv ') - -label = dtrain[:,32] -data = dtrain[:,1:31] -# rescale weight to make it the same as the test set -weight = dtrain[:,31] * float(test_size) / len(label) - -sum_wpos = sum( weight[i] for i in range(len(label)) if label[i] == 1.0 ) -sum_wneg = sum( weight[i] for i in range(len(label)) if label[i] == 0.0 ) - -# print weight statistics -print ('weight statistics: wpos=%g, wneg=%g, ratio=%g' % ( sum_wpos, sum_wneg, sum_wneg/sum_wpos )) - -# construct xgboost.DMatrix from numpy array, treat -999.0 as missing value -xgmat = xgb.DMatrix( data, label=label, missing = -999.0, weight=weight ) - -# setup parameters for xgboost -param = {} -# use logistic regression loss -param['objective'] = 'binary:logitraw' -# scale weight of positive examples
-param['scale_pos_weight'] = sum_wneg/sum_wpos -param['bst:eta'] = 0.1 -param['bst:max_depth'] = 6 -param['eval_metric'] = 'auc' -param['silent'] = 1 -param['nthread'] = 4 - -plst = list(param.items())+[('eval_metric', 'ams@0.15')] - -watchlist = [ (xgmat,'train') ] -# boost 10 trees -num_round = 10 -print ('loading data end, start to boost trees') -print ("training GBM from sklearn") -tmp = time.time() -gbm = GradientBoostingClassifier(n_estimators=num_round, max_depth=6, verbose=2) -gbm.fit(data, label) -print ("sklearn.GBM costs: %s seconds" % str(time.time() - tmp)) -#raw_input() -print ("training xgboost") -threads = [1, 2, 4, 16] -for i in threads: - param['nthread'] = i - tmp = time.time() - plst = list(param.items())+[('eval_metric', 'ams@0.15')] - bst = xgb.train( plst, xgmat, num_round, watchlist ) - print ("XGBoost with %d threads costs: %s seconds" % (i, str(time.time() - tmp))) - -print ('finish training') diff --git a/ml-xgboost/demo/kaggle-otto/README.MD b/ml-xgboost/demo/kaggle-otto/README.MD deleted file mode 100644 index bdb3b07..0000000 --- a/ml-xgboost/demo/kaggle-otto/README.MD +++ /dev/null @@ -1,22 +0,0 @@ -Benchmark for Otto Group Competition -========= - -This is a folder containing the benchmark for the [Otto Group Competition on Kaggle](http://www.kaggle.com/c/otto-group-product-classification-challenge). - -## Getting started - -1. Put `train.csv` and `test.csv` under the `data` folder -2. Run the script -3. Submit the `submission.csv` - -The parameter `nthread` controls the number of cores to run on; please set it to suit your machine. - -## R-package - -To install the R-package of xgboost, please run - -```r -devtools::install_github('tqchen/xgboost',subdir='R-package') -``` - -Windows users may need to install [RTools](http://cran.r-project.org/bin/windows/Rtools/) first.
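One idiom in the Higgs scripts above deserves a note: `xgb.train` accepts its parameters either as a dict or as a list of `(key, value)` pairs, and only the list form can carry two `eval_metric` entries at once, because a dict would silently keep just the last one. A minimal, self-contained sketch of the idiom; the random stand-in data is hypothetical, not the Higgs CSV:

```python
import numpy as np
import xgboost as xgb

# hypothetical toy data standing in for the Higgs training set
X = np.random.rand(100, 30)
y = np.random.randint(0, 2, 100)
dtrain = xgb.DMatrix(X, label=y)

param = {'objective': 'binary:logitraw', 'eta': 0.1, 'eval_metric': 'auc'}
# a dict can hold 'eval_metric' only once; a list of pairs can repeat it,
# so both 'auc' and 'ams@0.15' are reported for each boosting round
plst = list(param.items()) + [('eval_metric', 'ams@0.15')]
bst = xgb.train(plst, dtrain, num_boost_round=10, evals=[(dtrain, 'train')])
```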
diff --git a/ml-xgboost/demo/kaggle-otto/otto_train_pred.R b/ml-xgboost/demo/kaggle-otto/otto_train_pred.R deleted file mode 100644 index ec0f85e..0000000 --- a/ml-xgboost/demo/kaggle-otto/otto_train_pred.R +++ /dev/null @@ -1,43 +0,0 @@ -require(xgboost) -require(methods) - -train = read.csv('data/train.csv',header=TRUE,stringsAsFactors = F) -test = read.csv('data/test.csv',header=TRUE,stringsAsFactors = F) -train = train[,-1] -test = test[,-1] - -y = train[,ncol(train)] -y = gsub('Class_','',y) -y = as.integer(y)-1 # xgboost takes class labels in [0, numOfClass) - -x = rbind(train[,-ncol(train)],test) -x = as.matrix(x) -x = matrix(as.numeric(x),nrow(x),ncol(x)) -trind = 1:length(y) -teind = (nrow(train)+1):nrow(x) - -# Set the necessary parameters -param <- list("objective" = "multi:softprob", - "eval_metric" = "mlogloss", - "num_class" = 9, - "nthread" = 8) - -# Run cross-validation -cv.nrounds = 50 -bst.cv = xgb.cv(param=param, data = x[trind,], label = y, - nfold = 3, nrounds=cv.nrounds) - -# Train the model -nrounds = 50 -bst = xgboost(param=param, data = x[trind,], label = y, nrounds=nrounds) - -# Make prediction -pred = predict(bst,x[teind,]) -pred = matrix(pred,9,length(pred)/9) -pred = t(pred) - -# Output submission -pred = format(pred, digits=2,scientific=F) # shrink the size of the submission -pred = data.frame(1:nrow(pred),pred) -names(pred) = c('id', paste0('Class_',1:9)) -write.csv(pred,file='submission.csv', quote=FALSE,row.names=FALSE) diff --git a/ml-xgboost/demo/kaggle-otto/understandingXGBoostModel.Rmd b/ml-xgboost/demo/kaggle-otto/understandingXGBoostModel.Rmd deleted file mode 100644 index b37c407..0000000 --- a/ml-xgboost/demo/kaggle-otto/understandingXGBoostModel.Rmd +++ /dev/null @@ -1,231 +0,0 @@ ---- -title: "Understanding XGBoost Model on Otto Dataset" -author: "Michaël Benesty" -output: - rmarkdown::html_vignette: - css: ../../R-package/vignettes/vignette.css - number_sections: yes - toc: yes ---- - -Introduction -============ - -**XGBoost** is an implementation of the famous gradient boosting algorithm. This model is often described as a *black box*, meaning it works well but it is not trivial to understand how. Indeed, the model is made of hundreds (thousands?) of decision trees. You may wonder how a human could possibly get a general view of such a model. - -While XGBoost is known for its speed and predictive power, it also comes with various functions to help you understand the model. -The purpose of this RMarkdown document is to demonstrate how easily we can leverage the functions already implemented in the **XGBoost R** package. Of course, everything shown below can be applied to any dataset you may have to manipulate at work or elsewhere! - -First we will prepare the **Otto** dataset and train a model; then we will generate two visualisations to get a clue of what is important to the model; finally, we will see how we can leverage this information. - -Preparation of the data -======================= - -This part is based on the **R** tutorial example by [Tong He](https://github.com/dmlc/xgboost/blob/master/demo/kaggle-otto/otto_train_pred.R) - -First, let's load the packages and the dataset. - -```{r loading} -require(xgboost) -require(methods) -require(data.table) -require(magrittr) -train <- fread('data/train.csv', header = T, stringsAsFactors = F) -test <- fread('data/test.csv', header=TRUE, stringsAsFactors = F) -``` -> `magrittr` and `data.table` are here to make the code cleaner and much faster. - -Let's explore the dataset.
- -```{r explore} -# Train dataset dimensions -dim(train) - -# Training content -train[1:6,1:5, with =F] - -# Test dataset dimensions -dim(test) - -# Test content -test[1:6,1:5, with =F] -``` -> We only display the first 6 rows and first 5 columns for convenience - -Each *column* represents a feature measured by an `integer`. Each *row* is an **Otto** product. - -Obviously the first column (`ID`) doesn't contain any useful information. - -To let the algorithm focus on the informative columns, we will delete it. - -```{r clean, results='hide'} -# Delete ID column in training dataset -train[, id := NULL] - -# Delete ID column in testing dataset -test[, id := NULL] -``` - -According to its description, the **Otto** challenge is a multi-class classification challenge. We need to extract the labels (here the names of the different classes) from the dataset. We only have two files (test and training), so it seems logical that the training file contains the classes we are looking for. Usually the label is in the first or the last column. We already know what is in the first column, so let's check the content of the last one. - -```{r searchLabel} -# Check the content of the last column -train[1:6, ncol(train), with = F] -# Save the name of the last column -nameLastCol <- names(train)[ncol(train)] -``` - -The classes are provided as character strings in the `r ncol(train)`th column, called `r nameLastCol`. As you may know, **XGBoost** doesn't support anything other than numbers. So we will convert the classes to `integer`. Moreover, according to the documentation, they should start at `0`. - -For that purpose, we will: - -* extract the target column -* remove `Class_` from each class name -* convert to `integer` -* subtract `1` from the new value - -```{r classToIntegers} -# Convert from classes to numbers -y <- train[, nameLastCol, with = F][[1]] %>% gsub('Class_','',.) %>% {as.integer(.) -1} - -# Display the first 5 levels -y[1:5] -``` - -We remove the label column from the training dataset, otherwise **XGBoost** would use it to guess the labels! - -```{r deleteCols, results='hide'} -train[, nameLastCol:=NULL, with = F] -``` - -`data.table` is an awesome implementation of data.frame; unfortunately, it is not a format supported natively by **XGBoost**. We need to convert both datasets (training and test) into `numeric` Matrix format. - -```{r convertToNumericMatrix} -trainMatrix <- train[,lapply(.SD,as.numeric)] %>% as.matrix -testMatrix <- test[,lapply(.SD,as.numeric)] %>% as.matrix -``` - -Model training -============== - -Before training, we will use cross-validation to evaluate our error rate. - -Basically **XGBoost** will divide the training data into `nfold` parts, hold out the first part as test data, and perform a training on the rest. Then it will reintegrate the first part, hold out the second part, do a training and so on... - -You can look at the function documentation for more information. - -```{r crossValidation} -numberOfClasses <- max(y) + 1 - -param <- list("objective" = "multi:softprob", - "eval_metric" = "mlogloss", - "num_class" = numberOfClasses) - -cv.nrounds <- 5 -cv.nfold <- 3 - -bst.cv = xgb.cv(param=param, data = trainMatrix, label = y, - nfold = cv.nfold, nrounds = cv.nrounds) -``` -> As we can see, the error rate is low on the test dataset (for a model trained in about 5 minutes). - -Finally, we are ready to train the real model!!!
- -```{r modelTraining} -nrounds = 50 -bst = xgboost(param=param, data = trainMatrix, label = y, nrounds=nrounds) -``` - -Model understanding -=================== - -Feature importance ------------------- - -So far, we have built a model made of **`r nrounds`** trees. - -To build a tree, the dataset is divided recursively several times. At the end of the process, you get groups of observations (here, the **Otto** products). - -Each division operation is called a *split*. - -Each group at each division level is called a *branch*, and the deepest level is called a *leaf*. - -In the final model, these *leaves* are supposed to be as pure as possible for each tree, meaning in our case that each *leaf* should be made of one class of **Otto** product only (of course this is not fully achieved, but that's what we try to approach with a minimum number of splits). - -**Not all *splits* are equally important**. Basically the first *split* of a tree will have more impact on the purity than, for instance, the deepest *split*. Intuitively, we understand that the first *split* does most of the work, and the following *splits* focus on smaller parts of the dataset which the earlier *splits* have misclassified. - -In the same way, in Boosting we try to optimize the misclassification at each round (this is called the *loss*). So the first *tree* will do the big work and the following trees will focus on the remainder, on the parts not correctly learned by the previous *trees*. - -The improvement brought by each *split* can be measured; it is called the *gain*. - -Each *split* is done on one feature only, at one value. - -Let's see what the model looks like. - -```{r modelDump} -model <- xgb.dump(bst, with.stats = T) -model[1:10] -``` -> For convenience, we are displaying the first 10 lines of the model only. - -Clearly, it is not easy to understand what this output means. - -Basically, each line represents a *branch*: there is the *tree* ID, the feature ID, the point where it *splits*, and information regarding the next *branches* (left, right, and where to go when the value for this feature is N/A). - -Fortunately, **XGBoost** offers a better representation: **feature importance**. - -Feature importance is about averaging the *gain* of each feature across all *splits* and all *trees*. - -Then we can use the function `xgb.plot.importance`. - -```{r importanceFeature, fig.align='center', fig.height=5, fig.width=10} -# Get the feature real names -names <- dimnames(trainMatrix)[[2]] - -# Compute feature importance matrix -importance_matrix <- xgb.importance(names, model = bst) - -# Nice graph -xgb.plot.importance(importance_matrix[1:10,]) -``` - -> To make it understandable, we first extract the column names from the `Matrix`. - -Interpretation -------------- - -In the feature importance above, we can see the 10 most important features. - -This function gives a color to each bar. These colors represent groups of features. Basically, K-means clustering is applied to group the features by importance. - -From here you can take several actions. For instance you can remove the less important features (a feature-selection process), or go deeper into the interaction between the most important features and the labels. - -Or you can just reason about why these features are so important (in the **Otto** challenge we can't go this way because there is not enough information). - -Tree graph ---------- - -Feature importance gives you feature weight information but not interaction between features.
- -The **XGBoost R** package has another useful function for that. - -Please scroll right to see the trees. - -```{r treeGraph, dpi=1500, fig.align='left'} -xgb.plot.tree(feature_names = names, model = bst, n_first_tree = 2) -``` - -We are just displaying the first two trees here. - -On simple models the first two trees may be enough. Here, it is probably not the case: we can see from the size of the trees that the interaction between features is complicated. -Besides, **XGBoost** generates `k` trees at each round for a `k`-class classification problem (with 9 classes and 50 rounds, the model above therefore holds 450 trees). The two trees illustrated here thus each correspond to a different class. - -Going deeper -============ - -There are 4 documents you may also be interested in: - -* [xgboostPresentation.Rmd](https://github.com/dmlc/xgboost/blob/master/R-package/vignettes/xgboostPresentation.Rmd): general presentation -* [discoverYourData.Rmd](https://github.com/dmlc/xgboost/blob/master/R-package/vignettes/discoverYourData.Rmd): explaining feature analysis -* [Feature Importance Analysis with XGBoost in Tax audit](http://fr.slideshare.net/MichaelBENESTY/feature-importance-analysis-with-xgboost-in-tax-audit): use case -* [The Elements of Statistical Learning](http://statweb.stanford.edu/~tibs/ElemStatLearn/): very good book to have a good understanding of the model diff --git a/ml-xgboost/demo/multiclass_classification/README.md b/ml-xgboost/demo/multiclass_classification/README.md deleted file mode 100644 index 6554ee1..0000000 --- a/ml-xgboost/demo/multiclass_classification/README.md +++ /dev/null @@ -1,10 +0,0 @@ -Demonstrating how to use XGBoost to accomplish a multi-class classification task on the [UCI Dermatology dataset](https://archive.ics.uci.edu/ml/datasets/Dermatology) - -Make sure you have built the xgboost Python module in ../../python - -1. Run runexp.sh -```bash -./runexp.sh -``` - -For the **R version**, please see `train.R`.
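Before the Python script below, here is a condensed sketch of the one behavioural difference it demonstrates, using hypothetical random stand-in data instead of the dermatology file: `multi:softmax` returns class labels directly, while `multi:softprob` returns a flat probability vector that must be reshaped to `(nrow, num_class)`:

```python
import numpy as np
import xgboost as xgb

# toy stand-in data: 100 rows, 33 features, 6 classes (labels must be 0..num_class-1)
X = np.random.rand(100, 33)
y = np.random.randint(0, 6, 100)
dtrain = xgb.DMatrix(X, label=y)

param = {'objective': 'multi:softmax', 'num_class': 6, 'eta': 0.1, 'max_depth': 6}
bst = xgb.train(param, dtrain, num_boost_round=5)
labels = bst.predict(dtrain)                   # shape (100,): predicted class ids

param['objective'] = 'multi:softprob'
bst = xgb.train(param, dtrain, num_boost_round=5)
probs = bst.predict(dtrain).reshape(100, 6)    # flat vector -> (nrow, num_class)
labels_again = probs.argmax(axis=1)            # recover class ids from probabilities
```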
diff --git a/ml-xgboost/demo/multiclass_classification/runexp.sh b/ml-xgboost/demo/multiclass_classification/runexp.sh deleted file mode 100644 index 0af8147..0000000 --- a/ml-xgboost/demo/multiclass_classification/runexp.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -if [ -f dermatology.data ] -then - echo "use existing data to run multi-class classification" -else - echo "getting data from uci, make sure you are connected to the internet" - wget https://archive.ics.uci.edu/ml/machine-learning-databases/dermatology/dermatology.data -fi -python train.py diff --git a/ml-xgboost/demo/multiclass_classification/train.R b/ml-xgboost/demo/multiclass_classification/train.R deleted file mode 100644 index 4a07f27..0000000 --- a/ml-xgboost/demo/multiclass_classification/train.R +++ /dev/null @@ -1,64 +0,0 @@ -library(data.table) -library(xgboost) - -if (!file.exists("./dermatology.data")) { - download.file( - "https://archive.ics.uci.edu/ml/machine-learning-databases/dermatology/dermatology.data", - "dermatology.data", - method = "curl" - ) -} - -df <- fread("dermatology.data", sep = ",", header = FALSE) - -df[, `:=`(V34 = as.integer(ifelse(V34 == "?", 0L, V34)), - V35 = V35 - 1L)] - -idx <- sample(nrow(df), size = round(0.7 * nrow(df)), replace = FALSE) - -train <- df[idx,] -test <- df[-idx,] - -train_x <- train[, 1:34] -train_y <- train[, V35] - -test_x <- test[, 1:34] -test_y <- test[, V35] - -xg_train <- xgb.DMatrix(data = as.matrix(train_x), label = train_y) -xg_test <- xgb.DMatrix(as.matrix(test_x), label = test_y) - -params <- list( - objective = 'multi:softmax', - num_class = 6, - max_depth = 6, - nthread = 4, - eta = 0.1 -) - -watchlist <- list(train = xg_train, test = xg_test) - -bst <- xgb.train( - params = params, - data = xg_train, - watchlist = watchlist, - nrounds = 5 -) - -pred <- predict(bst, xg_test) -error_rate <- sum(pred != test_y) / length(test_y) -print(paste("Test error using softmax =", error_rate)) - -# do the same thing again, but output probabilities -params$objective <- 'multi:softprob' -bst <- xgb.train(params, xg_train, nrounds = 5, watchlist) - -pred_prob <- predict(bst, xg_test) - -pred_mat <- matrix(pred_prob, ncol = 6, byrow = TRUE) -# validation -# rowSums(pred_mat) - -pred_label <- apply(pred_mat, 1, which.max) - 1L -error_rate <- sum(pred_label != test_y) / length(test_y) -print(paste("Test error using softprob =", error_rate)) diff --git a/ml-xgboost/demo/multiclass_classification/train.py b/ml-xgboost/demo/multiclass_classification/train.py deleted file mode 100644 index 4dbce82..0000000 --- a/ml-xgboost/demo/multiclass_classification/train.py +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/python - -from __future__ import division - -import numpy as np -import xgboost as xgb - -# labels need to be 0 to num_class - 1 -data = np.loadtxt('./dermatology.data', delimiter=',', - converters={33: lambda x:int(x == '?'), 34: lambda x:int(x) - 1}) -sz = data.shape - -train = data[:int(sz[0] * 0.7), :] -test = data[int(sz[0] * 0.7):, :] - -train_X = train[:, :33] -train_Y = train[:, 34] - -test_X = test[:, :33] -test_Y = test[:, 34] - -xg_train = xgb.DMatrix(train_X, label=train_Y) -xg_test = xgb.DMatrix(test_X, label=test_Y) -# setup parameters for xgboost -param = {} -# use softmax multi-class classification -param['objective'] = 'multi:softmax' -# scale weight of positive examples -param['eta'] = 0.1 -param['max_depth'] = 6 -param['silent'] = 1 -param['nthread'] = 4 -param['num_class'] = 6 - -watchlist = [(xg_train, 'train'), (xg_test, 'test')] -num_round = 5
-bst = xgb.train(param, xg_train, num_round, watchlist) -# get prediction -pred = bst.predict(xg_test) -error_rate = np.sum(pred != test_Y) / test_Y.shape[0] -print('Test error using softmax = {}'.format(error_rate)) - -# do the same thing again, but output probabilities -param['objective'] = 'multi:softprob' -bst = xgb.train(param, xg_train, num_round, watchlist) -# Note: this convention has been changed since xgboost-unity -# get prediction, this is in 1D array, need reshape to (ndata, nclass) -pred_prob = bst.predict(xg_test).reshape(test_Y.shape[0], 6) -pred_label = np.argmax(pred_prob, axis=1) -error_rate = np.sum(pred_label != test_Y) / test_Y.shape[0] -print('Test error using softprob = {}'.format(error_rate)) diff --git a/ml-xgboost/demo/rank/README.md b/ml-xgboost/demo/rank/README.md deleted file mode 100644 index 1f112b4..0000000 --- a/ml-xgboost/demo/rank/README.md +++ /dev/null @@ -1,41 +0,0 @@ -Learning to rank -==== -XGBoost supports ranking tasks. In ranking scenarios, data are often grouped, and we need the [group information file](../../doc/tutorials/input_format.rst#group-input-format) to specify the groups. The model used in XGBoost for ranking is LambdaRank. See [parameters](../../doc/parameter.rst) for supported metrics. - -### Parameters -The configuration setting is similar to the regression and binary classification setting, except that the user needs to specify the objective: - -``` -... -objective="rank:pairwise" -... -``` -For more usage details please refer to the [binary classification demo](../binary_classification). - -Instructions -==== -The dataset for the ranking demo is from LETOR04 MQ2008 Fold1. -Before running the examples, you need to get the data by running: - -``` -./wgetdata.sh -``` - -### Command Line -Run the example: -``` -./runexp.sh -``` - -### Python -There are two ways of doing ranking in Python; both rely on the query-group files described below.
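Both scripts read plain-text query-group files (`mq2008.train.group` and friends, produced by `trans_data.py` further below). Each line holds one integer: the number of consecutive rows in the corresponding feature file that belong to the same query, so the integers must sum to the number of rows. A hypothetical file describing three queries with 3, 5 and 4 documents would look like:

```
3
5
4
```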
- -Run the example using `xgboost.train`: -``` -python rank.py -``` - -Run the example using `XGBRanker`: -``` -python rank_sklearn.py -``` diff --git a/ml-xgboost/demo/rank/mq2008.conf b/ml-xgboost/demo/rank/mq2008.conf deleted file mode 100644 index a19758b..0000000 --- a/ml-xgboost/demo/rank/mq2008.conf +++ /dev/null @@ -1,28 +0,0 @@ -# General Parameters, see comment for each definition - -# specify objective -objective="rank:pairwise" - -# Tree Booster Parameters -# step size shrinkage -eta = 0.1 -# minimum loss reduction required to make a further partition -gamma = 1.0 -# minimum sum of instance weight(hessian) needed in a child -min_child_weight = 0.1 -# maximum depth of a tree -max_depth = 6 - -# Task parameters -# the number of rounds to do boosting -num_round = 4 -# 0 means do not save any model except the final round model -save_period = 0 -# The path of training data -data = "mq2008.train" -# The path of validation data, used to monitor training process, here [test] sets name of the validation set -eval[test] = "mq2008.vali" -# The path of test data -test:data = "mq2008.test" - - diff --git a/ml-xgboost/demo/rank/rank.py b/ml-xgboost/demo/rank/rank.py deleted file mode 100644 index d19b2c5..0000000 --- a/ml-xgboost/demo/rank/rank.py +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/python -import xgboost as xgb -from xgboost import DMatrix -from sklearn.datasets import load_svmlight_file - - -# This script demonstrates how to do ranking with xgboost.train -x_train, y_train = load_svmlight_file("mq2008.train") -x_valid, y_valid = load_svmlight_file("mq2008.vali") -x_test, y_test = load_svmlight_file("mq2008.test") - -group_train = [] -with open("mq2008.train.group", "r") as f: - data = f.readlines() - for line in data: - group_train.append(int(line.split("\n")[0])) - -group_valid = [] -with open("mq2008.vali.group", "r") as f: - data = f.readlines() - for line in data: - group_valid.append(int(line.split("\n")[0])) - -group_test = [] -with open("mq2008.test.group", "r") as f: - data = f.readlines() - for line in data: - group_test.append(int(line.split("\n")[0])) - -train_dmatrix = DMatrix(x_train, y_train) -valid_dmatrix = DMatrix(x_valid, y_valid) -test_dmatrix = DMatrix(x_test) - -train_dmatrix.set_group(group_train) -valid_dmatrix.set_group(group_valid) - -params = {'objective': 'rank:ndcg', 'eta': 0.1, 'gamma': 1.0, - 'min_child_weight': 0.1, 'max_depth': 6} -xgb_model = xgb.train(params, train_dmatrix, num_boost_round=4, - evals=[(valid_dmatrix, 'validation')]) -pred = xgb_model.predict(test_dmatrix) diff --git a/ml-xgboost/demo/rank/rank_sklearn.py b/ml-xgboost/demo/rank/rank_sklearn.py deleted file mode 100644 index 723b8c7..0000000 --- a/ml-xgboost/demo/rank/rank_sklearn.py +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/python -import xgboost as xgb -from sklearn.datasets import load_svmlight_file - -# This script demonstrates how to do ranking with XGBRanker -x_train, y_train = load_svmlight_file("mq2008.train") -x_valid, y_valid = load_svmlight_file("mq2008.vali") -x_test, y_test = load_svmlight_file("mq2008.test") - -group_train = [] -with open("mq2008.train.group", "r") as f: - data = f.readlines() - for line in data: - group_train.append(int(line.split("\n")[0])) - -group_valid = [] -with open("mq2008.vali.group", "r") as f: - data = f.readlines() - for line in data: - group_valid.append(int(line.split("\n")[0])) - -group_test = [] -with open("mq2008.test.group", "r") as f: - data = f.readlines() - for line in data: - group_test.append(int(line.split("\n")[0])) - -params =
{'objective': 'rank:ndcg', 'learning_rate': 0.1, - 'gamma': 1.0, 'min_child_weight': 0.1, - 'max_depth': 6, 'n_estimators': 4} -model = xgb.sklearn.XGBRanker(**params) -model.fit(x_train, y_train, group_train, verbose=True, - eval_set=[(x_valid, y_valid)], eval_group=[group_valid]) -pred = model.predict(x_test) diff --git a/ml-xgboost/demo/rank/runexp.sh b/ml-xgboost/demo/rank/runexp.sh deleted file mode 100644 index 5b299f9..0000000 --- a/ml-xgboost/demo/rank/runexp.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -../../xgboost mq2008.conf - -../../xgboost mq2008.conf task=pred model_in=0004.model diff --git a/ml-xgboost/demo/rank/trans_data.py b/ml-xgboost/demo/rank/trans_data.py deleted file mode 100644 index 7282848..0000000 --- a/ml-xgboost/demo/rank/trans_data.py +++ /dev/null @@ -1,41 +0,0 @@ -import sys - -def save_data(group_data,output_feature,output_group): - if len(group_data) == 0: - return - - output_group.write(str(len(group_data))+"\n") - for data in group_data: - # only include nonzero features - feats = [ p for p in data[2:] if float(p.split(':')[1]) != 0.0 ] - output_feature.write(data[0] + " " + " ".join(feats) + "\n") - -if __name__ == "__main__": - if len(sys.argv) != 4: - print ("Usage: python trans_data.py [Ranksvm Format Input] [Output Feature File] [Output Group File]") - sys.exit(0) - - fi = open(sys.argv[1]) - output_feature = open(sys.argv[2],"w") - output_group = open(sys.argv[3],"w") - - group_data = [] - group = "" - for line in fi: - if not line: - break - if "#" in line: - line = line[:line.index("#")] - splits = line.strip().split(" ") - if splits[1] != group: - save_data(group_data,output_feature,output_group) - group_data = [] - group = splits[1] - group_data.append(splits) - - save_data(group_data,output_feature,output_group) - - fi.close() - output_feature.close() - output_group.close() - diff --git a/ml-xgboost/demo/rank/wgetdata.sh b/ml-xgboost/demo/rank/wgetdata.sh deleted file mode 100644 index 3bd5bd3..0000000 --- a/ml-xgboost/demo/rank/wgetdata.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -wget https://s3-us-west-2.amazonaws.com/xgboost-examples/MQ2008.rar -unrar x MQ2008.rar -mv -f MQ2008/Fold1/*.txt . - -python trans_data.py train.txt mq2008.train mq2008.train.group - -python trans_data.py test.txt mq2008.test mq2008.test.group - -python trans_data.py vali.txt mq2008.vali mq2008.vali.group diff --git a/ml-xgboost/demo/regression/README.md b/ml-xgboost/demo/regression/README.md deleted file mode 100644 index 0a87f37..0000000 --- a/ml-xgboost/demo/regression/README.md +++ /dev/null @@ -1,17 +0,0 @@ -Regression -==== -Using XGBoost for regression is very similar to using it for binary classification. We suggest you first go through the [binary classification demo](../binary_classification). In XGBoost, if we use negative log likelihood as the loss function for regression, the training procedure is the same as training a binary classifier. - -### Tutorial -The dataset we used is the [computer hardware dataset from the UCI repository](https://archive.ics.uci.edu/ml/datasets/Computer+Hardware). The demo for regression is almost the same as the [binary classification demo](../binary_classification), except for a small difference in the general parameters: -``` -# General parameter -# this is the only difference with classification, use reg:squarederror to do linear regression -# when labels are in [0,1] we can also use reg:logistic -objective = reg:squarederror -...
- -``` - -The input format is the same as for binary classification, except that the label is now the target regression value. We use linear regression here; if we want to use objective = reg:logistic for logistic regression, the labels need to be pre-scaled into [0,1]. - diff --git a/ml-xgboost/demo/regression/machine.conf b/ml-xgboost/demo/regression/machine.conf deleted file mode 100644 index 14fc0f9..0000000 --- a/ml-xgboost/demo/regression/machine.conf +++ /dev/null @@ -1,30 +0,0 @@ -# General Parameters, see comment for each definition -# choose the tree booster, can also change to gblinear -booster = gbtree -# this is the only difference with classification, use reg:squarederror to do linear regression -# when labels are in [0,1] we can also use reg:logistic -objective = reg:squarederror - -# Tree Booster Parameters -# step size shrinkage -eta = 1.0 -# minimum loss reduction required to make a further partition -gamma = 1.0 -# minimum sum of instance weight(hessian) needed in a child -min_child_weight = 1 -# maximum depth of a tree -max_depth = 3 - -# Task parameters -# the number of rounds to do boosting -num_round = 2 -# 0 means do not save any model except the final round model -save_period = 0 -# The path of training data -data = "machine.txt.train" -# The path of validation data, used to monitor training process, here [test] sets name of the validation set -eval[test] = "machine.txt.test" -# The path of test data -test:data = "machine.txt.test" - - diff --git a/ml-xgboost/demo/regression/machine.data b/ml-xgboost/demo/regression/machine.data deleted file mode 100644 index 656ed8c..0000000 --- a/ml-xgboost/demo/regression/machine.data +++ /dev/null @@ -1,209 +0,0 @@ -adviser,32/60,125,256,6000,256,16,128,198,199 -amdahl,470v/7,29,8000,32000,32,8,32,269,253 -amdahl,470v/7a,29,8000,32000,32,8,32,220,253 -amdahl,470v/7b,29,8000,32000,32,8,32,172,253 -amdahl,470v/7c,29,8000,16000,32,8,16,132,132 -amdahl,470v/b,26,8000,32000,64,8,32,318,290 -amdahl,580-5840,23,16000,32000,64,16,32,367,381 -amdahl,580-5850,23,16000,32000,64,16,32,489,381 -amdahl,580-5860,23,16000,64000,64,16,32,636,749 -amdahl,580-5880,23,32000,64000,128,32,64,1144,1238 -apollo,dn320,400,1000,3000,0,1,2,38,23 -apollo,dn420,400,512,3500,4,1,6,40,24 -basf,7/65,60,2000,8000,65,1,8,92,70 -basf,7/68,50,4000,16000,65,1,8,138,117 -bti,5000,350,64,64,0,1,4,10,15 -bti,8000,200,512,16000,0,4,32,35,64 -burroughs,b1955,167,524,2000,8,4,15,19,23 -burroughs,b2900,143,512,5000,0,7,32,28,29 -burroughs,b2925,143,1000,2000,0,5,16,31,22 -burroughs,b4955,110,5000,5000,142,8,64,120,124 -burroughs,b5900,143,1500,6300,0,5,32,30,35 -burroughs,b5920,143,3100,6200,0,5,20,33,39 -burroughs,b6900,143,2300,6200,0,6,64,61,40 -burroughs,b6925,110,3100,6200,0,6,64,76,45 -c.r.d,68/10-80,320,128,6000,0,1,12,23,28 -c.r.d,universe:2203t,320,512,2000,4,1,3,69,21 -c.r.d,universe:68,320,256,6000,0,1,6,33,28 -c.r.d,universe:68/05,320,256,3000,4,1,3,27,22 -c.r.d,universe:68/137,320,512,5000,4,1,5,77,28 -c.r.d,universe:68/37,320,256,5000,4,1,6,27,27 -cdc,cyber:170/750,25,1310,2620,131,12,24,274,102 -cdc,cyber:170/760,25,1310,2620,131,12,24,368,102 -cdc,cyber:170/815,50,2620,10480,30,12,24,32,74 -cdc,cyber:170/825,50,2620,10480,30,12,24,63,74 -cdc,cyber:170/835,56,5240,20970,30,12,24,106,138 -cdc,cyber:170/845,64,5240,20970,30,12,24,208,136 -cdc,omega:480-i,50,500,2000,8,1,4,20,23 -cdc,omega:480-ii,50,1000,4000,8,1,5,29,29 -cdc,omega:480-iii,50,2000,8000,8,1,5,71,44 -cambex,1636-1,50,1000,4000,8,3,5,26,30 -cambex,1636-10,50,1000,8000,8,3,5,36,41
-cambex,1641-1,50,2000,16000,8,3,5,40,74 -cambex,1641-11,50,2000,16000,8,3,6,52,74 -cambex,1651-1,50,2000,16000,8,3,6,60,74 -dec,decsys:10:1091,133,1000,12000,9,3,12,72,54 -dec,decsys:20:2060,133,1000,8000,9,3,12,72,41 -dec,microvax-1,810,512,512,8,1,1,18,18 -dec,vax:11/730,810,1000,5000,0,1,1,20,28 -dec,vax:11/750,320,512,8000,4,1,5,40,36 -dec,vax:11/780,200,512,8000,8,1,8,62,38 -dg,eclipse:c/350,700,384,8000,0,1,1,24,34 -dg,eclipse:m/600,700,256,2000,0,1,1,24,19 -dg,eclipse:mv/10000,140,1000,16000,16,1,3,138,72 -dg,eclipse:mv/4000,200,1000,8000,0,1,2,36,36 -dg,eclipse:mv/6000,110,1000,4000,16,1,2,26,30 -dg,eclipse:mv/8000,110,1000,12000,16,1,2,60,56 -dg,eclipse:mv/8000-ii,220,1000,8000,16,1,2,71,42 -formation,f4000/100,800,256,8000,0,1,4,12,34 -formation,f4000/200,800,256,8000,0,1,4,14,34 -formation,f4000/200ap,800,256,8000,0,1,4,20,34 -formation,f4000/300,800,256,8000,0,1,4,16,34 -formation,f4000/300ap,800,256,8000,0,1,4,22,34 -four-phase,2000/260,125,512,1000,0,8,20,36,19 -gould,concept:32/8705,75,2000,8000,64,1,38,144,75 -gould,concept:32/8750,75,2000,16000,64,1,38,144,113 -gould,concept:32/8780,75,2000,16000,128,1,38,259,157 -hp,3000/30,90,256,1000,0,3,10,17,18 -hp,3000/40,105,256,2000,0,3,10,26,20 -hp,3000/44,105,1000,4000,0,3,24,32,28 -hp,3000/48,105,2000,4000,8,3,19,32,33 -hp,3000/64,75,2000,8000,8,3,24,62,47 -hp,3000/88,75,3000,8000,8,3,48,64,54 -hp,3000/iii,175,256,2000,0,3,24,22,20 -harris,100,300,768,3000,0,6,24,36,23 -harris,300,300,768,3000,6,6,24,44,25 -harris,500,300,768,12000,6,6,24,50,52 -harris,600,300,768,4500,0,1,24,45,27 -harris,700,300,384,12000,6,1,24,53,50 -harris,80,300,192,768,6,6,24,36,18 -harris,800,180,768,12000,6,1,31,84,53 -honeywell,dps:6/35,330,1000,3000,0,2,4,16,23 -honeywell,dps:6/92,300,1000,4000,8,3,64,38,30 -honeywell,dps:6/96,300,1000,16000,8,2,112,38,73 -honeywell,dps:7/35,330,1000,2000,0,1,2,16,20 -honeywell,dps:7/45,330,1000,4000,0,3,6,22,25 -honeywell,dps:7/55,140,2000,4000,0,3,6,29,28 -honeywell,dps:7/65,140,2000,4000,0,4,8,40,29 -honeywell,dps:8/44,140,2000,4000,8,1,20,35,32 -honeywell,dps:8/49,140,2000,32000,32,1,20,134,175 -honeywell,dps:8/50,140,2000,8000,32,1,54,66,57 -honeywell,dps:8/52,140,2000,32000,32,1,54,141,181 -honeywell,dps:8/62,140,2000,32000,32,1,54,189,181 -honeywell,dps:8/20,140,2000,4000,8,1,20,22,32 -ibm,3033:s,57,4000,16000,1,6,12,132,82 -ibm,3033:u,57,4000,24000,64,12,16,237,171 -ibm,3081,26,16000,32000,64,16,24,465,361 -ibm,3081:d,26,16000,32000,64,8,24,465,350 -ibm,3083:b,26,8000,32000,0,8,24,277,220 -ibm,3083:e,26,8000,16000,0,8,16,185,113 -ibm,370/125-2,480,96,512,0,1,1,6,15 -ibm,370/148,203,1000,2000,0,1,5,24,21 -ibm,370/158-3,115,512,6000,16,1,6,45,35 -ibm,38/3,1100,512,1500,0,1,1,7,18 -ibm,38/4,1100,768,2000,0,1,1,13,20 -ibm,38/5,600,768,2000,0,1,1,16,20 -ibm,38/7,400,2000,4000,0,1,1,32,28 -ibm,38/8,400,4000,8000,0,1,1,32,45 -ibm,4321,900,1000,1000,0,1,2,11,18 -ibm,4331-1,900,512,1000,0,1,2,11,17 -ibm,4331-11,900,1000,4000,4,1,2,18,26 -ibm,4331-2,900,1000,4000,8,1,2,22,28 -ibm,4341,900,2000,4000,0,3,6,37,28 -ibm,4341-1,225,2000,4000,8,3,6,40,31 -ibm,4341-10,225,2000,4000,8,3,6,34,31 -ibm,4341-11,180,2000,8000,8,1,6,50,42 -ibm,4341-12,185,2000,16000,16,1,6,76,76 -ibm,4341-2,180,2000,16000,16,1,6,66,76 -ibm,4341-9,225,1000,4000,2,3,6,24,26 -ibm,4361-4,25,2000,12000,8,1,4,49,59 -ibm,4361-5,25,2000,12000,16,3,5,66,65 -ibm,4381-1,17,4000,16000,8,6,12,100,101 -ibm,4381-2,17,4000,16000,32,6,12,133,116 -ibm,8130-a,1500,768,1000,0,0,0,12,18 -ibm,8130-b,1500,768,2000,0,0,0,18,20 -ibm,8140,800,768,2000,0,0,0,20,20 
-ipl,4436,50,2000,4000,0,3,6,27,30 -ipl,4443,50,2000,8000,8,3,6,45,44 -ipl,4445,50,2000,8000,8,1,6,56,44 -ipl,4446,50,2000,16000,24,1,6,70,82 -ipl,4460,50,2000,16000,24,1,6,80,82 -ipl,4480,50,8000,16000,48,1,10,136,128 -magnuson,m80/30,100,1000,8000,0,2,6,16,37 -magnuson,m80/31,100,1000,8000,24,2,6,26,46 -magnuson,m80/32,100,1000,8000,24,3,6,32,46 -magnuson,m80/42,50,2000,16000,12,3,16,45,80 -magnuson,m80/43,50,2000,16000,24,6,16,54,88 -magnuson,m80/44,50,2000,16000,24,6,16,65,88 -microdata,seq.ms/3200,150,512,4000,0,8,128,30,33 -nas,as/3000,115,2000,8000,16,1,3,50,46 -nas,as/3000-n,115,2000,4000,2,1,5,40,29 -nas,as/5000,92,2000,8000,32,1,6,62,53 -nas,as/5000-e,92,2000,8000,32,1,6,60,53 -nas,as/5000-n,92,2000,8000,4,1,6,50,41 -nas,as/6130,75,4000,16000,16,1,6,66,86 -nas,as/6150,60,4000,16000,32,1,6,86,95 -nas,as/6620,60,2000,16000,64,5,8,74,107 -nas,as/6630,60,4000,16000,64,5,8,93,117 -nas,as/6650,50,4000,16000,64,5,10,111,119 -nas,as/7000,72,4000,16000,64,8,16,143,120 -nas,as/7000-n,72,2000,8000,16,6,8,105,48 -nas,as/8040,40,8000,16000,32,8,16,214,126 -nas,as/8050,40,8000,32000,64,8,24,277,266 -nas,as/8060,35,8000,32000,64,8,24,370,270 -nas,as/9000-dpc,38,16000,32000,128,16,32,510,426 -nas,as/9000-n,48,4000,24000,32,8,24,214,151 -nas,as/9040,38,8000,32000,64,8,24,326,267 -nas,as/9060,30,16000,32000,256,16,24,510,603 -ncr,v8535:ii,112,1000,1000,0,1,4,8,19 -ncr,v8545:ii,84,1000,2000,0,1,6,12,21 -ncr,v8555:ii,56,1000,4000,0,1,6,17,26 -ncr,v8565:ii,56,2000,6000,0,1,8,21,35 -ncr,v8565:ii-e,56,2000,8000,0,1,8,24,41 -ncr,v8575:ii,56,4000,8000,0,1,8,34,47 -ncr,v8585:ii,56,4000,12000,0,1,8,42,62 -ncr,v8595:ii,56,4000,16000,0,1,8,46,78 -ncr,v8635,38,4000,8000,32,16,32,51,80 -ncr,v8650,38,4000,8000,32,16,32,116,80 -ncr,v8655,38,8000,16000,64,4,8,100,142 -ncr,v8665,38,8000,24000,160,4,8,140,281 -ncr,v8670,38,4000,16000,128,16,32,212,190 -nixdorf,8890/30,200,1000,2000,0,1,2,25,21 -nixdorf,8890/50,200,1000,4000,0,1,4,30,25 -nixdorf,8890/70,200,2000,8000,64,1,5,41,67 -perkin-elmer,3205,250,512,4000,0,1,7,25,24 -perkin-elmer,3210,250,512,4000,0,4,7,50,24 -perkin-elmer,3230,250,1000,16000,1,1,8,50,64 -prime,50-2250,160,512,4000,2,1,5,30,25 -prime,50-250-ii,160,512,2000,2,3,8,32,20 -prime,50-550-ii,160,1000,4000,8,1,14,38,29 -prime,50-750-ii,160,1000,8000,16,1,14,60,43 -prime,50-850-ii,160,2000,8000,32,1,13,109,53 -siemens,7.521,240,512,1000,8,1,3,6,19 -siemens,7.531,240,512,2000,8,1,5,11,22 -siemens,7.536,105,2000,4000,8,3,8,22,31 -siemens,7.541,105,2000,6000,16,6,16,33,41 -siemens,7.551,105,2000,8000,16,4,14,58,47 -siemens,7.561,52,4000,16000,32,4,12,130,99 -siemens,7.865-2,70,4000,12000,8,6,8,75,67 -siemens,7.870-2,59,4000,12000,32,6,12,113,81 -siemens,7.872-2,59,8000,16000,64,12,24,188,149 -siemens,7.875-2,26,8000,24000,32,8,16,173,183 -siemens,7.880-2,26,8000,32000,64,12,16,248,275 -siemens,7.881-2,26,8000,32000,128,24,32,405,382 -sperry,1100/61-h1,116,2000,8000,32,5,28,70,56 -sperry,1100/81,50,2000,32000,24,6,26,114,182 -sperry,1100/82,50,2000,32000,48,26,52,208,227 -sperry,1100/83,50,2000,32000,112,52,104,307,341 -sperry,1100/84,50,4000,32000,112,52,104,397,360 -sperry,1100/93,30,8000,64000,96,12,176,915,919 -sperry,1100/94,30,8000,64000,128,12,176,1150,978 -sperry,80/3,180,262,4000,0,1,3,12,24 -sperry,80/4,180,512,4000,0,1,3,14,24 -sperry,80/5,180,262,4000,0,1,3,18,24 -sperry,80/6,180,512,4000,0,1,3,21,24 -sperry,80/8,124,1000,8000,0,1,8,42,37 -sperry,90/80-model-3,98,1000,8000,32,2,8,46,50 -sratus,32,125,2000,8000,0,2,14,52,41 -wang,vs-100,480,512,8000,32,0,0,67,47 
-wang,vs-90,480,1000,4000,0,0,0,45,25 diff --git a/ml-xgboost/demo/regression/machine.names b/ml-xgboost/demo/regression/machine.names deleted file mode 100644 index f19a218..0000000 --- a/ml-xgboost/demo/regression/machine.names +++ /dev/null @@ -1,72 +0,0 @@ -1. Title: Relative CPU Performance Data - -2. Source Information - -- Creators: Phillip Ein-Dor and Jacob Feldmesser - -- Ein-Dor: Faculty of Management; Tel Aviv University; Ramat-Aviv; - Tel Aviv, 69978; Israel - -- Donor: David W. Aha (aha@ics.uci.edu) (714) 856-8779 - -- Date: October, 1987 - -3. Past Usage: - 1. Ein-Dor and Feldmesser (CACM 4/87, pp 308-317) - -- Results: - -- linear regression prediction of relative cpu performance - -- Recorded 34% average deviation from actual values - 2. Kibler,D. & Aha,D. (1988). Instance-Based Prediction of - Real-Valued Attributes. In Proceedings of the CSCSI (Canadian - AI) Conference. - -- Results: - -- instance-based prediction of relative cpu performance - -- similar results; no transformations required - - Predicted attribute: cpu relative performance (numeric) - -4. Relevant Information: - -- The estimated relative performance values were estimated by the authors - using a linear regression method. See their article (pp 308-313) for - more details on how the relative performance values were set. - -5. Number of Instances: 209 - -6. Number of Attributes: 10 (6 predictive attributes, 2 non-predictive, - 1 goal field, and the linear regression's guess) - -7. Attribute Information: - 1. vendor name: 30 - (adviser, amdahl,apollo, basf, bti, burroughs, c.r.d, cambex, cdc, dec, - dg, formation, four-phase, gould, honeywell, hp, ibm, ipl, magnuson, - microdata, nas, ncr, nixdorf, perkin-elmer, prime, siemens, sperry, - sratus, wang) - 2. Model Name: many unique symbols - 3. MYCT: machine cycle time in nanoseconds (integer) - 4. MMIN: minimum main memory in kilobytes (integer) - 5. MMAX: maximum main memory in kilobytes (integer) - 6. CACH: cache memory in kilobytes (integer) - 7. CHMIN: minimum channels in units (integer) - 8. CHMAX: maximum channels in units (integer) - 9. PRP: published relative performance (integer) - 10. ERP: estimated relative performance from the original article (integer) - -8. Missing Attribute Values: None - -9. Class Distribution: the class value (PRP) is continuously valued. 
- PRP Value Range: Number of Instances in Range: - 0-20 31 - 21-100 121 - 101-200 27 - 201-300 13 - 301-400 7 - 401-500 4 - 501-600 2 - above 600 4 - -Summary Statistics: - Min Max Mean SD PRP Correlation - MYCT: 17 1500 203.8 260.3 -0.3071 - MMIN: 64 32000 2868.0 3878.7 0.7949 - MMAX: 64 64000 11796.1 11726.6 0.8630 - CACH: 0 256 25.2 40.6 0.6626 - CHMIN: 0 52 4.7 6.8 0.6089 - CHMAX: 0 176 18.2 26.0 0.6052 - PRP: 6 1150 105.6 160.8 1.0000 - ERP: 15 1238 99.3 154.8 0.9665 - diff --git a/ml-xgboost/demo/regression/mapfeat.py b/ml-xgboost/demo/regression/mapfeat.py deleted file mode 100644 index c747c7b..0000000 --- a/ml-xgboost/demo/regression/mapfeat.py +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/python - -fo = open( 'machine.txt', 'w' ) -cnt = 6 -fmap = {} -for l in open( 'machine.data' ): - arr = l.split(',') - fo.write(arr[8]) - for i in range( 0,6 ): - fo.write( ' %d:%s' %(i,arr[i+2]) ) - - if arr[0] not in fmap: - fmap[arr[0]] = cnt - cnt += 1 - - fo.write( ' %d:1' % fmap[arr[0]] ) - fo.write('\n') - -fo.close() - -# create feature map for machine data -fo = open('featmap.txt', 'w') -# list from machine.names -names = ['vendor','MYCT', 'MMIN', 'MMAX', 'CACH', 'CHMIN', 'CHMAX', 'PRP', 'ERP' ] - -for i in range(0,6): - fo.write( '%d\t%s\tint\n' % (i, names[i+1])) - -for v, k in sorted( fmap.items(), key = lambda x:x[1] ): - fo.write( '%d\tvendor=%s\ti\n' % (k, v)) -fo.close() diff --git a/ml-xgboost/demo/regression/mknfold.py b/ml-xgboost/demo/regression/mknfold.py deleted file mode 100644 index a941f86..0000000 --- a/ml-xgboost/demo/regression/mknfold.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/python -import sys -import random - -if len(sys.argv) < 3: - print ('Usage: mknfold.py <data file> <k> [nfold = 5]') - exit(0) - -random.seed( 10 ) - -k = int( sys.argv[2] ) -if len(sys.argv) > 3: - nfold = int( sys.argv[3] ) -else: - nfold = 5 - -fi = open( sys.argv[1], 'r' ) -ftr = open( sys.argv[1]+'.train', 'w' ) -fte = open( sys.argv[1]+'.test', 'w' ) -for l in fi: - if random.randint( 1 , nfold ) == k: - fte.write( l ) - else: - ftr.write( l ) - -fi.close() -ftr.close() -fte.close() - diff --git a/ml-xgboost/demo/regression/runexp.sh b/ml-xgboost/demo/regression/runexp.sh deleted file mode 100644 index 900a80c..0000000 --- a/ml-xgboost/demo/regression/runexp.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -# map the data to features. For convenience we only use 7 original attributes and encode them as features in a trivial way -python mapfeat.py -# split train and test -python mknfold.py machine.txt 1 -# training and output the models -../../xgboost machine.conf -# output predictions of test data -../../xgboost machine.conf task=pred model_in=0002.model -# print the boosters of 0002.model in dump.raw.txt -../../xgboost machine.conf task=dump model_in=0002.model name_dump=dump.raw.txt -# print the boosters of 0002.model in dump.nice.txt with feature map -../../xgboost machine.conf task=dump model_in=0002.model fmap=featmap.txt name_dump=dump.nice.txt - -# cat the result -cat dump.nice.txt diff --git a/ml-xgboost/demo/yearpredMSD/README.md b/ml-xgboost/demo/yearpredMSD/README.md deleted file mode 100644 index 3fe3505..0000000 --- a/ml-xgboost/demo/yearpredMSD/README.md +++ /dev/null @@ -1,9 +0,0 @@ -Demonstrating how to use XGBoost on the [Year Prediction task of the Million Song Dataset](https://archive.ics.uci.edu/ml/datasets/YearPredictionMSD) - -1. Run runexp.sh -```bash -./runexp.sh -``` - -You can also use the script to prepare LIBSVM format, and run the [Distributed Version](../../multi-node).
-Note that normally you only need a single machine for a dataset at this scale; use the distributed version for larger datasets. diff --git a/ml-xgboost/demo/yearpredMSD/csv2libsvm.py b/ml-xgboost/demo/yearpredMSD/csv2libsvm.py deleted file mode 100644 index d7c1d15..0000000 --- a/ml-xgboost/demo/yearpredMSD/csv2libsvm.py +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/python -import sys - -if len(sys.argv) < 3: - print('Usage: csv2libsvm.py <input csv> <output libsvm>') - print('convert an all-numeric csv to libsvm') - sys.exit(0) - -fo = open(sys.argv[2], 'w') -for l in open(sys.argv[1]): - arr = l.split(',') - fo.write('%s' % arr[0]) - for i in range(len(arr) - 1): - fo.write(' %d:%s' % (i, arr[i+1])) -fo.close() diff --git a/ml-xgboost/demo/yearpredMSD/runexp.sh b/ml-xgboost/demo/yearpredMSD/runexp.sh deleted file mode 100644 index 8853c3f..0000000 --- a/ml-xgboost/demo/yearpredMSD/runexp.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -if [ -f YearPredictionMSD.txt ] -then - echo "use existing data to run experiment" -else - echo "getting data from uci, make sure you are connected to the internet" - wget https://archive.ics.uci.edu/ml/machine-learning-databases/00203/YearPredictionMSD.txt.zip - unzip YearPredictionMSD.txt.zip -fi -echo "start making data.." -# convert the csv to LIBSVM format -python csv2libsvm.py YearPredictionMSD.txt yearpredMSD.libsvm -head -n 463715 yearpredMSD.libsvm > yearpredMSD.libsvm.train -tail -n 51630 yearpredMSD.libsvm > yearpredMSD.libsvm.test -echo "finish making the data" -../../xgboost yearpredMSD.conf diff --git a/ml-xgboost/demo/yearpredMSD/yearpredMSD.conf b/ml-xgboost/demo/yearpredMSD/yearpredMSD.conf deleted file mode 100644 index 36cdf39..0000000 --- a/ml-xgboost/demo/yearpredMSD/yearpredMSD.conf +++ /dev/null @@ -1,29 +0,0 @@ -# General Parameters, see comment for each definition -# choose the tree booster, can also change to gblinear -booster = gbtree -# this is the only difference with classification, use reg:squarederror to do linear regression -# when labels are in [0,1] we can also use reg:logistic -objective = reg:squarederror - -# Tree Booster Parameters -# step size shrinkage -eta = 1.0 -# minimum loss reduction required to make a further partition -gamma = 1.0 -# minimum sum of instance weight(hessian) needed in a child -min_child_weight = 1 -# maximum depth of a tree -max_depth = 5 - -base_score = 2001 -# Task parameters -# the number of rounds to do boosting -num_round = 100 -# 0 means do not save any model except the final round model -save_period = 0 -# The path of training data -data = "yearpredMSD.libsvm.train" -# The path of validation data, used to monitor training process, here [test] sets name of the validation set -eval[test] = "yearpredMSD.libsvm.test" -# The path of test data -#test:data = "yearpredMSD.libsvm.test" diff --git a/ml-xgboost/dev/query_contributors.py b/ml-xgboost/dev/query_contributors.py deleted file mode 100644 index 6b7ee3c..0000000 --- a/ml-xgboost/dev/query_contributors.py +++ /dev/null @@ -1,63 +0,0 @@ -"""Query list of all contributors and reviewers in a release""" - -from sh.contrib import git -import sys -import re -import requests -import json - -if len(sys.argv) != 5: - print(f'Usage: {sys.argv[0]} [starting commit/tag] [ending commit/tag] [GitHub username] [GitHub password]') - sys.exit(1) - -from_commit = sys.argv[1] -to_commit = sys.argv[2] -username = sys.argv[3] -password = sys.argv[4] - -contributors = set() -reviewers = set() - -for line in git.log(f'{from_commit}..{to_commit}', '--pretty=format:%s', 
'--reverse'): - m = re.search('\(#([0-9]+)\)$', line.rstrip()) - if m: - pr_id = m.group(1) - print(f'PR #{pr_id}') - - r = requests.get(f'https://api.github.com/repos/dmlc/xgboost/pulls/{pr_id}/commits', auth=(username, password)) - assert r.status_code == requests.codes.ok, f'Code: {r.status_code}, Text: {r.text}' - commit_list = json.loads(r.text) - try: - contributors.update([commit['author']['login'] for commit in commit_list]) - except TypeError: - contributors.update(str(input(f'Error fetching contributors for PR #{pr_id}. Enter it manually, as a space-separated list:')).split(' ')) - - r = requests.get(f'https://api.github.com/repos/dmlc/xgboost/pulls/{pr_id}/reviews', auth=(username, password)) - assert r.status_code == requests.codes.ok, f'Code: {r.status_code}, Text: {r.text}' - review_list = json.loads(r.text) - reviewers.update([x['user']['login'] for x in review_list]) - - r = requests.get(f'https://api.github.com/repos/dmlc/xgboost/issues/{pr_id}/comments', auth=(username, password)) - assert r.status_code == requests.codes.ok, f'Code: {r.status_code}, Text: {r.text}' - comment_list = json.loads(r.text) - reviewers.update([x['user']['login'] for x in comment_list]) - -print('Contributors:', end='') -for x in sorted(contributors): - r = requests.get(f'https://api.github.com/users/{x}', auth=(username, password)) - assert r.status_code == requests.codes.ok, f'Code: {r.status_code}, Text: {r.text}' - user_info = json.loads(r.text) - if user_info['name'] is None: - print(f"@{x}, ", end='') - else: - print(f"{user_info['name']} (@{x}), ", end='') - -print('Reviewers:', end='') -for x in sorted(reviewers): - r = requests.get(f'https://api.github.com/users/{x}', auth=(username, password)) - assert r.status_code == requests.codes.ok, f'Code: {r.status_code}, Text: {r.text}' - user_info = json.loads(r.text) - if user_info['name'] is None: - print(f"@{x}, ", end='') - else: - print(f"{user_info['name']} (@{x}), ", end='') diff --git a/ml-xgboost/dmlc-core/.editorconfig b/ml-xgboost/dmlc-core/.editorconfig deleted file mode 100644 index 17fd3a0..0000000 --- a/ml-xgboost/dmlc-core/.editorconfig +++ /dev/null @@ -1,7 +0,0 @@ -root = true - -[*] -charset=utf-8 -indent_style = space -indent_size = 2 -insert_final_newline = true \ No newline at end of file diff --git a/ml-xgboost/dmlc-core/.gitignore b/ml-xgboost/dmlc-core/.gitignore deleted file mode 100644 index 124d396..0000000 --- a/ml-xgboost/dmlc-core/.gitignore +++ /dev/null @@ -1,48 +0,0 @@ -# Compiled Object files -*.slo -*.lo -*.o -*.obj - -# Precompiled Headers -*.gch -*.pch - -# Compiled Dynamic libraries -*.so -*.dylib -*.dll - -# Fortran module files -*.mod - -# Compiled Static libraries -*.lai -*.la -*.a -*.lib - -# Executables -*.exe -*.out -*.app -*~ -config.mk -*.pyc - -# Vim -*.swp -*.swo -*.swn -*.csv -.vimrc - -# Emacs -.clang_complete -deps -recommonmark -build - -# CLion -.idea -cmake-build-* diff --git a/ml-xgboost/dmlc-core/CMakeLists.txt b/ml-xgboost/dmlc-core/CMakeLists.txt deleted file mode 100644 index 3a2855e..0000000 --- a/ml-xgboost/dmlc-core/CMakeLists.txt +++ /dev/null @@ -1,258 +0,0 @@ -cmake_minimum_required(VERSION 3.2) - -project(dmlc VERSION 0.3 LANGUAGES C CXX) - -if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/build/private/local_config.cmake) - include(${CMAKE_CURRENT_SOURCE_DIR}/build/private/local_config.cmake) -endif() - -set(CMAKE_LOCAL "${PROJECT_SOURCE_DIR}/cmake") -list(APPEND CMAKE_MODULE_PATH ${CMAKE_LOCAL}/Modules) - -include(CheckCXXSymbolExists) -include(cmake/Utils.cmake) - -# Options 
-dmlccore_option(USE_HDFS "Build with HDFS support" OFF) -dmlccore_option(USE_AZURE "Build with AZURE support" OFF) -dmlccore_option(USE_S3 "Build with S3 support" OFF) -dmlccore_option(USE_OPENMP "Build with OpenMP" ON) -dmlccore_option(USE_CXX14_IF_AVAILABLE "Build with C++14 if the compiler supports it" OFF) -dmlccore_option(GOOGLE_TEST "Build google tests" OFF) -dmlccore_option(INSTALL_DOCUMENTATION "Install documentation" OFF) -dmlccore_option(DMLC_FORCE_SHARED_CRT "Build with dynamic CRT on Windows (/MD)" OFF) -dmlccore_option(DMLC_USE_SANITIZER "Use sanitizer flags; to specify a custom path for sanitizers, set this variable to a value that's not ON or OFF" OFF) -set(DMLC_ENABLED_SANITIZERS "address" "leak" CACHE STRING - "Semicolon separated list of sanitizer names. E.g. 'address;leak'. Supported sanitizers are - address, leak and thread.") - -include(CheckCXXCompilerFlag) -if(USE_CXX14_IF_AVAILABLE) - check_cxx_compiler_flag("-std=c++14" SUPPORT_CXX14) -endif() -if(SUPPORT_CXX14) - set(CMAKE_CXX_STANDARD 14) -else() - set(CMAKE_CXX_STANDARD 11) -endif() -set(CMAKE_CXX_EXTENSIONS OFF) - -FILE(GLOB SOURCE "src/*.cc") -FILE(GLOB_RECURSE SOURCE_INCLUDE "include/*") -list(APPEND SOURCE ${SOURCE_INCLUDE}) -list(APPEND SOURCE "src/io/line_split.cc") -list(APPEND SOURCE "src/io/recordio_split.cc") -list(APPEND SOURCE "src/io/indexed_recordio_split.cc") -list(APPEND SOURCE "src/io/input_split_base.cc") -list(APPEND SOURCE "src/io/filesys.cc") -list(APPEND SOURCE "src/io/local_filesys.cc") -if(USE_HDFS) - list(APPEND SOURCE "src/io/hdfs_filesys.cc") -endif() -if(USE_S3) - list(APPEND SOURCE "src/io/s3_filesys.cc") -endif() -if(USE_AZURE) - list(APPEND SOURCE "src/io/azure_filesys.cc") -endif() - -add_library(dmlc ${SOURCE}) - -# Sanitizer -if (DMLC_USE_SANITIZER) - # Older CMake versions have had troubles with Sanitizer - cmake_minimum_required(VERSION 3.12) - include(cmake/Sanitizer.cmake) - enable_sanitizers("${DMLC_ENABLED_SANITIZERS}") -endif (DMLC_USE_SANITIZER) - -# HDFS configurations -if(USE_HDFS) - find_package(HDFS REQUIRED) - find_package(JNI REQUIRED) - target_include_directories(dmlc PRIVATE ${HDFS_INCLUDE_DIR}) - target_link_libraries(dmlc PRIVATE ${HDFS_STATIC_LIB} ${JAVA_JVM_LIBRARY}) - target_compile_definitions(dmlc PRIVATE -DDMLC_USE_HDFS=1) -else() - target_compile_definitions(dmlc PRIVATE -DDMLC_USE_HDFS=0) -endif() -# S3 configurations -if(USE_S3) - find_package(CURL REQUIRED) - target_include_directories(dmlc SYSTEM PRIVATE ${CURL_INCLUDE_DIR}) - target_link_libraries(dmlc PRIVATE ${CURL_LIBRARY}) - - find_package(OpenSSL REQUIRED) - target_include_directories(dmlc SYSTEM PRIVATE ${OPENSSL_INCLUDE_DIR}) - target_link_libraries(dmlc PRIVATE ${OPENSSL_LIBRARY} ${OPENSSL_LIBRARIES} ${OPENSSL_CRYPTO_LIBRARY}) - target_compile_definitions(dmlc PRIVATE -DDMLC_USE_S3=1) -else() - target_compile_definitions(dmlc PRIVATE -DDMLC_USE_S3=0) -endif() -# Azure configurations -if(USE_AZURE) - target_compile_definitions(dmlc PRIVATE -DDMLC_USE_AZURE=1) -else() - target_compile_definitions(dmlc PRIVATE -DDMLC_USE_AZURE=0) -endif() - -# OpenMP -if(USE_OPENMP) - if(APPLE AND (NOT CMAKE_COMPILER_IS_GNUCC)) - # Require CMake 3.16+ for Mac to ensure that OpenMP can be located - # (Exception: it's okay if Homebrew GCC is used) - cmake_minimum_required(VERSION 3.16) - endif() - - find_package(OpenMP REQUIRED) - - # For CMake < 3.9, we need to make target OpenMP::OpenMP_CXX ourselves - if(NOT TARGET OpenMP::OpenMP_CXX) - find_package(Threads REQUIRED) - add_library(OpenMP::OpenMP_CXX IMPORTED 
INTERFACE) - set_property(TARGET OpenMP::OpenMP_CXX - PROPERTY INTERFACE_COMPILE_OPTIONS ${OpenMP_CXX_FLAGS}) - set_property(TARGET OpenMP::OpenMP_CXX - PROPERTY INTERFACE_LINK_LIBRARIES ${OpenMP_CXX_FLAGS} Threads::Threads) - endif() - target_link_libraries(dmlc PRIVATE OpenMP::OpenMP_CXX) -endif() - -if(WIN32 AND (NOT MSVC)) # On Windows, link Shlwapi.lib for non-MSVC compilers - target_link_libraries(dmlc PRIVATE Shlwapi) -endif() - -# Check location of clock_gettime; if it's in librt, link it -include(CheckLibraryExists) -CHECK_LIBRARY_EXISTS(rt clock_gettime "time.h" HAVE_CLOCK_GETTIME_IN_LIBRT) -if(HAVE_CLOCK_GETTIME_IN_LIBRT) - target_link_libraries(dmlc PRIVATE rt) -endif() - -# Check headers and symbols -include(CheckSymbolExists) -include(CheckIncludeFile) -include(CheckIncludeFileCXX) -check_symbol_exists(fopen64 stdio.h DMLC_FOPEN_64_PRESENT) -check_include_file_cxx(cxxabi.h DMLC_CXXABI_H_PRESENT) -check_symbol_exists(nanosleep time.h DMLC_NANOSLEEP_PRESENT) - -# Check existence of backtrace(3) -find_package(Backtrace) -if(Backtrace_FOUND) - set(DMLC_EXECINFO_H_PRESENT 1) - set(DMLC_EXECINFO_H ${Backtrace_HEADER}) - target_include_directories(dmlc SYSTEM PRIVATE ${Backtrace_INCLUDE_DIRS}) - target_link_libraries(dmlc PRIVATE ${Backtrace_LIBRARIES}) -else() - set(DMLC_EXECINFO_H_PRESENT 0) -endif() - -# Check endianness -include(TestBigEndian) -test_big_endian(BIG_ENDIAN) -if(BIG_ENDIAN) - set(DMLC_CMAKE_LITTLE_ENDIAN 0) -else() - set(DMLC_CMAKE_LITTLE_ENDIAN 1) -endif() - -message(STATUS "${CMAKE_LOCAL}/build_config.h.in -> include/dmlc/build_config.h") -configure_file("cmake/build_config.h.in" "include/dmlc/build_config.h") - -target_include_directories(dmlc PUBLIC - $ - $ - $) -target_compile_definitions(dmlc PRIVATE -D_XOPEN_SOURCE=700 - -D_POSIX_SOURCE -D_POSIX_C_SOURCE=200809L -D_DARWIN_C_SOURCE) -# Older stdc++ enable c++11 items -target_compile_definitions(dmlc PUBLIC -D__USE_XOPEN2K8) -# DMLC_CORE_USE_CMAKE macro constant indicates the use of CMake -target_compile_definitions(dmlc PUBLIC -DDMLC_CORE_USE_CMAKE) - -# compiler flags -if(MSVC) - target_compile_definitions(dmlc PUBLIC -DDMLC_USE_CXX11=1) - if(NOT BUILD_SHARED_LIBS AND NOT DMLC_FORCE_SHARED_CRT) - foreach(flag_var - CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE - CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO) - if(${flag_var} MATCHES "/MD") - string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}") - endif(${flag_var} MATCHES "/MD") - endforeach(flag_var) - endif() - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc") -else() - set(CMAKE_POSITION_INDEPENDENT_CODE ON) - check_cxx_compiler_flag("-msse2" SUPPORT_MSSE2) - if(SUPPORT_MSSE2) - target_compile_options(dmlc PRIVATE -mavx) - endif() - target_compile_options(dmlc PRIVATE -Wall -Wno-unknown-pragmas -fPIC) - if(CMAKE_BUILD_TYPE STREQUAL "DEBUG" OR CMAKE_BUILD_TYPE STREQUAL "Debug") - target_compile_options(dmlc PRIVATE -g -O0) - else() - target_compile_options(dmlc PRIVATE -O3) - endif() - - target_compile_definitions(dmlc PUBLIC -DDMLC_USE_CXX11=1) - if(SUPPORT_CXX14) - target_compile_definitions(dmlc PUBLIC -DDMLC_USE_CXX14=1) - endif() -endif() - - -include(GNUInstallDirs) -# ---[ Install Includes -install(DIRECTORY ${PROJECT_SOURCE_DIR}/include/dmlc - DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) -install(FILES ${CMAKE_CURRENT_BINARY_DIR}/include/dmlc/build_config.h - DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dmlc) - -# ---[ Install the archive static lib and header files -install(TARGETS dmlc - EXPORT DMLCTargets - ARCHIVE 
DESTINATION ${CMAKE_INSTALL_LIBDIR} - LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}) -install(EXPORT DMLCTargets - FILE DMLCTargets.cmake - NAMESPACE dmlc:: - EXPORT_LINK_INTERFACE_LIBRARIES - DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/dmlc) - -# ---[ Install documentation -if(INSTALL_DOCUMENTATION) - install(DIRECTORY doc DESTINATION ${CMAKE_INSTALL_DATADIR}) -endif() - -# ---[ Package configurations -include(CMakePackageConfigHelpers) -configure_package_config_file( - ${CMAKE_LOCAL}/dmlc-config.cmake.in - ${CMAKE_BINARY_DIR}/cmake/dmlc-config.cmake - INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/dmlc) -write_basic_package_version_file( - ${CMAKE_BINARY_DIR}/cmake/dmlc-config-version.cmake - VERSION ${DMLC_VERSION} - COMPATIBILITY AnyNewerVersion) -install( - FILES - ${CMAKE_BINARY_DIR}/cmake/dmlc-config.cmake - ${CMAKE_BINARY_DIR}/cmake/dmlc-config-version.cmake - DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/dmlc) - -# ---[ Linter target -if(MSVC) - find_package(PythonInterp) - set(PYTHON_EXECUTABLE ${PYTHON_EXECUTABLE} CACHE FILEPATH "Path to the python 2.x executable") -endif() -set(LINT_DIRS include src scripts) -add_custom_target(dmlc_lint COMMAND ${CMAKE_COMMAND} -DMSVC=${MSVC} -DPYTHON_EXECUTABLE=${PYTHON_EXECUTABLE} -DPROJECT_SOURCE_DIR=${PROJECT_SOURCE_DIR} -DLINT_DIRS=${LINT_DIRS} -DPROJECT_NAME=dmlc -P ${PROJECT_SOURCE_DIR}/cmake/lint.cmake) - -# Setup testing -if(GOOGLE_TEST) - include(CTest) - add_subdirectory(test/unittest) -endif() diff --git a/ml-xgboost/dmlc-core/LICENSE b/ml-xgboost/dmlc-core/LICENSE deleted file mode 100644 index 8dada3e..0000000 --- a/ml-xgboost/dmlc-core/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
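Before the Makefile hunk below, note that the CMakeLists.txt removed above does more than build `libdmlc`: it installs an export set (`DMLCTargets`, namespaced `dmlc::`) together with generated `dmlc-config.cmake` and `dmlc-config-version.cmake` files, which is what let downstream projects pick the library up with a plain `find_package`. A minimal consumer sketch, assuming the install prefix is on `CMAKE_PREFIX_PATH` (the consumer project and `main.cc` are hypothetical):

```cmake
cmake_minimum_required(VERSION 3.2)
project(dmlc_consumer CXX)

# find_package in CONFIG mode loads dmlc-config.cmake, which includes
# DMLCTargets.cmake and defines the imported target dmlc::dmlc.
find_package(dmlc REQUIRED CONFIG)

add_executable(app main.cc)
target_link_libraries(app PRIVATE dmlc::dmlc)
```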
diff --git a/ml-xgboost/dmlc-core/Makefile b/ml-xgboost/dmlc-core/Makefile deleted file mode 100644 index d920f58..0000000 --- a/ml-xgboost/dmlc-core/Makefile +++ /dev/null @@ -1,108 +0,0 @@ -ifndef config - ifneq ("$(wildcard ./config.mk)","") - config = config.mk - else - config = make/config.mk - endif -endif -# use customized config file -include $(config) -include make/dmlc.mk - -NOLINT_FILES = --exclude_path include/dmlc/concurrentqueue.h include/dmlc/blockingconcurrentqueue.h - -# this is the common build script for dmlc lib -export LDFLAGS= -pthread -lm -export CFLAGS = -O3 -Wall -Wno-unknown-pragmas -Iinclude -ifeq ($(USE_GNU11), 1) - CFLAGS += -std=gnu++11 -else - CFLAGS += -std=c++0x -endif -LDFLAGS+= $(DMLC_LDFLAGS) $(ADD_LDFLAGS) -CFLAGS+= $(DMLC_CFLAGS) $(ADD_CFLAGS) - -ifndef USE_SSE - USE_SSE = 1 -endif - -# ifeq ($(USE_SSE), 1) -# CFLAGS += -mavx -# endif - - -ifdef DEPS_PATH -CFLAGS+= -I$(DEPS_PATH)/include -LDFLAGS+= -L$(DEPS_PATH)/lib -endif - -.PHONY: clean all test lint doc example pylint - -OBJ=line_split.o indexed_recordio_split.o recordio_split.o input_split_base.o io.o filesys.o local_filesys.o data.o recordio.o config.o - -ifeq ($(USE_HDFS), 1) - OBJ += hdfs_filesys.o -endif - -ifeq ($(USE_S3), 1) - OBJ += s3_filesys.o -endif - -ifeq ($(USE_AZURE), 1) - OBJ += azure_filesys.o -endif - -ifndef LINT_LANG - LINT_LANG="all" -endif - - -ALIB=libdmlc.a -all: $(ALIB) test - -include test/dmlc_test.mk -include example/dmlc_example.mk - -ifeq ($(BUILD_TEST), 1) -test: $(ALL_TEST) -endif - -example: $(ALL_EXAMPLE) - -line_split.o: src/io/line_split.cc -recordio_split.o: src/io/recordio_split.cc -indexed_recordio_split.o: src/io/indexed_recordio_split.cc -input_split_base.o: src/io/input_split_base.cc -filesys.o: src/io/filesys.cc -hdfs_filesys.o: src/io/hdfs_filesys.cc -s3_filesys.o: src/io/s3_filesys.cc -azure_filesys.o: src/io/azure_filesys.cc -local_filesys.o: src/io/local_filesys.cc -io.o: src/io.cc -data.o: src/data.cc -recordio.o: src/recordio.cc -config.o: src/config.cc - -libdmlc.a: $(OBJ) - - -$(BIN) : - $(CXX) $(CFLAGS) -o $@ $(filter %.cpp %.o %.c %.cc %.a, $^) $(LDFLAGS) - -$(OBJ) : - $(CXX) -c $(CFLAGS) -o $@ $(firstword $(filter %.cpp %.c %.cc, $^) ) - -$(ALIB): - $(AR) cr $@ $+ - -lint: - scripts/lint.py dmlc ${LINT_LANG} include src scripts $(NOLINT_FILES) - -pylint: - scripts/lint.py dmlc ${LINT_LANG} tracker/dmlc_tracker - -doxygen: - doxygen doc/Doxyfile - -clean: - $(RM) $(OBJ) $(BIN) $(ALIB) $(ALL_TEST) $(ALL_TEST_OBJ) *~ src/*~ src/*/*~ include/dmlc/*~ test/*~ diff --git a/ml-xgboost/dmlc-core/README.md b/ml-xgboost/dmlc-core/README.md deleted file mode 100644 index e073d75..0000000 --- a/ml-xgboost/dmlc-core/README.md +++ /dev/null @@ -1,45 +0,0 @@ -Distributed Machine Learning Common Codebase -============================================ - -[![Build Status](https://github.com/dmlc/dmlc-core/workflows/continuous%20build/badge.svg)](https://github.com/dmlc/dmlc-core/actions) -[![Documentation Status](https://readthedocs.org/projects/dmlc-core/badge/?version=latest)](http://dmlc-core.readthedocs.org/en/latest/) -[![GitHub license](http://dmlc.github.io/img/apache2.svg)](./LICENSE) - - -DMLC-Core is the backbone library to support all DMLC projects, offers the bricks to build efficient and scalable distributed machine learning libraries. 
- -Developer Channel [![Join the chat at https://gitter.im/dmlc/dmlc-core](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/dmlc/dmlc-core?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) - - -What's New ---------- -* [Note on Parameter Module for Machine Learning](http://dmlc-core.readthedocs.org/en/latest/parameter.html) - - -Contents --------- -* [Documentation and Tutorials](http://dmlc-core.readthedocs.org/en/latest/) -* [Contributing](#contributing) - -Known Issues ------------- -* The RecordIO format is not portable across processors of different endianness: a RecordIO file saved on an x86 machine cannot be loaded on a SPARC machine, because x86 is little-endian while SPARC is big-endian. - - -Contributing ------------ - -Contributions to dmlc-core are welcome! dmlc-core follows the Google C++ style guide. If you are interested in contributing, take a look at the [feature wishlist](https://github.com/dmlc/dmlc-core/labels/feature%20wishlist) and open a new issue if you would like to add something. - -* Use of C++11 is allowed, provided the code is guarded with the ```DMLC_USE_CXX11``` macro -* Introduce as few dependencies as possible - -### Checklist before submitting code -* Run ```make lint``` and fix all style problems. -* Run ```make doc``` and fix all warnings. - -NOTE ----- -deps: - -libcurl4-openssl-dev diff --git a/ml-xgboost/dmlc-core/appveyor.yml b/ml-xgboost/dmlc-core/appveyor.yml deleted file mode 100644 index 54ba43f..0000000 --- a/ml-xgboost/dmlc-core/appveyor.yml +++ /dev/null @@ -1,134 +0,0 @@ -environment: - matrix: - - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 - target: msvc - ver: 2019 - generator: "Visual Studio 16 2019" - configuration: Debug - - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 - target: msvc - ver: 2019 - generator: "Visual Studio 16 2019" - configuration: Release - - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 - target: mingw32 - generator: "MinGW Makefiles" - - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 - target: cygwin - generator: "Unix Makefiles" - - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 - target: mingw - generator: "MinGW Makefiles" - - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 - target: msys2 - generator: "Unix Makefiles" - - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 - target: msvc - ver: 2013 - generator: "Visual Studio 12 2013 Win64" - configuration: Release - - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 - target: msvc - ver: 2015 - generator: "Visual Studio 14 2015 Win64" - configuration: Debug - - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 - target: msvc - ver: 2015 - generator: "Visual Studio 14 2015 Win64" - configuration: Release - - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 - target: msvc - ver: 2017 - generator: "Visual Studio 15 2017 Win64" - configuration: Debug - - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 - target: msvc - ver: 2017 - generator: "Visual Studio 15 2017 Win64" - configuration: Release - -matrix: - fast_finish: true - -platform: - - x64 - -install: - - git submodule update --init --recursive - # Set PATH - - if /i "%target%" == "msys2" set PATH=C:\msys64\mingw64\bin;C:\msys64\usr\bin;%PATH% - - if /i "%target%" == "mingw32" set PATH=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%PATH:C:\Program Files\Git\usr\bin;=% - - if /i "%target%" == "mingw" set PATH=C:\MinGW-w64\x86_64-7.3.0-posix-seh-rt_v5-rev0\mingw64\bin;%PATH:C:\Program Files\Git\usr\bin;=% - - if /i "%target%" == "cygwin" set
PATH=C:\cygwin64\bin;C:\cygwin64\usr\bin;%PATH% - # Install packages and show information - - if /i "%target%" == "msys2" ( - gcc -v - ) - - if /i "%target%" == "mingw" ( - gcc -v - ) - - if /i "%target%" == "cygwin" ( - gcc -v - ) - -build_script: - - cd %APPVEYOR_BUILD_FOLDER% - - if /i "%target%" == "msvc" ( - mkdir build_msvc%ver% && - cd build_msvc%ver% && - if /i "%generator%" == "Visual Studio 12 2013 Win64" ( - cmake .. -G"%generator%" -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_CONFIGURATION_TYPES="Release;Debug;" && - msbuild dmlc.sln - ) else if /i "%generator%" == "Visual Studio 16 2019" ( - cmake .. -G"%generator%" -A x64 -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_CONFIGURATION_TYPES="Release;Debug;" -DGOOGLE_TEST=ON && - msbuild dmlc.sln - ) else ( - cmake .. -G"%generator%" -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_CONFIGURATION_TYPES="Release;Debug;" -DGOOGLE_TEST=ON && - msbuild dmlc.sln - ) - ) - - if /i "%target%" == "msys2" ( - mkdir build_msys2 && - cd build_msys2 && - cmake .. -G"%generator%" -DCMAKE_VERBOSE_MAKEFILE=ON -DGOOGLE_TEST=ON && - cmake --build . -- -j2 - ) - - if /i "%target%" == "mingw32" ( - mkdir build_mingw32 && - cd build_mingw32 && - cmake .. -G"%generator%" -DCMAKE_VERBOSE_MAKEFILE=ON -DGOOGLE_TEST=ON && - cmake --build . -- -j2 - ) - - if /i "%target%" == "mingw" ( - mkdir build_mingw && - cd build_mingw && - cmake .. -G"%generator%" -DCMAKE_VERBOSE_MAKEFILE=ON -DGOOGLE_TEST=ON && - cmake --build . -- -j2 - ) - - if /i "%target%" == "cygwin" ( - mkdir build_cygwin && - cd build_cygwin && - cmake .. -G"%generator%" -DCMAKE_VERBOSE_MAKEFILE=ON -DGOOGLE_TEST=ON -DOpenMP_gomp_LIBRARY:FILEPATH=-lgomp && - cmake --build . -- -j2 - ) - -test_script: - - cd %APPVEYOR_BUILD_FOLDER% - - if /i "%target%" == "msvc" ( - if /i not "%generator%" == "Visual Studio 12 2013 Win64" ( - .\build_msvc%ver%\test\unittest\%configuration%\dmlc_unit_tests.exe - ) - ) - - if /i "%target%" == "msys2" ( - .\build_msys2\test\unittest\dmlc_unit_tests.exe - ) - - if /i "%target%" == "mingw32" ( - .\build_mingw32\test\unittest\dmlc_unit_tests.exe - ) - - if /i "%target%" == "mingw" ( - .\build_mingw\test\unittest\dmlc_unit_tests.exe - ) - - if /i "%target%" == "cygwin" ( - .\build_cygwin\test\unittest\dmlc_unit_tests.exe - ) diff --git a/ml-xgboost/dmlc-core/cmake/Modules/FindASan.cmake b/ml-xgboost/dmlc-core/cmake/Modules/FindASan.cmake deleted file mode 100644 index 18f5cde..0000000 --- a/ml-xgboost/dmlc-core/cmake/Modules/FindASan.cmake +++ /dev/null @@ -1,13 +0,0 @@ -set(ASan_LIB_NAME ASan) - -find_library(ASan_LIBRARY - NAMES libasan.so libasan.so.4 libasan.so.3 libasan.so.2 libasan.so.1 libasan.so.0 - PATHS ${DMLC_USE_SANITIZER} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib) - -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(ASan DEFAULT_MSG - ASan_LIBRARY) - -mark_as_advanced( - ASan_LIBRARY - ASan_LIB_NAME) diff --git a/ml-xgboost/dmlc-core/cmake/Modules/FindHDFS.cmake b/ml-xgboost/dmlc-core/cmake/Modules/FindHDFS.cmake deleted file mode 100644 index dcc547f..0000000 --- a/ml-xgboost/dmlc-core/cmake/Modules/FindHDFS.cmake +++ /dev/null @@ -1,72 +0,0 @@ -# DerivedFrom: https://github.com/cloudera/Impala/blob/cdh5-trunk/cmake_modules/FindHDFS.cmake -# - Find HDFS (hdfs.h and libhdfs.so) -# This module defines -# Hadoop_VERSION, version string of ant if found -# HDFS_INCLUDE_DIR, directory containing hdfs.h -# HDFS_LIBRARIES, location of libhdfs.so -# HDFS_FOUND, whether HDFS is found. -# hdfs_static, imported static hdfs library. 
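FindASan.cmake above is the template that the remaining sanitizer modules in this hunk (FindLSan, FindTSan, FindUBSan, deleted below) repeat: `find_library` probes a list of versioned `libasan.so*` names, and `find_package_handle_standard_args` turns the result into the package's found status. A short sketch of how such a module is consumed once `cmake/Modules` is on the module path; the `Sanitizer.cmake` deleted below does this via `find_package(ASan REQUIRED)`:

```cmake
# Sketch: consuming the FindASan.cmake module shown above.
list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake/Modules")

find_package(ASan)                        # executes FindASan.cmake
if(ASan_FOUND)
  message(STATUS "ASan runtime: ${ASan_LIBRARY}")
  link_libraries(${ASan_LIBRARY})         # same usage as in Sanitizer.cmake
endif()
```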
- -exec_program(hadoop ARGS version OUTPUT_VARIABLE Hadoop_VERSION - RETURN_VALUE Hadoop_RETURN) - -# currently only looking in HADOOP_HOME -find_path(HDFS_INCLUDE_DIR hdfs.h PATHS - $ENV{HADOOP_HDFS_HOME}/include/ - # make sure we don't accidentally pick up a different version - NO_DEFAULT_PATH -) - -if ("${CMAKE_SIZEOF_VOID_P}" STREQUAL "8") - set(arch_hint "x64") -elseif ("$ENV{LIB}" MATCHES "(amd64|ia64)") - set(arch_hint "x64") -else () - set(arch_hint "x86") -endif() - -message(STATUS "Architecture: ${arch_hint}") - -if ("${arch_hint}" STREQUAL "x64") - set(HDFS_LIB_PATHS $ENV{HADOOP_HDFS_HOME}/lib/native) -else () - set(HDFS_LIB_PATHS $ENV{HADOOP_HDFS_HOME}/lib/native) -endif () - -message(STATUS "HDFS_LIB_PATHS: ${HDFS_LIB_PATHS}") - -find_library(HDFS_LIB NAMES hdfs PATHS - ${HDFS_LIB_PATHS} - # make sure we don't accidentally pick up a different version - NO_DEFAULT_PATH -) - -if (HDFS_LIB) - set(HDFS_FOUND TRUE) - set(HDFS_LIBRARIES ${HDFS_LIB}) - set(HDFS_STATIC_LIB ${HDFS_LIB_PATHS}/${CMAKE_STATIC_LIBRARY_PREFIX}hdfs${CMAKE_STATIC_LIBRARY_SUFFIX}) - - add_library(hdfs_static STATIC IMPORTED) - set_target_properties(hdfs_static PROPERTIES IMPORTED_LOCATION ${HDFS_STATIC_LIB}) - -else () - set(HDFS_FOUND FALSE) -endif () - -if (HDFS_FOUND) - if (NOT HDFS_FIND_QUIETLY) - message(STATUS "${Hadoop_VERSION}") - message(STATUS "HDFS_INCLUDE_DIR: ${HDFS_INCLUDE_DIR}") - message(STATUS "HDFS_LIBRARIES: ${HDFS_LIBRARIES}") - message(STATUS "hdfs_static: ${HDFS_STATIC_LIB}") - endif () -else () - message(FATAL_ERROR "HDFS includes and libraries NOT found." - "(${HDFS_INCLUDE_DIR}, ${HDFS_LIB})") -endif () - -mark_as_advanced( - HDFS_LIBRARIES - HDFS_INCLUDE_DIR - hdfs_static -) diff --git a/ml-xgboost/dmlc-core/cmake/Modules/FindLSan.cmake b/ml-xgboost/dmlc-core/cmake/Modules/FindLSan.cmake deleted file mode 100644 index 0b2e871..0000000 --- a/ml-xgboost/dmlc-core/cmake/Modules/FindLSan.cmake +++ /dev/null @@ -1,13 +0,0 @@ -set(LSan_LIB_NAME lsan) - -find_library(LSan_LIBRARY - NAMES liblsan.so liblsan.so.0 liblsan.so.0.0.0 - PATHS ${DMLC_USE_SANITIZER} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib) - -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(LSan DEFAULT_MSG - LSan_LIBRARY) - -mark_as_advanced( - LSan_LIBRARY - LSan_LIB_NAME) diff --git a/ml-xgboost/dmlc-core/cmake/Modules/FindTSan.cmake b/ml-xgboost/dmlc-core/cmake/Modules/FindTSan.cmake deleted file mode 100644 index 2403e91..0000000 --- a/ml-xgboost/dmlc-core/cmake/Modules/FindTSan.cmake +++ /dev/null @@ -1,13 +0,0 @@ -set(TSan_LIB_NAME tsan) - -find_library(TSan_LIBRARY - NAMES libtsan.so libtsan.so.0 libtsan.so.0.0.0 - PATHS ${DMLC_USE_SANITIZER} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib) - -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(TSan DEFAULT_MSG - TSan_LIBRARY) - -mark_as_advanced( - TSan_LIBRARY - TSan_LIB_NAME) diff --git a/ml-xgboost/dmlc-core/cmake/Modules/FindUBSan.cmake b/ml-xgboost/dmlc-core/cmake/Modules/FindUBSan.cmake deleted file mode 100644 index e1b72eb..0000000 --- a/ml-xgboost/dmlc-core/cmake/Modules/FindUBSan.cmake +++ /dev/null @@ -1,13 +0,0 @@ -set(UBSan_LIB_NAME UBSan) - -find_library(UBSan_LIBRARY - NAMES libubsan.so libubsan.so.5 libubsan.so.4 libubsan.so.3 libubsan.so.2 libubsan.so.1 libubsan.so.0 - PATHS ${SANITIZER_PATH} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib) - -include(FindPackageHandleStandardArgs) 
-find_package_handle_standard_args(UBSan DEFAULT_MSG - UBSan_LIBRARY) - -mark_as_advanced( - UBSan_LIBRARY - UBSan_LIB_NAME) diff --git a/ml-xgboost/dmlc-core/cmake/Sanitizer.cmake b/ml-xgboost/dmlc-core/cmake/Sanitizer.cmake deleted file mode 100644 index c1afb14..0000000 --- a/ml-xgboost/dmlc-core/cmake/Sanitizer.cmake +++ /dev/null @@ -1,63 +0,0 @@ -# Set appropriate compiler and linker flags for sanitizers. -# -# Usage of this module: -# enable_sanitizers("address;leak") - -# Add flags -macro(enable_sanitizer sanitizer) - if(${sanitizer} MATCHES "address") - find_package(ASan REQUIRED) - set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=address") - link_libraries(${ASan_LIBRARY}) - - elseif(${sanitizer} MATCHES "thread") - find_package(TSan REQUIRED) - set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=thread") - link_libraries(${TSan_LIBRARY}) - - elseif(${sanitizer} MATCHES "leak") - find_package(LSan REQUIRED) - set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=leak") - link_libraries(${LSan_LIBRARY}) - - elseif(${sanitizer} MATCHES "undefined") - find_package(UBSan REQUIRED) - set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=undefined -fno-sanitize-recover=undefined") - link_libraries(${UBSan_LIBRARY}) - - else() - message(FATAL_ERROR "Sanitizer ${sanitizer} not supported.") - endif() -endmacro() - -macro(enable_sanitizers SANITIZERS) - # Check sanitizers compatibility. - # Ideally, we should use if(san IN_LIST SANITIZERS) ... endif() - # But I haven't figured out how to make it work. - foreach ( _san ${SANITIZERS} ) - string(TOLOWER ${_san} _san) - if (_san MATCHES "thread") - if (${_use_other_sanitizers}) - message(FATAL_ERROR - "thread sanitizer is not compatible with ${_san} sanitizer.") - endif() - set(_use_thread_sanitizer 1) - else () - if (${_use_thread_sanitizer}) - message(FATAL_ERROR - "${_san} sanitizer is not compatible with thread sanitizer.") - endif() - set(_use_other_sanitizers 1) - endif() - endforeach() - - message("Sanitizers: ${SANITIZERS}") - - foreach( _san ${SANITIZERS} ) - string(TOLOWER ${_san} _san) - enable_sanitizer(${_san}) - endforeach() - message("Sanitizers compile flags: ${SAN_COMPILE_FLAGS}") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_COMPILE_FLAGS}") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_COMPILE_FLAGS}") -endmacro() diff --git a/ml-xgboost/dmlc-core/cmake/Utils.cmake b/ml-xgboost/dmlc-core/cmake/Utils.cmake deleted file mode 100644 index 74c21a2..0000000 --- a/ml-xgboost/dmlc-core/cmake/Utils.cmake +++ /dev/null @@ -1,381 +0,0 @@ -################################################################################################ -# Command alias for debugging messages -# Usage: -# dmsg(<message>) -function(dmsg) - message(STATUS ${ARGN}) -endfunction() - -################################################################################################ -# Removes duplicates from list(s) -# Usage: -# dmlccore_list_unique(<list_variable> [<list_variable>] [...]) -macro(dmlccore_list_unique) - foreach(__lst ${ARGN}) - if(${__lst}) - list(REMOVE_DUPLICATES ${__lst}) - endif() - endforeach() -endmacro() - -################################################################################################ -# Clears variables from list -# Usage: -# dmlccore_clear_vars(<variables_list>) -macro(dmlccore_clear_vars) - foreach(_var ${ARGN}) - unset(${_var}) - endforeach() -endmacro() - -################################################################################################ -# Removes duplicates from string -# Usage: -# dmlccore_string_unique(<string_variable>) -function(dmlccore_string_unique
__string) - if(${__string}) - set(__list ${${__string}}) - separate_arguments(__list) - list(REMOVE_DUPLICATES __list) - foreach(__e ${__list}) - set(__str "${__str} ${__e}") - endforeach() - set(${__string} ${__str} PARENT_SCOPE) - endif() -endfunction() - -################################################################################################ -# Prints list element per line -# Usage: -# dmlccore_print_list() -function(dmlccore_print_list) - foreach(e ${ARGN}) - message(STATUS ${e}) - endforeach() -endfunction() - -################################################################################################ -# Function merging lists of compiler flags to single string. -# Usage: -# dmlccore_merge_flag_lists(out_variable [] [] ...) -function(dmlccore_merge_flag_lists out_var) - set(__result "") - foreach(__list ${ARGN}) - foreach(__flag ${${__list}}) - string(STRIP ${__flag} __flag) - set(__result "${__result} ${__flag}") - endforeach() - endforeach() - string(STRIP ${__result} __result) - set(${out_var} ${__result} PARENT_SCOPE) -endfunction() - -################################################################################################ -# Converts all paths in list to absolute -# Usage: -# dmlccore_convert_absolute_paths() -function(dmlccore_convert_absolute_paths variable) - set(__dlist "") - foreach(__s ${${variable}}) - get_filename_component(__abspath ${__s} ABSOLUTE) - list(APPEND __list ${__abspath}) - endforeach() - set(${variable} ${__list} PARENT_SCOPE) -endfunction() - -################################################################################################ -# Reads set of version defines from the header file -# Usage: -# dmlccore_parse_header( ..) -macro(dmlccore_parse_header FILENAME FILE_VAR) - set(vars_regex "") - set(__parnet_scope OFF) - set(__add_cache OFF) - foreach(name ${ARGN}) - if("${name}" STREQUAL "PARENT_SCOPE") - set(__parnet_scope ON) - elseif("${name}" STREQUAL "CACHE") - set(__add_cache ON) - elseif(vars_regex) - set(vars_regex "${vars_regex}|${name}") - else() - set(vars_regex "${name}") - endif() - endforeach() - if(EXISTS "${FILENAME}") - file(STRINGS "${FILENAME}" ${FILE_VAR} REGEX "#define[ \t]+(${vars_regex})[ \t]+[0-9]+" ) - else() - unset(${FILE_VAR}) - endif() - foreach(name ${ARGN}) - if(NOT "${name}" STREQUAL "PARENT_SCOPE" AND NOT "${name}" STREQUAL "CACHE") - if(${FILE_VAR}) - if(${FILE_VAR} MATCHES ".+[ \t]${name}[ \t]+([0-9]+).*") - string(REGEX REPLACE ".+[ \t]${name}[ \t]+([0-9]+).*" "\\1" ${name} "${${FILE_VAR}}") - else() - set(${name} "") - endif() - if(__add_cache) - set(${name} ${${name}} CACHE INTERNAL "${name} parsed from ${FILENAME}" FORCE) - elseif(__parnet_scope) - set(${name} "${${name}}" PARENT_SCOPE) - endif() - else() - unset(${name} CACHE) - endif() - endif() - endforeach() -endmacro() - -################################################################################################ -# Reads single version define from the header file and parses it -# Usage: -# dmlccore_parse_header_single_define( ) -function(dmlccore_parse_header_single_define LIBNAME HDR_PATH VARNAME) - set(${LIBNAME}_H "") - if(EXISTS "${HDR_PATH}") - file(STRINGS "${HDR_PATH}" ${LIBNAME}_H REGEX "^#define[ \t]+${VARNAME}[ \t]+\"[^\"]*\".*$" LIMIT_COUNT 1) - endif() - - if(${LIBNAME}_H) - string(REGEX REPLACE "^.*[ \t]${VARNAME}[ \t]+\"([0-9]+).*$" "\\1" ${LIBNAME}_VERSION_MAJOR "${${LIBNAME}_H}") - string(REGEX REPLACE "^.*[ \t]${VARNAME}[ \t]+\"[0-9]+\\.([0-9]+).*$" "\\1" ${LIBNAME}_VERSION_MINOR "${${LIBNAME}_H}") - string(REGEX 
REPLACE "^.*[ \t]${VARNAME}[ \t]+\"[0-9]+\\.[0-9]+\\.([0-9]+).*$" "\\1" ${LIBNAME}_VERSION_PATCH "${${LIBNAME}_H}") - set(${LIBNAME}_VERSION_MAJOR ${${LIBNAME}_VERSION_MAJOR} ${ARGN} PARENT_SCOPE) - set(${LIBNAME}_VERSION_MINOR ${${LIBNAME}_VERSION_MINOR} ${ARGN} PARENT_SCOPE) - set(${LIBNAME}_VERSION_PATCH ${${LIBNAME}_VERSION_PATCH} ${ARGN} PARENT_SCOPE) - set(${LIBNAME}_VERSION_STRING "${${LIBNAME}_VERSION_MAJOR}.${${LIBNAME}_VERSION_MINOR}.${${LIBNAME}_VERSION_PATCH}" PARENT_SCOPE) - - # append a TWEAK version if it exists: - set(${LIBNAME}_VERSION_TWEAK "") - if("${${LIBNAME}_H}" MATCHES "^.*[ \t]${VARNAME}[ \t]+\"[0-9]+\\.[0-9]+\\.[0-9]+\\.([0-9]+).*$") - set(${LIBNAME}_VERSION_TWEAK "${CMAKE_MATCH_1}" ${ARGN} PARENT_SCOPE) - endif() - if(${LIBNAME}_VERSION_TWEAK) - set(${LIBNAME}_VERSION_STRING "${${LIBNAME}_VERSION_STRING}.${${LIBNAME}_VERSION_TWEAK}" ${ARGN} PARENT_SCOPE) - else() - set(${LIBNAME}_VERSION_STRING "${${LIBNAME}_VERSION_STRING}" ${ARGN} PARENT_SCOPE) - endif() - endif() -endfunction() - -######################################################################################################## -# An option that the user can select. Can accept condition to control when option is available for user. -# Usage: -# dmlccore_option( "doc string" [IF ]) -function(dmlccore_option variable description value) - set(__value ${value}) - set(__condition "") - set(__varname "__value") - foreach(arg ${ARGN}) - if(arg STREQUAL "IF" OR arg STREQUAL "if") - set(__varname "__condition") - else() - list(APPEND ${__varname} ${arg}) - endif() - endforeach() - unset(__varname) - if("${__condition}" STREQUAL "") - set(__condition 2 GREATER 1) - endif() - - if(${__condition}) - if("${__value}" MATCHES ";") - if(${__value}) - option(${variable} "${description}" ON) - else() - option(${variable} "${description}" OFF) - endif() - elseif(DEFINED ${__value}) - if(${__value}) - option(${variable} "${description}" ON) - else() - option(${variable} "${description}" OFF) - endif() - else() - option(${variable} "${description}" ${__value}) - endif() - else() - unset(${variable} CACHE) - endif() -endfunction() - -################################################################################################ -# Utility macro for comparing two lists. Used for CMake debugging purposes -# Usage: -# dmlccore_compare_lists( [description]) -function(dmlccore_compare_lists list1 list2 desc) - set(__list1 ${${list1}}) - set(__list2 ${${list2}}) - list(SORT __list1) - list(SORT __list2) - list(LENGTH __list1 __len1) - list(LENGTH __list2 __len2) - - if(NOT ${__len1} EQUAL ${__len2}) - message(FATAL_ERROR "Lists are not equal. ${__len1} != ${__len2}. ${desc}") - endif() - - foreach(__i RANGE 1 ${__len1}) - math(EXPR __index "${__i}- 1") - list(GET __list1 ${__index} __item1) - list(GET __list2 ${__index} __item2) - if(NOT ${__item1} STREQUAL ${__item2}) - message(FATAL_ERROR "Lists are not equal. Differ at element ${__index}. 
${desc}") - endif() - endforeach() -endfunction() - -################################################################################################ -# Command for disabling warnings for different platforms (see below for gcc and VisualStudio) -# Usage: -# dmlccore_warnings_disable( -Wshadow /wd4996 ..,) -macro(dmlccore_warnings_disable) - set(_flag_vars "") - set(_msvc_warnings "") - set(_gxx_warnings "") - - foreach(arg ${ARGN}) - if(arg MATCHES "^CMAKE_") - list(APPEND _flag_vars ${arg}) - elseif(arg MATCHES "^/wd") - list(APPEND _msvc_warnings ${arg}) - elseif(arg MATCHES "^-W") - list(APPEND _gxx_warnings ${arg}) - endif() - endforeach() - - if(NOT _flag_vars) - set(_flag_vars CMAKE_C_FLAGS CMAKE_CXX_FLAGS) - endif() - - if(MSVC AND _msvc_warnings) - foreach(var ${_flag_vars}) - foreach(warning ${_msvc_warnings}) - set(${var} "${${var}} ${warning}") - endforeach() - endforeach() - elseif((CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANGXX) AND _gxx_warnings) - foreach(var ${_flag_vars}) - foreach(warning ${_gxx_warnings}) - if(NOT warning MATCHES "^-Wno-") - string(REPLACE "${warning}" "" ${var} "${${var}}") - string(REPLACE "-W" "-Wno-" warning "${warning}") - endif() - set(${var} "${${var}} ${warning}") - endforeach() - endforeach() - endif() - dmlccore_clear_vars(_flag_vars _msvc_warnings _gxx_warnings) -endmacro() - -################################################################################################ -# Helper function get current definitions -# Usage: -# dmlccore_get_current_definitions() -function(dmlccore_get_current_definitions definitions_var) - get_property(current_definitions DIRECTORY PROPERTY COMPILE_DEFINITIONS) - set(result "") - - foreach(d ${current_definitions}) - list(APPEND result -D${d}) - endforeach() - - dmlccore_list_unique(result) - set(${definitions_var} ${result} PARENT_SCOPE) -endfunction() - -################################################################################################ -# Helper function get current includes/definitions -# Usage: -# dmlccore_get_current_cflags() -function(dmlccore_get_current_cflags cflags_var) - get_property(current_includes DIRECTORY PROPERTY INCLUDE_DIRECTORIES) - dmlccore_convert_absolute_paths(current_includes) - dmlccore_get_current_definitions(cflags) - - foreach(i ${current_includes}) - list(APPEND cflags "-I${i}") - endforeach() - - dmlccore_list_unique(cflags) - set(${cflags_var} ${cflags} PARENT_SCOPE) -endfunction() - -################################################################################################ -# Helper function to parse current linker libs into link directories, libflags and osx frameworks -# Usage: -# dmlccore_parse_linker_libs( ) -function(dmlccore_parse_linker_libs dmlccore_LINKER_LIBS_variable folders_var flags_var frameworks_var) - - set(__unspec "") - set(__debug "") - set(__optimized "") - set(__framework "") - set(__varname "__unspec") - - # split libs into debug, optimized, unspecified and frameworks - foreach(list_elem ${${dmlccore_LINKER_LIBS_variable}}) - if(list_elem STREQUAL "debug") - set(__varname "__debug") - elseif(list_elem STREQUAL "optimized") - set(__varname "__optimized") - elseif(list_elem MATCHES "^-framework[ \t]+([^ \t].*)") - list(APPEND __framework -framework ${CMAKE_MATCH_1}) - else() - list(APPEND ${__varname} ${list_elem}) - set(__varname "__unspec") - endif() - endforeach() - - # attach debug or optimized libs to unspecified according to current configuration - if(CMAKE_BUILD_TYPE MATCHES "Debug") - set(__libs ${__unspec} ${__debug}) - 
else() - set(__libs ${__unspec} ${__optimized}) - endif() - - set(libflags "") - set(folders "") - - # convert linker libraries list to link flags - foreach(lib ${__libs}) - if(TARGET ${lib}) - list(APPEND folders $) - list(APPEND libflags -l${lib}) - elseif(lib MATCHES "^-l.*") - list(APPEND libflags ${lib}) - elseif(IS_ABSOLUTE ${lib}) - get_filename_component(name_we ${lib} NAME_WE) - get_filename_component(folder ${lib} PATH) - - string(REGEX MATCH "^lib(.*)" __match ${name_we}) - list(APPEND libflags -l${CMAKE_MATCH_1}) - list(APPEND folders ${folder}) - else() - message(FATAL_ERROR "Logic error. Need to update cmake script") - endif() - endforeach() - - dmlccore_list_unique(libflags folders) - - set(${folders_var} ${folders} PARENT_SCOPE) - set(${flags_var} ${libflags} PARENT_SCOPE) - set(${frameworks_var} ${__framework} PARENT_SCOPE) -endfunction() - -################################################################################################ -# Helper function to detect Darwin version, i.e. 10.8, 10.9, 10.10, .... -# Usage: -# dmlccore_detect_darwin_version() -function(dmlccore_detect_darwin_version output_var) - if(APPLE) - execute_process(COMMAND /usr/bin/sw_vers -productVersion - RESULT_VARIABLE __sw_vers OUTPUT_VARIABLE __sw_vers_out - ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE) - - set(${output_var} ${__sw_vers_out} PARENT_SCOPE) - else() - set(${output_var} "" PARENT_SCOPE) - endif() -endfunction() diff --git a/ml-xgboost/dmlc-core/cmake/build_config.h.in b/ml-xgboost/dmlc-core/cmake/build_config.h.in deleted file mode 100644 index d7fc4ea..0000000 --- a/ml-xgboost/dmlc-core/cmake/build_config.h.in +++ /dev/null @@ -1,27 +0,0 @@ -#ifndef DMLC_BUILD_CONFIG_H_ -#define DMLC_BUILD_CONFIG_H_ - -#cmakedefine DMLC_FOPEN_64_PRESENT - -#if !defined(DMLC_FOPEN_64_PRESENT) && DMLC_USE_FOPEN64 - #define fopen64 std::fopen -#endif - -#cmakedefine DMLC_CXXABI_H_PRESENT -#cmakedefine DMLC_EXECINFO_H_PRESENT - -#if (defined DMLC_CXXABI_H_PRESENT) && (defined DMLC_EXECINFO_H_PRESENT) - #ifndef DMLC_LOG_STACK_TRACE - #define DMLC_LOG_STACK_TRACE 1 - #endif - #ifndef DMLC_LOG_STACK_TRACE_SIZE - #define DMLC_LOG_STACK_TRACE_SIZE 10 - #endif - #cmakedefine DMLC_EXECINFO_H <${DMLC_EXECINFO_H}> -#endif - -#cmakedefine DMLC_NANOSLEEP_PRESENT - -#define DMLC_CMAKE_LITTLE_ENDIAN ${DMLC_CMAKE_LITTLE_ENDIAN} - -#endif // DMLC_BUILD_CONFIG_H_ diff --git a/ml-xgboost/dmlc-core/cmake/dmlc-config.cmake.in b/ml-xgboost/dmlc-core/cmake/dmlc-config.cmake.in deleted file mode 100644 index a318d48..0000000 --- a/ml-xgboost/dmlc-core/cmake/dmlc-config.cmake.in +++ /dev/null @@ -1,5 +0,0 @@ -@PACKAGE_INIT@ - -if(NOT TARGET dmlc::dmlc) - include(${CMAKE_CURRENT_LIST_DIR}/DMLCTargets.cmake) -endif() diff --git a/ml-xgboost/dmlc-core/cmake/gtest_cmake.in b/ml-xgboost/dmlc-core/cmake/gtest_cmake.in deleted file mode 100644 index d7acc70..0000000 --- a/ml-xgboost/dmlc-core/cmake/gtest_cmake.in +++ /dev/null @@ -1,15 +0,0 @@ -cmake_minimum_required(VERSION 2.8.2) - -project(googletest-download NONE) - -include(ExternalProject) -ExternalProject_Add(googletest - GIT_REPOSITORY https://github.com/google/googletest.git - GIT_TAG release-1.10.0 - SOURCE_DIR "${CMAKE_BINARY_DIR}/googletest-src" - BINARY_DIR "${CMAKE_BINARY_DIR}/googletest-build" - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND "" - TEST_COMMAND "" -) diff --git a/ml-xgboost/dmlc-core/cmake/lint.cmake b/ml-xgboost/dmlc-core/cmake/lint.cmake deleted file mode 100644 index cb0db2e..0000000 --- a/ml-xgboost/dmlc-core/cmake/lint.cmake +++ 
/dev/null @@ -1,21 +0,0 @@ -get_filename_component(CMAKE_SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/.." ABSOLUTE) -if(NOT MSVC) - set(LINT_COMMAND ${CMAKE_SOURCE_DIR}/scripts/lint.py) -else() - if((NOT PYTHON_EXECUTABLE)) - message(FATAL_ERROR "Cannot lint without python") - endif() - # format output so VS can bring us to the offending file/line - set(LINT_COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_SOURCE_DIR}/scripts/lint.py) -endif() - -cmake_policy(SET CMP0009 NEW) # suppress cmake warning -string(REPLACE " " ";" LINT_DIRS ${LINT_DIRS}) -string(REPLACE " " ";" EXCLUDE_PATH ${EXCLUDE_PATH}) -execute_process( - COMMAND ${LINT_COMMAND} ${PROJECT_NAME} all ${LINT_DIRS} --exclude_path=${EXCLUDE_PATH} - WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} - ERROR_VARIABLE LINT_OUTPUT - ERROR_STRIP_TRAILING_WHITESPACE -) -message(STATUS ${LINT_OUTPUT}) \ No newline at end of file diff --git a/ml-xgboost/dmlc-core/doc/.gitignore b/ml-xgboost/dmlc-core/doc/.gitignore deleted file mode 100644 index 39742b9..0000000 --- a/ml-xgboost/dmlc-core/doc/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ - -_build -doxygen diff --git a/ml-xgboost/dmlc-core/doc/Doxyfile b/ml-xgboost/dmlc-core/doc/Doxyfile deleted file mode 100644 index 4db428c..0000000 --- a/ml-xgboost/dmlc-core/doc/Doxyfile +++ /dev/null @@ -1,2406 +0,0 @@ -# Doxyfile 1.8.13 - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project. -# -# All text after a double hash (##) is considered a comment and is placed in -# front of the TAG it is preceding. -# -# All text after a single hash (#) is considered a comment and will be ignored. -# The format is: -# TAG = value [value, ...] -# For lists, items can also be appended using: -# TAG += value [value, ...] -# Values that contain spaces should be placed between quotes (\" \"). - -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- - -# This tag specifies the encoding used for all characters in the config file -# that follow. The default is UTF-8 which is also the encoding used for all text -# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv -# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv -# for the list of possible encodings. -# The default value is: UTF-8. - -DOXYFILE_ENCODING = UTF-8 - -# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by -# double-quotes, unless you are using Doxywizard) that should identify the -# project for which the documentation is generated. This name is used in the -# title of most generated pages and in a few other places. -# The default value is: My Project. - -PROJECT_NAME = "dmlc-core" - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. This -# could be handy for archiving the generated documentation or if some version -# control system is used. - -PROJECT_NUMBER = - -# Using the PROJECT_BRIEF tag one can provide an optional one line description -# for a project that appears at the top of each page and should give viewer a -# quick idea about the purpose of the project. Keep the description short. - -PROJECT_BRIEF = - -# With the PROJECT_LOGO tag one can specify a logo or an icon that is included -# in the documentation. The maximum height of the logo should not exceed 55 -# pixels and the maximum width should not exceed 200 pixels. 
Doxygen will copy -# the logo to the output directory. - -PROJECT_LOGO = - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path -# into which the generated documentation will be written. If a relative path is -# entered, it will be relative to the location where doxygen was started. If -# left blank the current directory will be used. - -OUTPUT_DIRECTORY = doc/doxygen - -# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- -# directories (in 2 levels) under the output directory of each output format and -# will distribute the generated files over these directories. Enabling this -# option can be useful when feeding doxygen a huge amount of source files, where -# putting all generated files in the same directory would otherwise causes -# performance problems for the file system. -# The default value is: NO. - -CREATE_SUBDIRS = NO - -# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII -# characters to appear in the names of generated files. If set to NO, non-ASCII -# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode -# U+3044. -# The default value is: NO. - -ALLOW_UNICODE_NAMES = NO - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. -# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, -# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), -# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, -# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), -# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, -# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, -# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, -# Ukrainian and Vietnamese. -# The default value is: English. - -OUTPUT_LANGUAGE = English - -# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member -# descriptions after the members that are listed in the file and class -# documentation (similar to Javadoc). Set to NO to disable this. -# The default value is: YES. - -BRIEF_MEMBER_DESC = YES - -# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief -# description of a member or function before the detailed description -# -# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. -# The default value is: YES. - -REPEAT_BRIEF = YES - -# This tag implements a quasi-intelligent brief description abbreviator that is -# used to form the text in various listings. Each string in this list, if found -# as the leading text of the brief description, will be stripped from the text -# and the result, after processing the whole list, is used as the annotated -# text. Otherwise, the brief description is used as-is. If left blank, the -# following values are used ($name is automatically replaced with the name of -# the entity):The $name class, The $name widget, The $name file, is, provides, -# specifies, contains, represents, a, an and the. - -ABBREVIATE_BRIEF = - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# doxygen will generate a detailed section even if there is only a brief -# description. -# The default value is: NO. 
- -ALWAYS_DETAILED_SEC = NO - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all -# inherited members of a class in the documentation of that class as if those -# members were ordinary class members. Constructors, destructors and assignment -# operators of the base classes will not be shown. -# The default value is: NO. - -INLINE_INHERITED_MEMB = NO - -# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path -# before files name in the file list and in the header files. If set to NO the -# shortest path that makes the file name unique will be used -# The default value is: YES. - -FULL_PATH_NAMES = YES - -# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. -# Stripping is only done if one of the specified strings matches the left-hand -# part of the path. The tag can be used to show relative paths in the file list. -# If left blank the directory from which doxygen is run is used as the path to -# strip. -# -# Note that you can specify absolute paths here, but also relative paths, which -# will be relative from the directory where doxygen is started. -# This tag requires that the tag FULL_PATH_NAMES is set to YES. - -STRIP_FROM_PATH = - -# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the -# path mentioned in the documentation of a class, which tells the reader which -# header file to include in order to use a class. If left blank only the name of -# the header file containing the class definition is used. Otherwise one should -# specify the list of include paths that are normally passed to the compiler -# using the -I flag. - -STRIP_FROM_INC_PATH = - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but -# less readable) file names. This can be useful is your file systems doesn't -# support long names like on DOS, Mac, or CD-ROM. -# The default value is: NO. - -SHORT_NAMES = NO - -# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the -# first line (until the first dot) of a Javadoc-style comment as the brief -# description. If set to NO, the Javadoc-style will behave just like regular Qt- -# style comments (thus requiring an explicit @brief command for a brief -# description.) -# The default value is: NO. - -JAVADOC_AUTOBRIEF = NO - -# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first -# line (until the first dot) of a Qt-style comment as the brief description. If -# set to NO, the Qt-style will behave just like regular Qt-style comments (thus -# requiring an explicit \brief command for a brief description.) -# The default value is: NO. - -QT_AUTOBRIEF = NO - -# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a -# multi-line C++ special comment block (i.e. a block of //! or /// comments) as -# a brief description. This used to be the default behavior. The new default is -# to treat a multi-line C++ comment block as a detailed description. Set this -# tag to YES if you prefer the old behavior instead. -# -# Note that setting this tag to YES also means that rational rose comments are -# not recognized any more. -# The default value is: NO. - -MULTILINE_CPP_IS_BRIEF = NO - -# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the -# documentation from any documented member that it re-implements. -# The default value is: YES. - -INHERIT_DOCS = YES - -# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new -# page for each member. 
If set to NO, the documentation of a member will be part -# of the file/class/namespace that contains it. -# The default value is: NO. - -SEPARATE_MEMBER_PAGES = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen -# uses this value to replace tabs by spaces in code fragments. -# Minimum value: 1, maximum value: 16, default value: 4. - -TAB_SIZE = 8 - -# This tag can be used to specify a number of aliases that act as commands in -# the documentation. An alias has the form: -# name=value -# For example adding -# "sideeffect=@par Side Effects:\n" -# will allow you to put the command \sideeffect (or @sideeffect) in the -# documentation, which will result in a user-defined paragraph with heading -# "Side Effects:". You can put \n's in the value part of an alias to insert -# newlines. - -ALIASES = - -# This tag can be used to specify a number of word-keyword mappings (TCL only). -# A mapping has the form "name=value". For example adding "class=itcl::class" -# will allow you to use the command class in the itcl::class meaning. - -TCL_SUBST = - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources -# only. Doxygen will then generate output that is more tailored for C. For -# instance, some of the names that are used will be different. The list of all -# members will be omitted, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_FOR_C = NO - -# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or -# Python sources only. Doxygen will then generate output that is more tailored -# for that language. For instance, namespaces will be presented as packages, -# qualified scopes will look different, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_JAVA = NO - -# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran -# sources. Doxygen will then generate output that is tailored for Fortran. -# The default value is: NO. - -OPTIMIZE_FOR_FORTRAN = NO - -# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL -# sources. Doxygen will then generate output that is tailored for VHDL. -# The default value is: NO. - -OPTIMIZE_OUTPUT_VHDL = NO - -# Doxygen selects the parser to use depending on the extension of the files it -# parses. With this tag you can assign which parser to use for a given -# extension. Doxygen has a built-in mapping, but you can override or extend it -# using this tag. The format is ext=language, where ext is a file extension, and -# language is one of the parsers supported by doxygen: IDL, Java, Javascript, -# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: -# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: -# Fortran. In the later case the parser tries to guess whether the code is fixed -# or free formatted code, this is the default for Fortran type files), VHDL. For -# instance to make doxygen treat .inc files as Fortran files (default is PHP), -# and .f files as C (default is Fortran), use: inc=Fortran f=C. -# -# Note: For files without extension you can use no_extension as a placeholder. -# -# Note that for custom extensions you also need to set FILE_PATTERNS otherwise -# the files are not read by doxygen. - -EXTENSION_MAPPING = - -# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments -# according to the Markdown format, which allows for more readable -# documentation. See http://daringfireball.net/projects/markdown/ for details. 
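To make the MARKDOWN_SUPPORT behaviour concrete, here is a minimal, hedged C++ sketch (the function and all names are invented, not taken from this repository): with the tag enabled, the Markdown below is rendered as a heading, emphasis, and a bullet list in the generated page.

    /// # Overview
    /// Predicts the label for one *preprocessed* row.
    /// - runs in O(tree depth) per tree
    /// - does not take ownership of row
    int PredictRow(const float* row);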
-# The output of markdown processing is further processed by doxygen, so you can
-# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
-# case of backward compatibility issues.
-# The default value is: YES.
-
-MARKDOWN_SUPPORT = YES
-
-# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
-# to that level are automatically included in the table of contents, even if
-# they do not have an id attribute.
-# Note: This feature currently applies only to Markdown headings.
-# Minimum value: 0, maximum value: 99, default value: 0.
-# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
-
-TOC_INCLUDE_HEADINGS = 0
-
-# When enabled doxygen tries to link words that correspond to documented
-# classes, or namespaces to their corresponding documentation. Such a link can
-# be prevented in individual cases by putting a % sign in front of the word or
-# globally by setting AUTOLINK_SUPPORT to NO.
-# The default value is: YES.
-
-AUTOLINK_SUPPORT = YES
-
-# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should set this
-# tag to YES in order to let doxygen match function declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string);
-# versus func(std::string) {}). This also makes the inheritance and
-# collaboration diagrams that involve STL classes more complete and accurate.
-# The default value is: NO.
-
-BUILTIN_STL_SUPPORT = NO
-
-# If you use Microsoft's C++/CLI language, you should set this option to YES to
-# enable parsing support.
-# The default value is: NO.
-
-CPP_CLI_SUPPORT = NO
-
-# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
-# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
-# will parse them like normal C++ but will assume all classes use public instead
-# of private inheritance when no explicit protection keyword is present.
-# The default value is: NO.
-
-SIP_SUPPORT = NO
-
-# For Microsoft's IDL there are propget and propput attributes to indicate
-# getter and setter methods for a property. Setting this option to YES will make
-# doxygen replace the get and set methods by a property in the documentation.
-# This will only work if the methods are indeed getting or setting a simple
-# type. If this is not the case, or you want to show the methods anyway, you
-# should set this option to NO.
-# The default value is: YES.
-
-IDL_PROPERTY_SUPPORT = YES
-
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES then doxygen will reuse the documentation of the first
-# member in the group (if any) for the other members of the group. By default
-# all members of a group must be documented explicitly.
-# The default value is: NO.
-
-DISTRIBUTE_GROUP_DOC = NO
-
-# If one adds a struct or class to a group and this option is enabled, then also
-# any nested class or struct is added to the same group. By default this option
-# is disabled and one has to add nested compounds explicitly via \ingroup.
-# The default value is: NO.
-
-GROUP_NESTED_COMPOUNDS = NO
-
-# Set the SUBGROUPING tag to YES to allow class member groups of the same type
-# (for instance a group of public functions) to be put as a subgroup of that
-# type (e.g. under the Public Functions section). Set it to NO to prevent
-# subgrouping. Alternatively, this can be done per class using the
-# \nosubgrouping command.
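As a hedged illustration of the per-class alternative just mentioned (\nosubgrouping is a standard Doxygen command; the class and members are invented):

    /// Container documented with subgrouping disabled for this class only.
    /// \nosubgrouping
    class FlatDocs {
     public:
      void Run();   ///< Listed without the usual type-based subgrouping.
      int count;    ///< Listed in the same flat member list.
    };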
-# The default value is: YES. - -SUBGROUPING = YES - -# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions -# are shown inside the group in which they are included (e.g. using \ingroup) -# instead of on a separate page (for HTML and Man pages) or section (for LaTeX -# and RTF). -# -# Note that this feature does not work in combination with -# SEPARATE_MEMBER_PAGES. -# The default value is: NO. - -INLINE_GROUPED_CLASSES = NO - -# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions -# with only public data fields or simple typedef fields will be shown inline in -# the documentation of the scope in which they are defined (i.e. file, -# namespace, or group documentation), provided this scope is documented. If set -# to NO, structs, classes, and unions are shown on a separate page (for HTML and -# Man pages) or section (for LaTeX and RTF). -# The default value is: NO. - -INLINE_SIMPLE_STRUCTS = NO - -# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or -# enum is documented as struct, union, or enum with the name of the typedef. So -# typedef struct TypeS {} TypeT, will appear in the documentation as a struct -# with name TypeT. When disabled the typedef will appear as a member of a file, -# namespace, or class. And the struct will be named TypeS. This can typically be -# useful for C code in case the coding convention dictates that all compound -# types are typedef'ed and only the typedef is referenced, never the tag name. -# The default value is: NO. - -TYPEDEF_HIDES_STRUCT = NO - -# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This -# cache is used to resolve symbols given their name and scope. Since this can be -# an expensive process and often the same symbol appears multiple times in the -# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small -# doxygen will become slower. If the cache is too large, memory is wasted. The -# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range -# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 -# symbols. At the end of a run doxygen will report the cache usage and suggest -# the optimal cache size from a speed point of view. -# Minimum value: 0, maximum value: 9, default value: 0. - -LOOKUP_CACHE_SIZE = 0 - -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- - -# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in -# documentation are documented, even if no documentation was available. Private -# class members and static file members will be hidden unless the -# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. -# Note: This will also disable the warnings about undocumented members that are -# normally produced when WARNINGS is set to YES. -# The default value is: NO. - -EXTRACT_ALL = NO - -# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will -# be included in the documentation. -# The default value is: NO. - -EXTRACT_PRIVATE = NO - -# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal -# scope will be included in the documentation. -# The default value is: NO. - -EXTRACT_PACKAGE = NO - -# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be -# included in the documentation. -# The default value is: NO. 
- -EXTRACT_STATIC = NO - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined -# locally in source files will be included in the documentation. If set to NO, -# only classes defined in header files are included. Does not have any effect -# for Java sources. -# The default value is: YES. - -EXTRACT_LOCAL_CLASSES = YES - -# This flag is only useful for Objective-C code. If set to YES, local methods, -# which are defined in the implementation section but not in the interface are -# included in the documentation. If set to NO, only methods in the interface are -# included. -# The default value is: NO. - -EXTRACT_LOCAL_METHODS = NO - -# If this flag is set to YES, the members of anonymous namespaces will be -# extracted and appear in the documentation as a namespace called -# 'anonymous_namespace{file}', where file will be replaced with the base name of -# the file that contains the anonymous namespace. By default anonymous namespace -# are hidden. -# The default value is: NO. - -EXTRACT_ANON_NSPACES = NO - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all -# undocumented members inside documented classes or files. If set to NO these -# members will be included in the various overviews, but no documentation -# section is generated. This option has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. If set -# to NO, these classes will be included in the various overviews. This option -# has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_CLASSES = NO - -# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend -# (class|struct|union) declarations. If set to NO, these declarations will be -# included in the documentation. -# The default value is: NO. - -HIDE_FRIEND_COMPOUNDS = NO - -# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any -# documentation blocks found inside the body of a function. If set to NO, these -# blocks will be appended to the function's detailed documentation block. -# The default value is: NO. - -HIDE_IN_BODY_DOCS = NO - -# The INTERNAL_DOCS tag determines if documentation that is typed after a -# \internal command is included. If the tag is set to NO then the documentation -# will be excluded. Set it to YES to include the internal documentation. -# The default value is: NO. - -INTERNAL_DOCS = NO - -# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file -# names in lower-case letters. If set to YES, upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# and Mac users are advised to set this option to NO. -# The default value is: system dependent. - -CASE_SENSE_NAMES = YES - -# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with -# their full class and namespace scopes in the documentation. If set to YES, the -# scope will be hidden. -# The default value is: NO. - -HIDE_SCOPE_NAMES = NO - -# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will -# append additional text to a page's title, such as Class Reference. If set to -# YES the compound reference will be hidden. -# The default value is: NO. 
- -HIDE_COMPOUND_REFERENCE= NO - -# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of -# the files that are included by a file in the documentation of that file. -# The default value is: YES. - -SHOW_INCLUDE_FILES = YES - -# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each -# grouped member an include statement to the documentation, telling the reader -# which file to include in order to use the member. -# The default value is: NO. - -SHOW_GROUPED_MEMB_INC = NO - -# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include -# files with double quotes in the documentation rather than with sharp brackets. -# The default value is: NO. - -FORCE_LOCAL_INCLUDES = NO - -# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the -# documentation for inline members. -# The default value is: YES. - -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the -# (detailed) documentation of file and class members alphabetically by member -# name. If set to NO, the members will appear in declaration order. -# The default value is: YES. - -SORT_MEMBER_DOCS = YES - -# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief -# descriptions of file, namespace and class members alphabetically by member -# name. If set to NO, the members will appear in declaration order. Note that -# this will also influence the order of the classes in the class list. -# The default value is: NO. - -SORT_BRIEF_DOCS = NO - -# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the -# (brief and detailed) documentation of class members so that constructors and -# destructors are listed first. If set to NO the constructors will appear in the -# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. -# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief -# member documentation. -# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting -# detailed member documentation. -# The default value is: NO. - -SORT_MEMBERS_CTORS_1ST = NO - -# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy -# of group names into alphabetical order. If set to NO the group names will -# appear in their defined order. -# The default value is: NO. - -SORT_GROUP_NAMES = NO - -# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by -# fully-qualified names, including namespaces. If set to NO, the class list will -# be sorted only by class name, not including the namespace part. -# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. -# Note: This option applies only to the class list, not to the alphabetical -# list. -# The default value is: NO. - -SORT_BY_SCOPE_NAME = NO - -# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper -# type resolution of all parameters of a function it will reject a match between -# the prototype and the implementation of a member function even if there is -# only one candidate or it is obvious which candidate to choose by doing a -# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still -# accept a match between prototype and implementation in such cases. -# The default value is: NO. - -STRICT_PROTO_MATCHING = NO - -# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo -# list. This list is created by putting \todo commands in the documentation. 
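A short, hedged example of a comment that would feed this todo list (\todo is a standard Doxygen command; the function is invented):

    /// Computes an approximate split gain for one feature.
    /// \todo Cache the per-node statistics instead of recomputing them.
    double ApproxGain(int feature_id);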
-# The default value is: YES.
-
-GENERATE_TODOLIST = YES
-
-# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
-# list. This list is created by putting \test commands in the documentation.
-# The default value is: YES.
-
-GENERATE_TESTLIST = YES
-
-# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
-# list. This list is created by putting \bug commands in the documentation.
-# The default value is: YES.
-
-GENERATE_BUGLIST = YES
-
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
-# the deprecated list. This list is created by putting \deprecated commands in
-# the documentation.
-# The default value is: YES.
-
-GENERATE_DEPRECATEDLIST= YES
-
-# The ENABLED_SECTIONS tag can be used to enable conditional documentation
-# sections, marked by \if <section_label> ... \endif and \cond <section_label>
-# ... \endcond blocks.
-
-ENABLED_SECTIONS =
-
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
-# initial value of a variable or macro / define can have for it to appear in the
-# documentation. If the initializer consists of more lines than specified here
-# it will be hidden. Use a value of 0 to hide initializers completely. The
-# appearance of the value of individual variables and macros / defines can be
-# controlled using \showinitializer or \hideinitializer command in the
-# documentation regardless of this setting.
-# Minimum value: 0, maximum value: 10000, default value: 30.
-
-MAX_INITIALIZER_LINES = 30
-
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
-# the bottom of the documentation of classes and structs. If set to YES, the
-# list will mention the files that were used to generate the documentation.
-# The default value is: YES.
-
-SHOW_USED_FILES = YES
-
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
-# will remove the Files entry from the Quick Index and from the Folder Tree View
-# (if specified).
-# The default value is: YES.
-
-SHOW_FILES = YES
-
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
-# page. This will remove the Namespaces entry from the Quick Index and from the
-# Folder Tree View (if specified).
-# The default value is: YES.
-
-SHOW_NAMESPACES = YES
-
-# The FILE_VERSION_FILTER tag can be used to specify a program or script that
-# doxygen should invoke to get the current version for each file (typically from
-# the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command <command> <input-file>, where <command> is the value of
-# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
-# provided by doxygen. Whatever the program writes to standard output is used as
-# the file version. For an example see the documentation.
-
-FILE_VERSION_FILTER =
-
-# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
-# by doxygen. The layout file controls the global structure of the generated
-# output files in an output format independent way. To create the layout file
-# that represents doxygen's defaults, run doxygen with the -l option. You can
-# optionally specify a file name after the option, if omitted DoxygenLayout.xml
-# will be used as the name of the layout file.
-#
-# Note that if you run doxygen from a directory containing a file called
-# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
-# tag is left empty.
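As a hedged usage note for the paragraph above: running doxygen with the -l option, for example "doxygen -l DoxygenLayout.xml", writes out the default layout file, which can then be edited and pointed to by the LAYOUT_FILE tag.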
- -LAYOUT_FILE = - -# The CITE_BIB_FILES tag can be used to specify one or more bib files containing -# the reference definitions. This must be a list of .bib files. The .bib -# extension is automatically appended if omitted. This requires the bibtex tool -# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. -# For LaTeX the style of the bibliography can be controlled using -# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the -# search path. See also \cite for info how to create references. - -CITE_BIB_FILES = - -#--------------------------------------------------------------------------- -# Configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated to -# standard output by doxygen. If QUIET is set to YES this implies that the -# messages are off. -# The default value is: NO. - -QUIET = NO - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES -# this implies that the warnings are on. -# -# Tip: Turn warnings on while writing the documentation. -# The default value is: YES. - -WARNINGS = YES - -# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate -# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag -# will automatically be disabled. -# The default value is: YES. - -WARN_IF_UNDOCUMENTED = YES - -# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some parameters -# in a documented function, or documenting parameters that don't exist or using -# markup commands wrongly. -# The default value is: YES. - -WARN_IF_DOC_ERROR = YES - -# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that -# are documented, but have no documentation for their parameters or return -# value. If set to NO, doxygen will only warn about wrong or incomplete -# parameter documentation, but not about the absence of documentation. -# The default value is: NO. - -WARN_NO_PARAMDOC = NO - -# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when -# a warning is encountered. -# The default value is: NO. - -WARN_AS_ERROR = NO - -# The WARN_FORMAT tag determines the format of the warning messages that doxygen -# can produce. The string should contain the $file, $line, and $text tags, which -# will be replaced by the file and line number from which the warning originated -# and the warning text. Optionally the format may contain $version, which will -# be replaced by the version of the file (if it could be obtained via -# FILE_VERSION_FILTER) -# The default value is: $file:$line: $text. - -WARN_FORMAT = "$file:$line: $text" - -# The WARN_LOGFILE tag can be used to specify a file to which warning and error -# messages should be written. If left blank the output is written to standard -# error (stderr). - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# Configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag is used to specify the files and/or directories that contain -# documented source files. You may enter file names like myfile.cpp or -# directories like /usr/src/myproject. 
Separate the files or directories with -# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING -# Note: If this tag is empty the current directory is searched. - -INPUT = include - -# This tag can be used to specify the character encoding of the source files -# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses -# libiconv (or the iconv built into libc) for the transcoding. See the libiconv -# documentation (see: http://www.gnu.org/software/libiconv) for the list of -# possible encodings. -# The default value is: UTF-8. - -INPUT_ENCODING = UTF-8 - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and -# *.h) to filter out the source-files in the directories. -# -# Note that for custom extensions or not directly supported extensions you also -# need to set EXTENSION_MAPPING for the extension otherwise the files are not -# read by doxygen. -# -# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, -# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, -# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, -# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, -# *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf and *.qsf. - -FILE_PATTERNS = - -# The RECURSIVE tag can be used to specify whether or not subdirectories should -# be searched for input files as well. -# The default value is: NO. - -RECURSIVE = YES - -# The EXCLUDE tag can be used to specify files and/or directories that should be -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. -# -# Note that relative paths are relative to the directory from which doxygen is -# run. - -EXCLUDE = - -# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or -# directories that are symbolic links (a Unix file system feature) are excluded -# from the input. -# The default value is: NO. - -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories for example use the pattern */test/* - -EXCLUDE_PATTERNS = */test/* \ - logging.h - -# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names -# (namespaces, classes, functions, etc.) that should be excluded from the -# output. The symbol name can be a fully qualified name, a word, or if the -# wildcard * is used, a substring. Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories use the pattern */test/* - -EXCLUDE_SYMBOLS = - -# The EXAMPLE_PATH tag can be used to specify one or more files or directories -# that contain example code fragments that are included (see the \include -# command). - -EXAMPLE_PATH = - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and -# *.h) to filter out the source-files in the directories. If left blank all -# files are included. 
-
-EXAMPLE_PATTERNS =
-
-# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude commands
-# irrespective of the value of the RECURSIVE tag.
-# The default value is: NO.
-
-EXAMPLE_RECURSIVE = NO
-
-# The IMAGE_PATH tag can be used to specify one or more files or directories
-# that contain images that are to be included in the documentation (see the
-# \image command).
-
-IMAGE_PATH =
-
-# The INPUT_FILTER tag can be used to specify a program that doxygen should
-# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command:
-#
-# <filter> <input-file>
-#
-# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
-# name of an input file. Doxygen will then use the output that the filter
-# program writes to standard output. If FILTER_PATTERNS is specified, this tag
-# will be ignored.
-#
-# Note that the filter must not add or remove lines; it is applied before the
-# code is scanned, but not when the output code is generated. If lines are added
-# or removed, the anchors will not be placed correctly.
-#
-# Note that for custom extensions or not directly supported extensions you also
-# need to set EXTENSION_MAPPING for the extension otherwise the files are not
-# properly processed by doxygen.
-
-INPUT_FILTER =
-
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis. Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match. The filters are a list of the form: pattern=filter
-# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
-# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
-# patterns match the file name, INPUT_FILTER is applied.
-#
-# Note that for custom extensions or not directly supported extensions you also
-# need to set EXTENSION_MAPPING for the extension otherwise the files are not
-# properly processed by doxygen.
-
-FILTER_PATTERNS =
-
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER) will also be used to filter the input files that are used for
-# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
-# The default value is: NO.
-
-FILTER_SOURCE_FILES = NO
-
-# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
-# it is also possible to disable source filtering for a specific pattern using
-# *.ext= (so without naming a filter).
-# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
-
-FILTER_SOURCE_PATTERNS =
-
-# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
-# is part of the input, its contents will be placed on the main page
-# (index.html). This can be useful if you have a project on for instance GitHub
-# and want to reuse the introduction page also for the doxygen output.
-
-USE_MDFILE_AS_MAINPAGE =
-
-#---------------------------------------------------------------------------
-# Configuration options related to source browsing
-#---------------------------------------------------------------------------
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
-# generated. Documented entities will be cross-referenced with these sources.
-#
-# Note: To get rid of all source code in the generated output, make sure that
-# also VERBATIM_HEADERS is set to NO.
-# The default value is: NO. - -SOURCE_BROWSER = NO - -# Setting the INLINE_SOURCES tag to YES will include the body of functions, -# classes and enums directly into the documentation. -# The default value is: NO. - -INLINE_SOURCES = NO - -# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any -# special comment blocks from generated source code fragments. Normal C, C++ and -# Fortran comments will always remain visible. -# The default value is: YES. - -STRIP_CODE_COMMENTS = YES - -# If the REFERENCED_BY_RELATION tag is set to YES then for each documented -# function all documented functions referencing it will be listed. -# The default value is: NO. - -REFERENCED_BY_RELATION = NO - -# If the REFERENCES_RELATION tag is set to YES then for each documented function -# all documented entities called/used by that function will be listed. -# The default value is: NO. - -REFERENCES_RELATION = NO - -# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set -# to YES then the hyperlinks from functions in REFERENCES_RELATION and -# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will -# link to the documentation. -# The default value is: YES. - -REFERENCES_LINK_SOURCE = YES - -# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the -# source code will show a tooltip with additional information such as prototype, -# brief description and links to the definition and documentation. Since this -# will make the HTML file larger and loading of large files a bit slower, you -# can opt to disable this feature. -# The default value is: YES. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -SOURCE_TOOLTIPS = YES - -# If the USE_HTAGS tag is set to YES then the references to source code will -# point to the HTML generated by the htags(1) tool instead of doxygen built-in -# source browser. The htags tool is part of GNU's global source tagging system -# (see http://www.gnu.org/software/global/global.html). You will need version -# 4.8.6 or higher. -# -# To use it do the following: -# - Install the latest version of global -# - Enable SOURCE_BROWSER and USE_HTAGS in the config file -# - Make sure the INPUT points to the root of the source tree -# - Run doxygen as normal -# -# Doxygen will invoke htags (and that will in turn invoke gtags), so these -# tools must be available from the command line (i.e. in the search path). -# -# The result: instead of the source browser generated by doxygen, the links to -# source code will now point to the output of htags. -# The default value is: NO. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -USE_HTAGS = NO - -# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a -# verbatim copy of the header file for each class for which an include is -# specified. Set to NO to disable this. -# See also: Section \class. -# The default value is: YES. - -VERBATIM_HEADERS = YES - -#--------------------------------------------------------------------------- -# Configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all -# compounds will be generated. Enable this if the project contains a lot of -# classes, structs, unions or interfaces. -# The default value is: YES. 
- -ALPHABETICAL_INDEX = YES - -# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in -# which the alphabetical index list will be split. -# Minimum value: 1, maximum value: 20, default value: 5. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all classes will -# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag -# can be used to specify a prefix (or a list of prefixes) that should be ignored -# while generating the index headers. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# Configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output -# The default value is: YES. - -GENERATE_HTML = YES - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a -# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of -# it. -# The default directory is: html. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_OUTPUT = html - -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each -# generated HTML page (for example: .htm, .php, .asp). -# The default value is: .html. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FILE_EXTENSION = .html - -# The HTML_HEADER tag can be used to specify a user-defined HTML header file for -# each generated HTML page. If the tag is left blank doxygen will generate a -# standard header. -# -# To get valid HTML the header file that includes any scripts and style sheets -# that doxygen needs, which is dependent on the configuration options used (e.g. -# the setting GENERATE_TREEVIEW). It is highly recommended to start with a -# default header using -# doxygen -w html new_header.html new_footer.html new_stylesheet.css -# YourConfigFile -# and then modify the file new_header.html. See also section "Doxygen usage" -# for information on how to generate the default header that doxygen normally -# uses. -# Note: The header is subject to change so you typically have to regenerate the -# default header when upgrading to a newer version of doxygen. For a description -# of the possible markers and block names see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_HEADER = - -# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each -# generated HTML page. If the tag is left blank doxygen will generate a standard -# footer. See HTML_HEADER for more information on how to generate a default -# footer and what special commands can be used inside the footer. See also -# section "Doxygen usage" for information on how to generate the default footer -# that doxygen normally uses. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FOOTER = - -# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style -# sheet that is used by each HTML page. It can be used to fine-tune the look of -# the HTML output. If left blank doxygen will generate a default style sheet. -# See also section "Doxygen usage" for information on how to generate the style -# sheet that doxygen normally uses. 
-# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as -# it is more robust and this tag (HTML_STYLESHEET) will in the future become -# obsolete. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_STYLESHEET = - -# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined -# cascading style sheets that are included after the standard style sheets -# created by doxygen. Using this option one can overrule certain style aspects. -# This is preferred over using HTML_STYLESHEET since it does not replace the -# standard style sheet and is therefore more robust against future updates. -# Doxygen will copy the style sheet files to the output directory. -# Note: The order of the extra style sheet files is of importance (e.g. the last -# style sheet in the list overrules the setting of the previous ones in the -# list). For an example see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_EXTRA_STYLESHEET = - -# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or -# other source files which should be copied to the HTML output directory. Note -# that these files will be copied to the base HTML output directory. Use the -# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these -# files. In the HTML_STYLESHEET file, use the file name only. Also note that the -# files will be copied as-is; there are no commands or markers available. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_EXTRA_FILES = - -# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen -# will adjust the colors in the style sheet and background images according to -# this color. Hue is specified as an angle on a colorwheel, see -# http://en.wikipedia.org/wiki/Hue for more information. For instance the value -# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 -# purple, and 360 is red again. -# Minimum value: 0, maximum value: 359, default value: 220. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_HUE = 220 - -# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors -# in the HTML output. For a value of 0 the output will use grayscales only. A -# value of 255 will produce the most vivid colors. -# Minimum value: 0, maximum value: 255, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_SAT = 100 - -# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the -# luminance component of the colors in the HTML output. Values below 100 -# gradually make the output lighter, whereas values above 100 make the output -# darker. The value divided by 100 is the actual gamma applied, so 80 represents -# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not -# change the gamma. -# Minimum value: 40, maximum value: 240, default value: 80. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_GAMMA = 80 - -# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML -# page will contain the date and time when the page was generated. Setting this -# to YES can help to show when doxygen was last run and thus if the -# documentation is up to date. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. 
- -HTML_TIMESTAMP = YES - -# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML -# documentation will contain sections that can be hidden and shown after the -# page has loaded. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_DYNAMIC_SECTIONS = NO - -# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries -# shown in the various tree structured indices initially; the user can expand -# and collapse entries dynamically later on. Doxygen will expand the tree to -# such a level that at most the specified number of entries are visible (unless -# a fully collapsed tree already exceeds this amount). So setting the number of -# entries 1 will produce a full collapsed tree by default. 0 is a special value -# representing an infinite number of entries and will result in a full expanded -# tree by default. -# Minimum value: 0, maximum value: 9999, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_INDEX_NUM_ENTRIES = 100 - -# If the GENERATE_DOCSET tag is set to YES, additional index files will be -# generated that can be used as input for Apple's Xcode 3 integrated development -# environment (see: http://developer.apple.com/tools/xcode/), introduced with -# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a -# Makefile in the HTML output directory. Running make will produce the docset in -# that directory and running make install will install the docset in -# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at -# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html -# for more information. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_DOCSET = NO - -# This tag determines the name of the docset feed. A documentation feed provides -# an umbrella under which multiple documentation sets from a single provider -# (such as a company or product suite) can be grouped. -# The default value is: Doxygen generated docs. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_FEEDNAME = "Doxygen generated docs" - -# This tag specifies a string that should uniquely identify the documentation -# set bundle. This should be a reverse domain-name style string, e.g. -# com.mycompany.MyDocSet. Doxygen will append .docset to the name. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_BUNDLE_ID = org.doxygen.Project - -# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify -# the documentation publisher. This should be a reverse domain-name style -# string, e.g. com.mycompany.MyDocSet.documentation. -# The default value is: org.doxygen.Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_PUBLISHER_ID = org.doxygen.Publisher - -# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. -# The default value is: Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_PUBLISHER_NAME = Publisher - -# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three -# additional HTML index files: index.hhp, index.hhc, and index.hhk. The -# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop -# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on -# Windows. 
-# -# The HTML Help Workshop contains a compiler that can convert all HTML output -# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML -# files are now used as the Windows 98 help format, and will replace the old -# Windows help format (.hlp) on all Windows platforms in the future. Compressed -# HTML files also contain an index, a table of contents, and you can search for -# words in the documentation. The HTML workshop also contains a viewer for -# compressed HTML files. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_HTMLHELP = NO - -# The CHM_FILE tag can be used to specify the file name of the resulting .chm -# file. You can add a path in front of the file if the result should not be -# written to the html output directory. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -CHM_FILE = - -# The HHC_LOCATION tag can be used to specify the location (absolute path -# including file name) of the HTML help compiler (hhc.exe). If non-empty, -# doxygen will try to run the HTML help compiler on the generated index.hhp. -# The file has to be specified with full path. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -HHC_LOCATION = - -# The GENERATE_CHI flag controls if a separate .chi index file is generated -# (YES) or that it should be included in the master .chm file (NO). -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -GENERATE_CHI = NO - -# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) -# and project file content. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -CHM_INDEX_ENCODING = - -# The BINARY_TOC flag controls whether a binary table of contents is generated -# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it -# enables the Previous and Next buttons. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members to -# the table of contents of the HTML help documentation and to the tree view. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -TOC_EXPAND = NO - -# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and -# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that -# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help -# (.qch) of the generated HTML documentation. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_QHP = NO - -# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify -# the file name of the resulting .qch file. The path specified is relative to -# the HTML output folder. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QCH_FILE = - -# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help -# Project output. For more information please see Qt Help Project / Namespace -# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_NAMESPACE = org.doxygen.Project - -# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt -# Help Project output. 
For more information please see Qt Help Project / Virtual -# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- -# folders). -# The default value is: doc. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_VIRTUAL_FOLDER = doc - -# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom -# filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- -# filters). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_CUST_FILTER_NAME = - -# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the -# custom filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- -# filters). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_CUST_FILTER_ATTRS = - -# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this -# project's filter section matches. Qt Help Project / Filter Attributes (see: -# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_SECT_FILTER_ATTRS = - -# The QHG_LOCATION tag can be used to specify the location of Qt's -# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the -# generated .qhp file. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHG_LOCATION = - -# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be -# generated, together with the HTML files, they form an Eclipse help plugin. To -# install this plugin and make it available under the help contents menu in -# Eclipse, the contents of the directory containing the HTML and XML files needs -# to be copied into the plugins directory of eclipse. The name of the directory -# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. -# After copying Eclipse needs to be restarted before the help appears. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_ECLIPSEHELP = NO - -# A unique identifier for the Eclipse help plugin. When installing the plugin -# the directory name containing the HTML and XML files should also have this -# name. Each documentation set should have its own identifier. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. - -ECLIPSE_DOC_ID = org.doxygen.Project - -# If you want full control over the layout of the generated HTML pages it might -# be necessary to disable the index and replace it with your own. The -# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top -# of each HTML page. A value of NO enables the index and the value YES disables -# it. Since the tabs in the index contain the same information as the navigation -# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -DISABLE_INDEX = NO - -# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index -# structure should be generated to display hierarchical information. If the tag -# value is set to YES, a side panel will be generated containing a tree-like -# index structure (just like the one that is generated for HTML Help). 
For this -# to work a browser that supports JavaScript, DHTML, CSS and frames is required -# (i.e. any modern browser). Windows users are probably better off using the -# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can -# further fine-tune the look of the index. As an example, the default style -# sheet generated by doxygen has an example that shows how to put an image at -# the root of the tree instead of the PROJECT_NAME. Since the tree basically has -# the same information as the tab index, you could consider setting -# DISABLE_INDEX to YES when enabling this option. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_TREEVIEW = NO - -# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that -# doxygen will group on one line in the generated HTML documentation. -# -# Note that a value of 0 will completely suppress the enum values from appearing -# in the overview section. -# Minimum value: 0, maximum value: 20, default value: 4. -# This tag requires that the tag GENERATE_HTML is set to YES. - -ENUM_VALUES_PER_LINE = 4 - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used -# to set the initial width (in pixels) of the frame in which the tree is shown. -# Minimum value: 0, maximum value: 1500, default value: 250. -# This tag requires that the tag GENERATE_HTML is set to YES. - -TREEVIEW_WIDTH = 250 - -# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to -# external symbols imported via tag files in a separate window. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -EXT_LINKS_IN_WINDOW = NO - -# Use this tag to change the font size of LaTeX formulas included as images in -# the HTML documentation. When you change the font size after a successful -# doxygen run you need to manually remove any form_*.png images from the HTML -# output directory to force them to be regenerated. -# Minimum value: 8, maximum value: 50, default value: 10. -# This tag requires that the tag GENERATE_HTML is set to YES. - -FORMULA_FONTSIZE = 10 - -# Use the FORMULA_TRANPARENT tag to determine whether or not the images -# generated for formulas are transparent PNGs. Transparent PNGs are not -# supported properly for IE 6.0, but are supported on all modern browsers. -# -# Note that when changing this option you need to delete any form_*.png files in -# the HTML output directory before the changes have effect. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. - -FORMULA_TRANSPARENT = YES - -# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see -# http://www.mathjax.org) which uses client side Javascript for the rendering -# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX -# installed or if you want to formulas look prettier in the HTML output. When -# enabled you may also need to install MathJax separately and configure the path -# to it using the MATHJAX_RELPATH option. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -USE_MATHJAX = NO - -# When MathJax is enabled you can set the default output format to be used for -# the MathJax output. See the MathJax site (see: -# http://docs.mathjax.org/en/latest/output.html) for more details. -# Possible values are: HTML-CSS (which is slower, but has the best -# compatibility), NativeMML (i.e. MathML) and SVG. -# The default value is: HTML-CSS. 
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_FORMAT = HTML-CSS
-
-# When MathJax is enabled you need to specify the location relative to the HTML
-# output directory using the MATHJAX_RELPATH option. The destination directory
-# should contain the MathJax.js script. For instance, if the mathjax directory
-# is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
-# Content Delivery Network so you can quickly see the result without installing
-# MathJax. However, it is strongly recommended to install a local copy of
-# MathJax from http://www.mathjax.org before deployment.
-# The default value is: http://cdn.mathjax.org/mathjax/latest.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_RELPATH = http://www.mathjax.org/mathjax
-
-# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
-# extension names that should be enabled during MathJax rendering. For example
-# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_EXTENSIONS =
-
-# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
-# of code that will be used on startup of the MathJax code. See the MathJax site
-# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
-# example see the documentation.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_CODEFILE =
-
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
-# the HTML output. The underlying search engine uses javascript and DHTML and
-# should work on any modern browser. Note that when using HTML help
-# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
-# there is already a search function so this one should typically be disabled.
-# For large projects the javascript based search engine can be slow, then
-# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
-# search using the keyboard; to jump to the search box use <access key> + S
-# (what the <access key> is depends on the OS and browser, but it is typically
-# <CTRL>, <ALT>/