#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import contextlib
import json
import os
import platform
import re
import shlex
import shutil
import subprocess
import sys
from pathlib import Path


def version_to_tuple(version: str) -> tuple:
    v = []
    for s in version.split("."):
        with contextlib.suppress(ValueError):
            v.append(int(s))
    return tuple(v)
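
# For reference, version_to_tuple skips non-integer components rather than raising:
#   version_to_tuple("3.26.4")    -> (3, 26, 4)
#   version_to_tuple("3.26.4rc1") -> (3, 26)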
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
REPO_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", ".."))
sys.path.insert(0, os.path.join(REPO_DIR, "tools", "python"))
import util.android as android # noqa: E402
from util import get_logger, is_linux, is_macOS, is_windows, run # noqa: E402
log = get_logger("build")


class BaseError(Exception):
    """Base class for errors originating from build.py."""

    pass


class BuildError(BaseError):
    """Error from running build steps."""

    def __init__(self, *messages):
        super().__init__("\n".join(messages))


class UsageError(BaseError):
    """Usage related error."""

    def __init__(self, message):
        super().__init__(message)


def _check_python_version():
    required_minor_version = 7
    if (sys.version_info.major, sys.version_info.minor) < (3, required_minor_version):
        raise UsageError(
            f"Invalid Python version. At least Python 3.{required_minor_version} is required. "
            f"Actual Python version: {sys.version}"
        )


def _str_to_bool(s):
    """Convert string to bool (in argparse context)."""
    if s.lower() not in ["true", "false"]:
        raise ValueError("Need bool; got %r" % s)
    return {"true": True, "false": False}[s.lower()]
_check_python_version()


def _openvino_verify_device_type(device_read):
    choices = ["CPU_FP32", "CPU_FP16", "GPU_FP32", "GPU_FP16"]

    choices1 = [
        "CPU_FP32_NO_PARTITION",
        "CPU_FP16_NO_PARTITION",
        "GPU_FP32_NO_PARTITION",
        "GPU_FP16_NO_PARTITION",
    ]
    status_hetero = True
    res = False
    if device_read in choices:
        res = True
    elif device_read in choices1:
        res = True
    elif device_read.startswith("HETERO:") or device_read.startswith("MULTI:") or device_read.startswith("AUTO:"):
        res = True
        comma_separated_devices = device_read.split(":")
        comma_separated_devices = comma_separated_devices[1].split(",")
        if len(comma_separated_devices) < 2:
            print("At least two devices are required in HETERO/MULTI/AUTO mode")
            status_hetero = False
        dev_options = ["CPU", "GPU"]
        for dev in comma_separated_devices:
            if dev not in dev_options:
                status_hetero = False
                break

    def invalid_hetero_build():
        print("\nIf trying to build HETERO/MULTI/AUTO, specify the supported devices along with it.\n")
        print("Specify the keyword HETERO, MULTI or AUTO followed by the devices ")
        print("in the order of priority you want to build.\n")
        print("The hardware devices that can be added with HETERO, MULTI or AUTO")
        print("are ['CPU', 'GPU'].\n")
        print("An example of how to specify the HETERO build type: HETERO:GPU,CPU\n")
        print("An example of how to specify the MULTI build type: MULTI:GPU,CPU\n")
        print("An example of how to specify the AUTO build type: AUTO:GPU,CPU\n")
        sys.exit("Wrong build type selected")

    if res is False:
        print("\nYou have selected a wrong configuration for the build.")
        print("Pick the build type for a specific hardware device from the following options: ", choices)
        print("(or) from the following options with graph partitioning disabled: ", choices1)
        print("\n")
        if not (device_read.startswith("HETERO") or device_read.startswith("MULTI") or device_read.startswith("AUTO")):
            invalid_hetero_build()
        sys.exit("Wrong build type selected")

    if status_hetero is False:
        invalid_hetero_build()

    return device_read
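
# Accepted values, for reference: a single device such as "CPU_FP32" or "GPU_FP16",
# a "_NO_PARTITION" variant such as "CPU_FP32_NO_PARTITION", or a combined mode with at
# least two of CPU/GPU after the colon, e.g. "HETERO:GPU,CPU", "MULTI:GPU,CPU", "AUTO:GPU,CPU".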


def parse_arguments():
    class Parser(argparse.ArgumentParser):
        # override argument file line parsing behavior - allow multiple arguments per line and handle quotes
        def convert_arg_line_to_args(self, arg_line):
            return shlex.split(arg_line)

    parser = Parser(
        description="ONNXRuntime CI build driver.",
        usage="""
        Default behavior is --update --build --test for native architecture builds.
        Default behavior is --update --build for cross-compiled builds.

        The Update phase will update git submodules, and run cmake to generate makefiles.
        The Build phase will build all projects.
        The Test phase will run all unit tests, and optionally the ONNX tests.

        Use the individual flags to only run the specified stages.
        """,
        # files containing arguments can be specified on the command line with "@<filename>" and the arguments within
        # will be included at that point
        fromfile_prefix_chars="@",
    )

    # Main arguments
    parser.add_argument("--build_dir", required=True, help="Path to the build directory.")
    parser.add_argument(
        "--config",
        nargs="+",
        default=["Debug"],
        choices=["Debug", "MinSizeRel", "Release", "RelWithDebInfo"],
        help="Configuration(s) to build.",
    )
    parser.add_argument("--update", action="store_true", help="Update makefiles.")
    parser.add_argument("--build", action="store_true", help="Build.")
    parser.add_argument(
        "--clean", action="store_true", help="Run 'cmake --build --target clean' for the selected config/s."
    )
    parser.add_argument(
        "--parallel",
        nargs="?",
        const="0",
        default="1",
        type=int,
        help="Use parallel build. The optional value specifies the maximum number of parallel jobs. "
        "If the optional value is 0 or unspecified, it is interpreted as the number of CPUs.",
    )
    parser.add_argument(
        "--nvcc_threads",
        nargs="?",
        default=-1,
        type=int,
        help="Maximum number of NVCC threads in each parallel job. "
        "If the value is unspecified, it will be computed based on available memory and the number of parallel jobs.",
    )
    parser.add_argument("--test", action="store_true", help="Run unit tests.")
    parser.add_argument("--skip_tests", action="store_true", help="Skip all tests.")
    parser.add_argument(
        "--compile_no_warning_as_error",
        action="store_true",
        help="Prevent warnings from being treated as errors during compilation.",
    )

    # Training options
    parser.add_argument("--enable_nvtx_profile", action="store_true", help="Enable NVTX profile in ORT.")
    parser.add_argument("--enable_memory_profile", action="store_true", help="Enable memory profile in ORT.")
    parser.add_argument(
        "--enable_training",
        action="store_true",
        help="Enable full training functionality in ORT. Includes ORTModule and ORT Training APIs.",
    )
    parser.add_argument("--enable_training_apis", action="store_true", help="Enable ORT training APIs.")
    parser.add_argument("--enable_training_ops", action="store_true", help="Enable training ops in inference graph.")
    parser.add_argument("--enable_nccl", action="store_true", help="Enable NCCL.")
    parser.add_argument("--mpi_home", help="Path to MPI installation dir")
    parser.add_argument("--nccl_home", help="Path to NCCL installation dir")
    parser.add_argument(
        "--use_mpi", nargs="?", default=False, const=True, type=_str_to_bool, help="Disabled by default."
    )

    # enable ONNX tests
    parser.add_argument(
        "--enable_onnx_tests",
        action="store_true",
        help="""When running the Test phase, run onnx_test_runner against
        available test data directories.""",
    )
    parser.add_argument("--path_to_protoc_exe", help="Path to protoc exe.")
    parser.add_argument("--fuzz_testing", action="store_true", help="Enable fuzz testing of the onnxruntime.")
    parser.add_argument(
        "--enable_symbolic_shape_infer_tests",
        action="store_true",
        help="""When running the Test phase, run symbolic shape inference against
        available test data directories.""",
    )

    # generate documentation
    parser.add_argument(
        "--gen_doc",
        nargs="?",
        const="yes",
        type=str,
        help="Generate documentation listing the standard ONNX operators and types implemented by "
        "various execution providers and contrib operator schemas. Must only be used with inference builds. "
        "Use `--gen_doc validate` to validate that these match the current contents in /docs.",
    )
    parser.add_argument("--gen-api-doc", action="store_true", help="Generate API documentation for PyTorch frontend")

    # CUDA related
    parser.add_argument("--use_cuda", action="store_true", help="Enable CUDA.")
    parser.add_argument(
        "--cuda_version", help="The version of CUDA toolkit to use. Auto-detect if not specified. e.g. 9.0"
    )
    parser.add_argument(
        "--cuda_home",
        help="Path to CUDA home. "
        "Read from the CUDA_HOME environment variable if --use_cuda is true and "
        "--cuda_home is not specified.",
    )
    parser.add_argument(
        "--cudnn_home",
        help="Path to CUDNN home. "
        "Read from the CUDNN_HOME environment variable if --use_cuda is true and "
        "--cudnn_home is not specified.",
    )
    parser.add_argument("--enable_cuda_line_info", action="store_true", help="Enable CUDA line info.")
    parser.add_argument("--enable_cuda_nhwc_ops", action="store_true", help="Enable CUDA NHWC ops in build.")

    # Python bindings
    parser.add_argument("--enable_pybind", action="store_true", help="Enable Python Bindings.")
    parser.add_argument("--build_wheel", action="store_true", help="Build Python Wheel.")
    parser.add_argument(
        "--wheel_name_suffix",
        help="Suffix to append to created wheel names. This value is currently only used for nightly builds.",
    )
    parser.add_argument(
        "--numpy_version", help="Installs a specific version of numpy before building the python binding."
    )
    parser.add_argument("--skip-keras-test", action="store_true", help="Skip tests with Keras if keras is installed")

    # C-Sharp bindings
    parser.add_argument(
        "--build_csharp",
        action="store_true",
        help="Build C#.Net DLL and NuGet package. This should only be used in CI pipelines. "
        "For building C# bindings and packaging them into a NuGet package, use the --build_nuget arg.",
    )
    parser.add_argument(
        "--build_nuget",
        action="store_true",
        help="Build C#.Net DLL and NuGet package on the local machine. "
        "Currently only Windows and Linux platforms are supported.",
    )
    parser.add_argument(
        "--msbuild_extra_options",
        nargs="+",
        action="append",
        help="Extra properties to pass to msbuild during build. "
        "These are just msbuild /p: options without the leading /p:.",
    )

    # Java bindings
    parser.add_argument("--build_java", action="store_true", help="Build Java bindings.")

    # Node.js binding
    parser.add_argument("--build_nodejs", action="store_true", help="Build Node.js binding and NPM package.")

    # Objective-C binding
    parser.add_argument("--build_objc", action="store_true", help="Build Objective-C binding.")

    # Build a shared lib
    parser.add_argument("--build_shared_lib", action="store_true", help="Build a shared library for the ONNXRuntime.")

    # Build an Apple framework
    parser.add_argument(
        "--build_apple_framework", action="store_true", help="Build a macOS/iOS framework for the ONNXRuntime."
    )

    # Build options
    parser.add_argument(
        "--cmake_extra_defines",
        nargs="+",
        action="append",
        help="Extra definitions to pass to CMake during build system "
        "generation. These are just CMake -D options without the leading -D.",
    )
    parser.add_argument("--target", help="Build a specific target, e.g. winml_dll")
    # This flag is needed when:
    # 1. The OS is 64-bit Windows,
    # 2. the target binary is for 32-bit Windows,
    # 3. and the Python used to run this script is 64-bit.
    # But if you can get a 32-bit Python, the build will run better and you won't need this flag.
    parser.add_argument(
        "--x86",
        action="store_true",
        help="[cross-compiling] Create Windows x86 makefiles. Requires --update and no existing cached "
        "CMake setup. Delete CMakeCache.txt if needed.",
    )
    parser.add_argument(
        "--arm",
        action="store_true",
        help="[cross-compiling] Create ARM makefiles. Requires --update and no existing cached "
        "CMake setup. Delete CMakeCache.txt if needed.",
    )
    parser.add_argument(
        "--arm64",
        action="store_true",
        help="[cross-compiling] Create ARM64 makefiles. Requires --update and no existing cached "
        "CMake setup. Delete CMakeCache.txt if needed.",
    )
    parser.add_argument(
        "--arm64ec",
        action="store_true",
        help="[cross-compiling] Create ARM64EC makefiles. Requires --update and no existing cached "
        "CMake setup. Delete CMakeCache.txt if needed.",
    )
    parser.add_argument(
        "--buildasx",
        action="store_true",
        help="[cross-compiling] Create ARM64X binary.",
    )
    parser.add_argument("--msvc_toolset", help="MSVC toolset to use. e.g. 14.11")
    parser.add_argument("--windows_sdk_version", help="Windows SDK version to use. e.g. 10.0.19041.0")
    parser.add_argument("--android", action="store_true", help="Build for Android")
    parser.add_argument(
        "--android_abi",
        default="arm64-v8a",
        choices=["armeabi-v7a", "arm64-v8a", "x86", "x86_64"],
        help="Specify the target Android Application Binary Interface (ABI)",
    )
    parser.add_argument("--android_api", type=int, default=27, help="Android API Level, e.g. 21")
    parser.add_argument(
        "--android_sdk_path", type=str, default=os.environ.get("ANDROID_HOME", ""), help="Path to the Android SDK"
    )
    parser.add_argument(
        "--android_ndk_path", type=str, default=os.environ.get("ANDROID_NDK_HOME", ""), help="Path to the Android NDK"
    )
    parser.add_argument(
        "--android_cpp_shared",
        action="store_true",
        help="Build with shared libc++ instead of the default static libc++.",
    )
    parser.add_argument("--android_run_emulator", action="store_true", help="Start up an Android emulator if needed.")
    parser.add_argument("--use_gdk", action="store_true", help="Build with the GDK toolchain.")
    parser.add_argument(
        "--gdk_edition",
        default=os.path.normpath(os.environ.get("GameDKLatest", "")).split(os.sep)[-1],  # noqa: SIM112
        help="Build with a specific GDK edition. Defaults to the latest installed.",
    )
    parser.add_argument("--gdk_platform", default="Scarlett", help="Sets the GDK target platform.")
    parser.add_argument("--ios", action="store_true", help="Build for iOS")
    parser.add_argument(
        "--apple_sysroot", default="", help="Specify the location name of the macOS platform SDK to be used"
    )
    parser.add_argument(
        "--ios_toolchain_file",
        default="",
        help="Path to an iOS toolchain file; otherwise cmake/onnxruntime_ios.toolchain.cmake will be used",
    )
    parser.add_argument(
        "--xcode_code_signing_team_id", default="", help="The development team ID used for code signing in Xcode"
    )
    parser.add_argument(
        "--xcode_code_signing_identity", default="", help="The development identity used for code signing in Xcode"
    )
    parser.add_argument(
        "--use_xcode",
        action="store_const",
        const="Xcode",
        dest="cmake_generator",
        help="Use Xcode as the cmake generator. This is only supported on macOS. Equivalent to '--cmake_generator Xcode'.",
    )
    parser.add_argument(
        "--osx_arch",
        default="arm64" if platform.machine() == "arm64" else "x86_64",
        choices=["arm64", "arm64e", "x86_64"],
        help="Specify the target architecture for macOS and iOS. This is only supported on macOS.",
    )
    parser.add_argument(
        "--apple_deploy_target",
        type=str,
        help="Specify the minimum version of the target platform "
        "(e.g. macOS or iOS). "
        "This is only supported on macOS.",
    )
    parser.add_argument(
        "--disable_memleak_checker", action="store_true", help="Disable the memory leak checker in the Windows build"
    )

    # WebAssembly build
    parser.add_argument("--build_wasm", action="store_true", help="Build for WebAssembly")
    parser.add_argument("--build_wasm_static_lib", action="store_true", help="Build for WebAssembly static library")
    parser.add_argument("--emsdk_version", default="3.1.44", help="Specify version of emsdk")
    parser.add_argument("--enable_wasm_simd", action="store_true", help="Enable WebAssembly SIMD")
    parser.add_argument("--enable_wasm_threads", action="store_true", help="Enable WebAssembly multi-threads support")
    parser.add_argument(
        "--disable_wasm_exception_catching", action="store_true", help="Disable exception catching in WebAssembly."
    )
    parser.add_argument(
        "--enable_wasm_api_exception_catching", action="store_true", help="Catch exceptions at the top-level API."
    )
    parser.add_argument(
        "--enable_wasm_exception_throwing_override",
        action="store_true",
        help="Enable exception throwing in WebAssembly. This overrides the default behavior of disabling "
        "exception throwing when exceptions are disabled.",
    )
    parser.add_argument("--wasm_run_tests_in_browser", action="store_true", help="Run WebAssembly tests in browser")
    parser.add_argument(
        "--enable_wasm_profiling", action="store_true", help="Enable WebAssembly profiling and preserve function names"
    )
    parser.add_argument(
        "--enable_wasm_debug_info", action="store_true", help="Build WebAssembly with DWARF format debug info"
    )
    parser.add_argument("--wasm_malloc", help="Specify memory allocator for WebAssembly")
    parser.add_argument(
        "--emscripten_settings",
        nargs="+",
        action="append",
        help="Extra emscripten settings to pass to emcc using '-s <key>=<value>' during build.",
    )

    # Enable onnxruntime-extensions
    parser.add_argument(
        "--use_extensions",
        action="store_true",
        help="Enable custom operators in onnxruntime-extensions. Uses the git submodule onnxruntime-extensions "
        "in path cmake/external/onnxruntime-extensions by default.",
    )
    parser.add_argument(
        "--extensions_overridden_path",
        type=str,
        help="Path to pre-pulled onnxruntime-extensions; overrides the default onnxruntime-extensions path.",
    )

    # Arguments needed by CI
    parser.add_argument("--cmake_path", default="cmake", help="Path to the CMake program.")
    parser.add_argument(
        "--ctest_path",
        default="ctest",
        help="Path to the CTest program. It can be an empty string. If it is empty, "
        "we will use this script to drive the test programs directly.",
    )
    parser.add_argument(
        "--skip_submodule_sync",
        action="store_true",
        help="Don't do a 'git submodule update'. Makes the Update phase faster.",
    )
    parser.add_argument("--use_mimalloc", action="store_true", help="Use mimalloc allocator")
    parser.add_argument("--use_dnnl", action="store_true", help="Build with DNNL.")
    parser.add_argument(
        "--dnnl_gpu_runtime", action="store", default="", type=str.lower, help="e.g. --dnnl_gpu_runtime ocl"
    )
    parser.add_argument(
        "--dnnl_opencl_root",
        action="store",
        default="",
        help="Path to OpenCL SDK. "
        'e.g. --dnnl_opencl_root "C:/Program Files (x86)/IntelSWTools/sw_dev_tools/OpenCL/sdk"',
    )
    parser.add_argument(
        "--use_openvino",
        nargs="?",
        const="CPU_FP32",
        type=_openvino_verify_device_type,
        help="Build with OpenVINO for specific hardware.",
    )
    parser.add_argument(
        "--dnnl_aarch64_runtime", action="store", default="", type=str.lower, help="e.g. --dnnl_aarch64_runtime acl"
    )
    parser.add_argument(
        "--dnnl_acl_root",
        action="store",
        default="",
        help='Path to ACL ROOT DIR. e.g. --dnnl_acl_root "$HOME/ComputeLibrary/"',
    )
    parser.add_argument("--use_coreml", action="store_true", help="Build with CoreML support.")
    parser.add_argument("--use_webnn", action="store_true", help="Build with WebNN support.")
    parser.add_argument("--use_snpe", action="store_true", help="Build with SNPE support.")
    parser.add_argument("--snpe_root", help="Path to SNPE SDK root.")
    parser.add_argument("--use_nnapi", action="store_true", help="Build with NNAPI support.")
    parser.add_argument(
        "--nnapi_min_api", type=int, help="Minimum Android API level to enable NNAPI, should be no less than 27"
    )
    parser.add_argument("--use_jsep", action="store_true", help="Build with JavaScript kernels.")
    parser.add_argument("--use_qnn", action="store_true", help="Build with QNN support.")
    parser.add_argument("--qnn_home", help="Path to QNN SDK dir.")
    parser.add_argument("--use_rknpu", action="store_true", help="Build with RKNPU.")
    parser.add_argument("--use_preinstalled_eigen", action="store_true", help="Use pre-installed Eigen.")
    parser.add_argument("--eigen_path", help="Path to pre-installed Eigen.")
    parser.add_argument("--enable_msinternal", action="store_true", help="Enable for Microsoft internal builds only.")
    parser.add_argument("--llvm_path", help="Path to llvm dir")
    parser.add_argument("--use_vitisai", action="store_true", help="Build with Vitis-AI")
    parser.add_argument("--use_tvm", action="store_true", help="Build with TVM")
    parser.add_argument("--tvm_cuda_runtime", action="store_true", default=False, help="Build TVM with CUDA support")
    parser.add_argument(
        "--use_tvm_hash", action="store_true", help="Build ipp-crypto for hash generation. It is used by the TVM EP only"
    )
    parser.add_argument("--use_tensorrt", action="store_true", help="Build with TensorRT")
    parser.add_argument(
        "--use_tensorrt_builtin_parser", action="store_true", default=True, help="Use TensorRT builtin parser"
    )
    parser.add_argument("--use_tensorrt_oss_parser", action="store_true", help="Use TensorRT OSS parser")
    parser.add_argument("--tensorrt_home", help="Path to TensorRT installation dir")
    parser.add_argument("--test_all_timeout", default="10800", help="Set timeout for onnxruntime_test_all")
    parser.add_argument("--use_migraphx", action="store_true", help="Build with MIGraphX")
    parser.add_argument("--migraphx_home", help="Path to MIGraphX installation dir")
    parser.add_argument("--use_full_protobuf", action="store_true", help="Use the full protobuf library")
    parser.add_argument(
        "--llvm_config",
        type=str,
        default="",
        help="Path to llvm-config.exe for LLVM built from sources. It is required when building on Windows.",
    )
    parser.add_argument(
        "--skip_onnx_tests",
        action="store_true",
        help="Explicitly disable all onnx related tests. Note: Use --skip_tests to skip all tests.",
    )
    parser.add_argument("--skip_winml_tests", action="store_true", help="Explicitly disable all WinML related tests")
    parser.add_argument("--skip_nodejs_tests", action="store_true", help="Explicitly disable all Node.js binding tests")
    parser.add_argument(
        "--enable_msvc_static_runtime", action="store_true", help="Enable static linking of MSVC runtimes."
    )
    parser.add_argument(
        "--enable_language_interop_ops",
        action="store_true",
        help="Enable operators implemented in languages other than C++.",
    )
    parser.add_argument(
        "--cmake_generator",
        choices=[
            "MinGW Makefiles",
            "Ninja",
            "NMake Makefiles",
            "Unix Makefiles",
            "Visual Studio 17 2022",
            "Xcode",
        ],
        default=None,
        help="Specify the generator that CMake invokes.",
    )
parser.add_argument("--use_dml", action="store_true", help="Build with DirectML.")
parser.add_argument(
"--dml_path",
type=str,
default="",
help="Path to a custom DirectML installation (must have bin/, lib/, and include/ subdirectories).",
)
parser.add_argument("--use_winml", action="store_true", help="Build with WinML.")
parser.add_argument(
"--winml_root_namespace_override", type=str, help="Specify the namespace that WinML builds into."
)
parser.add_argument(
"--dml_external_project", action="store_true", help="Build with DirectML as an external project."
)
parser.add_argument(
"--use_telemetry", action="store_true", help="Only official builds can set this flag to enable telemetry."
)
parser.add_argument("--enable_wcos", action="store_true", help="Build for Windows Core OS.")
parser.add_argument("--enable_lto", action="store_true", help="Enable Link Time Optimization")
parser.add_argument("--enable_transformers_tool_test", action="store_true", help="Enable transformers tool test")
parser.add_argument(
"--use_acl",
nargs="?",
const="ACL_1905",
choices=["ACL_1902", "ACL_1905", "ACL_1908", "ACL_2002"],
help="Build with ACL for ARM architectures.",
)
parser.add_argument("--acl_home", help="Path to ACL home dir")
parser.add_argument("--acl_libs", help="Path to ACL libraries")
parser.add_argument("--use_armnn", action="store_true", help="Enable ArmNN Execution Provider.")
parser.add_argument(
"--armnn_relu", action="store_true", help="Use the Relu operator implementation from the ArmNN EP."
)
parser.add_argument(
"--armnn_bn", action="store_true", help="Use the Batch Normalization operator implementation from the ArmNN EP."
)
parser.add_argument("--armnn_home", help="Path to ArmNN home dir")
parser.add_argument("--armnn_libs", help="Path to ArmNN libraries")
parser.add_argument("--build_micro_benchmarks", action="store_true", help="Build ONNXRuntime micro-benchmarks.")
# options to reduce binary size
parser.add_argument(
"--minimal_build",
default=None,
nargs="*",
type=str.lower,
help="Create a build that only supports ORT format models. "
"See https://onnxruntime.ai/docs/tutorials/mobile/ for more information. "
"RTTI is automatically disabled in a minimal build. "
"To enable execution providers that compile kernels at runtime (e.g. NNAPI) pass 'extended' "
"as a parameter. e.g. '--minimal_build extended'. "
"To enable support for custom operators pass 'custom_ops' as a parameter. "
"e.g. '--minimal_build custom_ops'. This can be combined with an 'extended' build by passing "
"'--minimal_build extended custom_ops'",
)
parser.add_argument(
"--include_ops_by_config",
type=str,
help="Include ops from config file. See /docs/Reduced_Operator_Kernel_build.md for more information.",
)
parser.add_argument(
"--enable_reduced_operator_type_support",
action="store_true",
help="If --include_ops_by_config is specified, and the configuration file has type reduction "
"information, limit the types individual operators support where possible to further "
"reduce the build size. "
"See /docs/Reduced_Operator_Kernel_build.md for more information.",
)
parser.add_argument("--disable_contrib_ops", action="store_true", help="Disable contrib ops (reduces binary size)")
parser.add_argument(
"--disable_ml_ops", action="store_true", help="Disable traditional ML ops (reduces binary size)"
)
# Please note in our CMakeLists.txt this is already default on. But in this file we reverse it to default OFF.
parser.add_argument("--disable_rtti", action="store_true", help="Disable RTTI (reduces binary size)")
parser.add_argument(
"--disable_types",
nargs="+",
default=[],
choices=["float8", "optional", "sparsetensor"],
help="Disable selected data types (reduces binary size)",
)
parser.add_argument(
"--disable_exceptions",
action="store_true",
help="Disable exceptions to reduce binary size. Requires --minimal_build.",
)
parser.add_argument("--rocm_version", help="The version of ROCM stack to use. ")
parser.add_argument("--use_rocm", action="store_true", help="Build with ROCm")
parser.add_argument("--rocm_home", help="Path to ROCm installation dir")

    # Code coverage
    parser.add_argument(
        "--code_coverage", action="store_true", help="Generate code coverage when targeting Android (only)."
    )

    # lazy tensor support
    parser.add_argument(
        "--enable_lazy_tensor", action="store_true", help="Enable using ORT as a backend in PyTorch LazyTensor."
    )
    parser.add_argument("--ms_experimental", action="store_true", help="Build Microsoft experimental operators.")
    parser.add_argument(
        "--enable_external_custom_op_schemas",
        action="store_true",
        help="Enable registering user-defined custom operator schemas at shared library load time. "
        "This feature is only supported/available on Ubuntu.",
    )
    parser.add_argument(
        "--external_graph_transformer_path", type=str, help="Path to the external graph transformer dir."
    )
    parser.add_argument(
        "--enable_cuda_profiling",
        action="store_true",
        help="Enable CUDA kernel profiling. The CUPTI library must be added to PATH beforehand.",
    )
    parser.add_argument("--use_cann", action="store_true", help="Build with CANN")
    parser.add_argument("--cann_home", help="Path to CANN installation dir")
    parser.add_argument(
        "--enable_rocm_profiling",
        action="store_true",
        help="Enable ROCm kernel profiling.",
    )
    parser.add_argument("--use_xnnpack", action="store_true", help="Enable XNNPACK EP.")
    parser.add_argument("--use_azure", action="store_true", help="Enable Azure EP.")
    parser.add_argument("--use_cache", action="store_true", help="Use compiler cache in CI")
    parser.add_argument("--use_triton_kernel", action="store_true", help="Use Triton compiled kernels")
    parser.add_argument("--use_lock_free_queue", action="store_true", help="Use lock-free task queue for threadpool.")

    if not is_windows():
        parser.add_argument(
            "--allow_running_as_root",
            action="store_true",
            help="Allow the build to be run as the root user. This is not allowed by default.",
        )

    args = parser.parse_args()

    if args.android_sdk_path:
        args.android_sdk_path = os.path.normpath(args.android_sdk_path)
    if args.android_ndk_path:
        args.android_ndk_path = os.path.normpath(args.android_ndk_path)

    if args.enable_wasm_api_exception_catching:
        # if we catch at the API level, we don't want to catch everything
        args.disable_wasm_exception_catching = True
    if not args.disable_wasm_exception_catching or args.enable_wasm_api_exception_catching:
        # doesn't make sense to catch if no one throws
        args.enable_wasm_exception_throwing_override = True

    if args.cmake_generator is None and is_windows():
        args.cmake_generator = "Ninja" if args.build_wasm else "Visual Studio 17 2022"

    return args
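
# For reference, a typical native build invocation looks something like the following
# (the build directory is only an example; pick flags to match your target):
#   python tools/ci_build/build.py --build_dir build/Linux --config Release \
#       --build_shared_lib --parallel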


def is_reduced_ops_build(args):
    return args.include_ops_by_config is not None


def resolve_executable_path(command_or_path):
    """Returns the absolute path of an executable."""
    if command_or_path and command_or_path.strip():
        executable_path = shutil.which(command_or_path)
        if executable_path is None:
            raise BuildError(f"Failed to resolve executable path for '{command_or_path}'.")
        return os.path.abspath(executable_path)
    else:
        return None
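
# e.g. resolve_executable_path("cmake") returns the absolute path of the cmake binary found
# on PATH, raises BuildError if it cannot be found, and returns None for an empty/None input.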


def get_linux_distro():
    try:
        with open("/etc/os-release") as f:
            dist_info = dict(line.strip().split("=", 1) for line in f.readlines())
        return dist_info.get("NAME", "").strip('"'), dist_info.get("VERSION", "").strip('"')
    except (OSError, ValueError):
        return "", ""


def is_ubuntu_1604():
    dist, ver = get_linux_distro()
    return dist == "Ubuntu" and ver.startswith("16.04")


def get_config_build_dir(build_dir, config):
    # build directory per configuration
    return os.path.join(build_dir, config)
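
# e.g. get_config_build_dir("build/Linux", "Release") -> "build/Linux/Release"
# (joined with the platform's path separator)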


def run_subprocess(
    args,
    cwd=None,
    capture_stdout=False,
    dll_path=None,
    shell=False,
    env=None,
    python_path=None,
    cuda_home=None,
):
    if env is None:
        env = {}
    if isinstance(args, str):
        raise ValueError("args should be a sequence of strings, not a string")

    my_env = os.environ.copy()
    if dll_path:
        if is_windows():
            if "PATH" in my_env:
                my_env["PATH"] = dll_path + os.pathsep + my_env["PATH"]
            else:
                my_env["PATH"] = dll_path
        else:
            if "LD_LIBRARY_PATH" in my_env:
                my_env["LD_LIBRARY_PATH"] += os.pathsep + dll_path
            else:
                my_env["LD_LIBRARY_PATH"] = dll_path
    # Add nvcc's folder to PATH env so that our cmake file can find nvcc
    if cuda_home:
        my_env["PATH"] = os.path.join(cuda_home, "bin") + os.pathsep + my_env["PATH"]

    if python_path:
        if "PYTHONPATH" in my_env:
            my_env["PYTHONPATH"] += os.pathsep + python_path
        else:
            my_env["PYTHONPATH"] = python_path

    my_env.update(env)

    log.info(" ".join(args))

    return run(*args, cwd=cwd, capture_stdout=capture_stdout, shell=shell, env=my_env)
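
# Usage sketch (the variable names below are illustrative, not defined at this point):
#   run_subprocess([cmake_path, "--build", build_dir, "--config", config], cwd=build_dir)
# runs the command with the current environment plus the PATH/LD_LIBRARY_PATH/PYTHONPATH
# adjustments made in the function above.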


def update_submodules(source_dir):
    run_subprocess(["git", "submodule", "sync", "--recursive"], cwd=source_dir)
    run_subprocess(["git", "submodule", "update", "--init", "--recursive"], cwd=source_dir)


def is_docker():
    path = "/proc/self/cgroup"
    return (
        os.path.exists("/.dockerenv")
        or os.path.isfile(path)
        and any("docker" in line for line in open(path))  # noqa: SIM115
    )


def install_python_deps(numpy_version=""):
    dep_packages = ["setuptools", "wheel", "pytest"]
    dep_packages.append(f"numpy=={numpy_version}" if numpy_version else "numpy>=1.16.6")
    dep_packages.append("sympy>=1.10")
    dep_packages.append("packaging")
    dep_packages.append("cerberus")
    run_subprocess([sys.executable, "-m", "pip", "install", *dep_packages])


def setup_test_data(source_onnx_model_dir, dest_model_dir_name, build_dir, configs):
    # create a symlink (or junction on Windows) to the onnx models dir under build_dir
    # currently, there are two sources of onnx models: one is built into the OS image, the other is
    # from {source_dir}/js/test, which is downloaded from the onnx web.
    if is_windows():
        src_model_dir = os.path.join(build_dir, dest_model_dir_name)
        if os.path.exists(source_onnx_model_dir) and not os.path.exists(src_model_dir):
            log.debug(f"creating shortcut {source_onnx_model_dir} -> {src_model_dir}")
            run_subprocess(["mklink", "/D", "/J", src_model_dir, source_onnx_model_dir], shell=True)
        for config in configs:
            config_build_dir = get_config_build_dir(build_dir, config)
            os.makedirs(config_build_dir, exist_ok=True)
            dest_model_dir = os.path.join(config_build_dir, dest_model_dir_name)
            if os.path.exists(source_onnx_model_dir) and not os.path.exists(dest_model_dir):
                log.debug(f"creating shortcut {source_onnx_model_dir} -> {dest_model_dir}")
                run_subprocess(["mklink", "/D", "/J", dest_model_dir, source_onnx_model_dir], shell=True)
            elif os.path.exists(src_model_dir) and not os.path.exists(dest_model_dir):
                log.debug(f"creating shortcut {src_model_dir} -> {dest_model_dir}")
                run_subprocess(["mklink", "/D", "/J", dest_model_dir, src_model_dir], shell=True)
    else:
        src_model_dir = os.path.join(build_dir, dest_model_dir_name)
        if os.path.exists(source_onnx_model_dir) and not os.path.exists(src_model_dir):
            log.debug(f"create symlink {source_onnx_model_dir} -> {src_model_dir}")
            os.symlink(source_onnx_model_dir, src_model_dir, target_is_directory=True)


def use_dev_mode(args):
    if args.compile_no_warning_as_error:
        return False
    if args.use_acl:
        return False
    if args.use_armnn:
        return False
    if args.ios and is_macOS():
        return False
    SYSTEM_COLLECTIONURI = os.getenv("SYSTEM_COLLECTIONURI")  # noqa: N806
    if SYSTEM_COLLECTIONURI and SYSTEM_COLLECTIONURI != "https://dev.azure.com/onnxruntime/":
        return False
    return True


def add_default_definition(definition_list, key, default_value):
    for x in definition_list:
        if x.startswith(key + "="):
            return definition_list
    definition_list.append(key + "=" + default_value)
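
# e.g. with definition_list == ["A=1"], add_default_definition(definition_list, "B", "2")
# appends "B=2"; calling it again with key "A" leaves the existing "A=1" entry untouched.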


def normalize_arg_list(nested_list):
    return [i for j in nested_list for i in j] if nested_list else []
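
# e.g. normalize_arg_list([["A=1"], ["B=2", "C=3"]]) -> ["A=1", "B=2", "C=3"]
#      normalize_arg_list(None)                      -> []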


def number_of_parallel_jobs(args):
    return os.cpu_count() if args.parallel == 0 else args.parallel


def number_of_nvcc_threads(args):
    if args.nvcc_threads >= 0:
        return args.nvcc_threads

    nvcc_threads = 1
    try:
        import psutil

        available_memory = psutil.virtual_memory().available
        if isinstance(available_memory, int) and available_memory > 0:
            if available_memory > 60 * 1024 * 1024 * 1024:
                # When available memory is large enough, the chance of OOM is small.
                nvcc_threads = 4
            else:
                # NVCC needs a lot of memory to compile 8 flash attention cu files in Linux or 4 cutlass fmha cu files
                # in Windows. Here we select the number of threads to ensure each thread has enough memory (>= 4 GB).
                # For example, Standard_NC4as_T4_v3 has 4 CPUs and 28 GB memory. When parallel=4 and nvcc_threads=2,
                # the total number of nvcc threads is 4 * 2, which is barely able to build in 28 GB memory, so we
                # will use nvcc_threads=1.
                memory_per_thread = 4 * 1024 * 1024 * 1024
                fmha_cu_files = 4 if is_windows() else 16
                fmha_parallel_jobs = min(fmha_cu_files, number_of_parallel_jobs(args))
                nvcc_threads = max(1, int(available_memory / (memory_per_thread * fmha_parallel_jobs)))
                print(
                    f"nvcc_threads={nvcc_threads} to ensure memory per thread >= 4GB for "
                    f"available_memory={available_memory} and fmha_parallel_jobs={fmha_parallel_jobs}"
                )
    except ImportError:
        print(
            "Failed to import psutil. Please `pip install psutil` for a better estimation of nvcc threads. "
            "Using nvcc_threads=1."
        )

    return nvcc_threads
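
# Worked example: on Linux with 28 GB of available memory and --parallel 4,
# fmha_parallel_jobs = min(16, 4) = 4 and
# nvcc_threads = max(1, int(28 GB / (4 GB * 4))) = 1, matching the Standard_NC4as_T4_v3
# case described in the comment above.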


def generate_build_tree(
    cmake_path,
    source_dir,
    build_dir,
    cuda_home,
    cudnn_home,
    rocm_home,
    mpi_home,
    nccl_home,
    tensorrt_home,
    migraphx_home,
    acl_home,
    acl_libs,
    armnn_home,
    armnn_libs,
    qnn_home,
    snpe_root,
    cann_home,
    path_to_protoc_exe,
    configs,
    cmake_extra_defines,
    args,
    cmake_extra_args,
):
    log.info("Generating CMake build tree")
    cmake_dir = os.path.join(source_dir, "cmake")
    cmake_args = [cmake_path, cmake_dir]
    if not use_dev_mode(args):
        cmake_args += ["--compile-no-warning-as-error"]

    types_to_disable = args.disable_types
    # enable/disable float 8 types
    disable_float8_types = args.android or ("float8" in types_to_disable)
    disable_optional_type = "optional" in types_to_disable
    disable_sparse_tensors = "sparsetensor" in types_to_disable

    cmake_args += [
        "-Donnxruntime_RUN_ONNX_TESTS=" + ("ON" if args.enable_onnx_tests else "OFF"),
        "-Donnxruntime_GENERATE_TEST_REPORTS=ON",
        # There are two ways of locating the Python C API header file: "find_package(PythonLibs 3.5 REQUIRED)"
        # and "find_package(Python 3.5 COMPONENTS Development.Module)". The first one is deprecated and
        # depends on the "PYTHON_EXECUTABLE" variable. The second needs "Python_EXECUTABLE". Here we set both
        # of them to get the best compatibility.
        "-DPython_EXECUTABLE=" + sys.executable,
        "-DPYTHON_EXECUTABLE=" + sys.executable,
        "-Donnxruntime_USE_MIMALLOC=" + ("ON" if args.use_mimalloc else "OFF"),
        "-Donnxruntime_ENABLE_PYTHON=" + ("ON" if args.enable_pybind else "OFF"),
        "-Donnxruntime_BUILD_CSHARP=" + ("ON" if args.build_csharp else "OFF"),
        "-Donnxruntime_BUILD_JAVA=" + ("ON" if args.build_java else "OFF"),
        "-Donnxruntime_BUILD_NODEJS=" + ("ON" if args.build_nodejs else "OFF"),
        "-Donnxruntime_BUILD_OBJC=" + ("ON" if args.build_objc else "OFF"),
        "-Donnxruntime_BUILD_SHARED_LIB=" + ("ON" if args.build_shared_lib else "OFF"),
        "-Donnxruntime_BUILD_APPLE_FRAMEWORK=" + ("ON" if args.build_apple_framework else "OFF"),
        "-Donnxruntime_USE_DNNL=" + ("ON" if args.use_dnnl else "OFF"),
        "-Donnxruntime_USE_NNAPI_BUILTIN=" + ("ON" if args.use_nnapi else "OFF"),
        "-Donnxruntime_USE_RKNPU=" + ("ON" if args.use_rknpu else "OFF"),
        "-Donnxruntime_USE_LLVM=" + ("ON" if args.use_tvm else "OFF"),
        "-Donnxruntime_ENABLE_MICROSOFT_INTERNAL=" + ("ON" if args.enable_msinternal else "OFF"),
        "-Donnxruntime_USE_VITISAI=" + ("ON" if args.use_vitisai else "OFF"),
        "-Donnxruntime_USE_TENSORRT=" + ("ON" if args.use_tensorrt else "OFF"),
        "-Donnxruntime_USE_TENSORRT_BUILTIN_PARSER="
        + ("ON" if args.use_tensorrt_builtin_parser and not args.use_tensorrt_oss_parser else "OFF"),