From e2356a0403a3ca6248e4a0223cee14678443232b Mon Sep 17 00:00:00 2001 From: Edward Chen <18449977+edgchen1@users.noreply.github.com> Date: Mon, 2 Dec 2024 17:41:52 -0800 Subject: [PATCH 01/22] Use UTF8 string encoding in ORTSaveCodeAndDescriptionToError(). (#22982) Update from ASCII to UTF8 string encoding when creating the `NSString` description. --- objectivec/error_utils.mm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/objectivec/error_utils.mm b/objectivec/error_utils.mm index 335cf8894d549..e8d4d5bb365c9 100644 --- a/objectivec/error_utils.mm +++ b/objectivec/error_utils.mm @@ -11,7 +11,7 @@ void ORTSaveCodeAndDescriptionToError(int code, const char* descriptionCstr, NSE if (!error) return; NSString* description = [NSString stringWithCString:descriptionCstr - encoding:NSASCIIStringEncoding]; + encoding:NSUTF8StringEncoding]; *error = [NSError errorWithDomain:kOrtErrorDomain code:code From 9ed0c7fe26b69b60487b785a7c57243e15f4985f Mon Sep 17 00:00:00 2001 From: Jian Chen Date: Mon, 2 Dec 2024 21:34:25 -0500 Subject: [PATCH 02/22] Redo "Update Gradle version 8.7 and java version 17 within onnxruntime/java" (#22923) ### Description ### Motivation and Context --- java/build-android.gradle | 6 +- java/build.gradle | 4 +- java/gradle/wrapper/gradle-wrapper.properties | 4 +- java/gradlew.bat | 20 +- java/src/test/android/app/build.gradle | 18 +- js/react_native/android/build.gradle | 7 +- js/react_native/android/gradle.properties | 2 +- .../android/gradle/wrapper/gradle-wrapper.jar | Bin 58910 -> 60756 bytes .../gradle/wrapper/gradle-wrapper.properties | 4 +- js/react_native/android/gradlew | 263 +++++++++++------- js/react_native/android/gradlew.bat | 33 +-- js/react_native/e2e/android/app/build.gradle | 8 +- .../github/android/build_aar_package.py | 4 +- .../default_full_aar_build_settings.json | 4 +- .../templates/react-native-ci.yml | 2 - 15 files changed, 209 insertions(+), 170 deletions(-) diff --git a/java/build-android.gradle b/java/build-android.gradle index d5839f9f27869..9c4275b74f626 100644 --- a/java/build-android.gradle +++ b/java/build-android.gradle @@ -82,7 +82,7 @@ allprojects { } android { - compileSdkVersion 32 + compileSdkVersion 34 defaultConfig { minSdkVersion minSdkVer @@ -108,8 +108,8 @@ android { } compileOptions { - sourceCompatibility = JavaVersion.VERSION_1_8 - targetCompatibility = JavaVersion.VERSION_1_8 + sourceCompatibility = JavaVersion.VERSION_17 + targetCompatibility = JavaVersion.VERSION_17 } sourceSets { diff --git a/java/build.gradle b/java/build.gradle index 34ac93cce6f4e..845121dd17a48 100644 --- a/java/build.gradle +++ b/java/build.gradle @@ -50,8 +50,8 @@ mavenSettings { } java { - sourceCompatibility = JavaVersion.VERSION_1_8 - targetCompatibility = JavaVersion.VERSION_1_8 + sourceCompatibility = JavaVersion.VERSION_17 + targetCompatibility = JavaVersion.VERSION_17 } // This jar tasks serves as a CMAKE signaling diff --git a/java/gradle/wrapper/gradle-wrapper.properties b/java/gradle/wrapper/gradle-wrapper.properties index 4baf5a11d45a3..381baa9cef1ec 100644 --- a/java/gradle/wrapper/gradle-wrapper.properties +++ b/java/gradle/wrapper/gradle-wrapper.properties @@ -1,7 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionSha256Sum=9631d53cf3e74bfa726893aee1f8994fee4e060c401335946dba2156f440f24c -distributionUrl=https\://services.gradle.org/distributions/gradle-8.6-bin.zip +distributionSha256Sum=544c35d6bd849ae8a5ed0bcea39ba677dc40f49df7d1835561582da2009b961d 
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.7-bin.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME diff --git a/java/gradlew.bat b/java/gradlew.bat index 93e3f59f135dd..25da30dbdeee9 100644 --- a/java/gradlew.bat +++ b/java/gradlew.bat @@ -43,11 +43,11 @@ set JAVA_EXE=java.exe %JAVA_EXE% -version >NUL 2>&1 if %ERRORLEVEL% equ 0 goto execute -echo. -echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. +echo. 1>&2 +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2 +echo. 1>&2 +echo Please set the JAVA_HOME variable in your environment to match the 1>&2 +echo location of your Java installation. 1>&2 goto fail @@ -57,11 +57,11 @@ set JAVA_EXE=%JAVA_HOME%/bin/java.exe if exist "%JAVA_EXE%" goto execute -echo. -echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. +echo. 1>&2 +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2 +echo. 1>&2 +echo Please set the JAVA_HOME variable in your environment to match the 1>&2 +echo location of your Java installation. 1>&2 goto fail diff --git a/java/src/test/android/app/build.gradle b/java/src/test/android/app/build.gradle index ecbc4b90612dd..baf18e714d25c 100644 --- a/java/src/test/android/app/build.gradle +++ b/java/src/test/android/app/build.gradle @@ -7,12 +7,12 @@ def minSdkVer = System.properties.get("minSdkVer")?:24 def qnnVersion = System.properties['qnnVersion'] android { - compileSdkVersion 32 + compileSdkVersion 34 defaultConfig { applicationId "ai.onnxruntime.example.javavalidator" minSdkVersion minSdkVer - targetSdkVersion 32 + targetSdkVersion 34 versionCode 1 versionName "1.0" @@ -34,11 +34,11 @@ android { } } compileOptions { - sourceCompatibility JavaVersion.VERSION_1_8 - targetCompatibility JavaVersion.VERSION_1_8 + sourceCompatibility JavaVersion.VERSION_17 + targetCompatibility JavaVersion.VERSION_17 } kotlinOptions { - jvmTarget = '1.8' + jvmTarget = '17' } // Conditional packagingOptions for QNN builds only if (qnnVersion != null) { @@ -69,11 +69,11 @@ dependencies { implementation 'com.google.android.material:material:1.3.0' implementation 'androidx.constraintlayout:constraintlayout:2.0.4' testImplementation 'junit:junit:4.+' - androidTestImplementation 'androidx.test.ext:junit:1.1.3' - androidTestImplementation 'androidx.test.espresso:espresso-core:3.4.0' + androidTestImplementation "androidx.test.ext:junit:1.1.5" + androidTestImplementation "androidx.test.espresso:espresso-core:3.5.0" - androidTestImplementation 'androidx.test:runner:1.4.0' - androidTestImplementation 'androidx.test:rules:1.4.0' + androidTestImplementation "androidx.test:runner:1.5.2" + androidTestImplementation "androidx.test:rules:1.5.0" androidTestImplementation 'com.microsoft.appcenter:espresso-test-extension:1.4' // dependencies for onnxruntime-android-qnn diff --git a/js/react_native/android/build.gradle b/js/react_native/android/build.gradle index 825990eba0fb8..521866ff0f3e2 100644 --- a/js/react_native/android/build.gradle +++ b/js/react_native/android/build.gradle @@ -7,7 +7,7 @@ buildscript { } dependencies { - classpath 'com.android.tools.build:gradle:4.1.2' + classpath 'com.android.tools.build:gradle:7.4.2' // noinspection DifferentKotlinGradleVersion } 
} @@ -221,9 +221,8 @@ dependencies { api "com.facebook.react:react-native:" + REACT_NATIVE_VERSION api "org.mockito:mockito-core:2.28.2" - androidTestImplementation "androidx.test:runner:1.1.0" - androidTestImplementation "androidx.test:rules:1.1.0" - + androidTestImplementation "androidx.test:runner:1.5.2" + androidTestImplementation "androidx.test:rules:1.5.0" implementation "junit:junit:4.12" androidTestImplementation "com.linkedin.dexmaker:dexmaker-mockito-inline-extended:2.28.1" diff --git a/js/react_native/android/gradle.properties b/js/react_native/android/gradle.properties index 465b04d1f5813..8fe6e40d76911 100644 --- a/js/react_native/android/gradle.properties +++ b/js/react_native/android/gradle.properties @@ -4,7 +4,7 @@ # Specifies the JVM arguments used for the daemon process. # The setting is particularly useful for tweaking memory settings. # Default value: -Xmx1024m -XX:MaxPermSize=256m -# org.gradle.jvmargs=-Xmx2048m -XX:MaxPermSize=512m -XX:+HeapDumpOnOutOfMemoryError -Dfile.encoding=UTF-8 +org.gradle.jvmargs=-Xmx4096m -XX:+HeapDumpOnOutOfMemoryError -Dfile.encoding=UTF-8 # # When configured, Gradle will run in incubating parallel mode. # This option should only be used with decoupled projects. More details, visit diff --git a/js/react_native/android/gradle/wrapper/gradle-wrapper.jar b/js/react_native/android/gradle/wrapper/gradle-wrapper.jar index 62d4c053550b91381bbd28b1afc82d634bf73a8a..249e5832f090a2944b7473328c07c9755baa3196 100644 GIT binary patch delta 21827 zcmaI6Q*fYN6E&KNIk9cqwr$(C@x-=mTN6)gI}_VBCYk*2`7ch@S9R*#?W)~feY02h zTD^AuG}!V6SR?HZ1SOZQtQr^)5PA#{5So-EVT_dVHO!PqS_Gijr8tVDK%6&qZeU7XO?s53M}(nQWu(T*V4y~Q+IgZu`Cg|- zA^NxO&)4z&XPTQ4T(q8r1kU$+3v^INRW5@oYbsjnN+f1%-qEOBTa80WAz(KZ|xo} zjmJR^sH^9dtu)jPdVc;q{cZ@*{lgFH-^|rx5jfrUv?zo&7@6xf zqo{2J?XSS)LMbs4-JhM+oux%=2gj-LDutG->ubB)2_?)EB{+^WyZB+!7mT1{rLTY= zhBe$m_UQXkTYvIm@mXsLzO;ZaX-sd*8TOU{+u|RaQ4=3OA)fBB{i4Ff0M>x$;G=Ma zcigTy3Omv^$`Tq`q03>8Nu_CI-oZETO1CF?vujdca}q^5VwW%3jU=l>GX0P9$&0ck zdq~l*>>GvgA6Taz%F7GuSNLUoa04^fN57B& zyco@qy^}+xizUm!uOdF30KJ;UbaUDoc=X2i5;;X{GYa;D@a;d{4Jo$4RP>X?9tClm zA6c=cM=%!VTMjMk)s!gqqkA5#*o0Q?bWlKK)^^(tV3HwlK-L%B09T73kG}(|+OA-` z^lVb)kt1ER>-6ZSFd(c;kIq8KC)S!(aj2|HINyl4jgt?mD+-z(UScExUcp0v(;MW7 z^8L9qVV11`VMH~qbKYDhqq0|Re9{>1xW5V8Te9E%M&PXgi&h{f0k3Pc{q6jZ%?}_H zoWB$Olp082{{&B9j-g0t5mkg|jl>CvE}(wv3^&}%z#;L<4oA*icEVHCyrV_v8+8Of z@$FclzI0)mRJ~!yEuXL@E9{#QcM1j)91z>dP$XitO{IHxC-z@Kr}#75o26R^MTDIIu@^Iea}0XB#D?J(~_3 z7`p8Cq4U-63wntR0PH+uXw0Ih;)4~DCi1CH(GY9g!eTZolrQ9m9%L3~7}SPu?8-EC zcLo2{|54{e>ya;Y@!R=eD8mVSi?8FvUqHLI`qMWi=TI0=`Sk{KnuJ zjPi7bc_|V4WAV6OZ4_+Gs@1fbVqp|C;%OwH*_Dv0RWBbc}nZ%#zdQ64Bn# zl?%gu(t1RXAtW~S-c)6?VYP87Jk5m*%|R&;Y&h(SucL~?-dNofI3vkQUv6EhQCS#W z3oQ`_l46?W%km>bXxOW$0R5^Gi^cGDmE6>LTAV8rOKNLot}L95UJ+~aCnj&5ch`>B z%WSQ^g0oQ(0n62u2eV_bKAMLr`Suk=n|uk4rL-}Gb^Tlp-1LJADI<||x41^E5S1Y~ zb7f8!!V(lgB-nf2JU#d&oX%P6hfn>GH-9-3)(&PHu81o8+t8XiaHwuT>63bDqrmKr zMiqXD8pL&!CYDdL1$)zZq8^CPAH%Od164X8Y8J3`VI&}a99NeerQ?-0Io8TFlMB8^ zPoTgFCd2Alz9-gvYLJiKe6@P)uiO%wRLS6os1f{`BeE3zD`Wb2X;VgxhN4R0*j>M3 zi6e%iMl$DI0RDmkh*e}N)fg7o%$!@|Qyy=a*dHV66Y#zA4Zkt|uz&I}?9a`HKwBu^`J~UHFKq*{b z|8(%QtrwJn#0buu?cY8K`bV6=Z;+I8-K42=@Y2A=s@P@?oHj0`784JhgLX2=du7hZ zEd+_s#I?;ll}t~lNl)I2K&+&9G{VWdktxQ&j9D;#q^9vLwWP}@q};;cTh}+ z@l6hvdy{YvPAQxjeFbbmzZXyjBC(adii z&Qv@6@yWf)RPwzzhOqy@*n1CTsjg{ZQ{7+RL3KP~SyibD$TX!~%E$<@B+)$~v!iXJ zk9RI`3`JpEvSmh@x}~d>rRcH8@S3OPjSXPg+4Zu3-J{cJU z;jr?$h8jO&537S132!9su=0}hkqRThWP&SQWwjYCUD2l(^+)^^j9X;yY6%`K6DDmF zbWI~c%|Z}6_!EUmQ~Yfn0+SQ#tP$#s80yWSMXqV)tSK#lL`}#}WDL^JeGf{%jCTVb 
zIWbwl8Cmj;Jp_lKv~-B7IR9_aAy((h0oez$&~k!{gHa+fbB8PRkWyt$n&-q2{4w{2 z8y+RqHJ^P9$!O#-K0hc$-#eBx6px6u_@};{nutFw*mH>$)(~v)8Ipz>GQ|LuXWNw! z`gXl&#i7zX`e7#MDeVClSzkQQ&#DLFOpR`UIM2`={z&F^H>`&a&eB{vE955?NfPox z@<|Tub!n#hr!Kw~e693;xpM6cW;>bT+fXdPV0cjxX+a{`#s#eC}2O3AI)1&>X zv4t02&WA?qK{~D40-BYA@gsjGWmJ%^e@0_jLuHXKysqSZDQ#%=F-aSY9(2Ww4X!xw z7edknLe+}YVZ?)EO{TTfehQ0Zz8RLF03<<$9o32$Q6)0Unbv-5!0e33Vethrydn5+ zGS`SUyJx;dG)%qiC(l$vm>ieqbJb@}uwy}RTtbQ30RDhNn2h>6hCJ`qsTr8kCK8pb z@!##tV=X#LUX`;%i-aQ8L9JADw-6gCDCPp;{Lq%w2{BD;Odql);(tzY}Z9jw?UjauCZ@ z3t=Pk0QZ{}LQsEEz3bjLq!K8QtBi z?HIxS3jnbHKQ62t+{|4ZjQ^jA|1AynuW0fE6a<740tAHO|1VL;+DX;U+KIu`&e+v8 zOifpHNeJyZ60h(#nzvxWENjsTnd`2P}KL%^4&V5;wNMJi| zXQo_sGLD_Y<1-myd=#K)baM8|VFfvIb@v%NPag}}>N-G02#Gz$UjfkS)tkD+>0Ye0jar=7%=EoiXE!Hdk5ucsnxgz{njwOkA5>#;k5*orMm!FJN=e0&==H= zaYJmmFJLj=^U*DF3Y2_=%zKnr$)*oh4xXs#b8}l^bf4K4m~I*@{>q{^m=LH7ofGa|Nc4 z(^xQDF18*5=BBgx^Guv@!U9hWVE6hdVRGr&KHnIh&nPvse*UD%He0s!iIFWZ@OINn znV^>ZD~`;H5FTEoz9{?ZiwivBXn#2485!Q*8T|wyX;H!ShGH5a*X{bmRNjn(X>Wsm zqHOI~oqVf9zsEI4S8a(q-1Q~XqGOW6@67>0A@?6+Ndqu$3k7n7&BMb(ROcR@u`5p2 zA{TBp$&d~09Ws`oXJ-vdH-AJYEiM)rw&4FThDPvi)$L~k z2Lbs5^&g1;uO91#hDoW0p#*kSan;fOIdJ5JnWL&mQK9JwZQ_8EtJA_-+v*bG;K-1p ziPg-KcOq;uba$)^eTNIYEobzer7U3@@{o$Sm-{be{UiP7vw)qq;4H!aiW1-k%Y~mZ z(aHI`<=T7OeR{P`2>@Tv{j_i6VxW#}#ppweu~I4Q6S?;N+^DDb7658;2azU2c1P#} zMXd3b&}_dhMX?vJi!s54DM5!bJ^QD(;#cRqvO3tx0YwHkx) zhfrYE<9d&8=y>@DIFJw5?3hl>caaul>pI{ulD4rJd}sL-A&yKlZmQ{1K?8~xmQ%={jC-&sMHm8m%MjbPTU5tDgnye~P=vVMAk}U_- z3}`%6_aL^`i?Mf$I)0g9{KgqMvYTM(O0!e~)j1nfhqLFhE5gUeh%a0kq=rYS5eB=} z%9L0bgvo6^13JA|`eVavGufFa`EM7SZiI98BX!)*&RCb&IU6&E+f+$ralPgS|8_X+ zgXwYJ5sSV6YI$O}d-C*KXyiP3|4ywOA?e9!mz!>vL=aaj+_4qbaHWfsec#3Jo#i{o zldd<9J1RY~gsKj4#UWEnG zl8o^+z$HhKQ&zV05z34;10-2 zQG1Y{kvdAve5~~T$iAFMx!2GsKdn)Cm(Fux036D@eb*MSIP*q``Kxw^q)jjsE(-Q5 zk8+nejZm)Nq9UwdjT!Qm&(Us6yv4pD7v6vR<2Q{VqP-y@w_dOq2!X8wNTiaOB`4;@ z@J-O++F07}w zsu$PWW~)t{iS^Vz@0|A@lF$2HrXb*L2u)#bmL-haO>b2!TV7bf^pwv>c1HW*Ggfml z>I+aMiaZ+r?{v-&uG=gE0|5#6ufwqYza0i*6$?mH-*#0MNBh2(Ka+RhWE+;L(yBsX z{!eg=e-?@tmKGX)821&nf^O#IJsmvnc)6OM3m&oZWEW3!37o?tPICqF2)t>&?V%2> zZ?>kC=ArSP-*9*LxxVD?a(BNjJQf5%I^mFmNiySM7pg zLB*G8uI7rVc3GQuVr3-1w@SPB|I|~(q2A>lYyI;MA5fDt^NAwXbCOLis<7gA>3TWN ze!>{emZyy>wuSYT_DWyGjWI?Cy`J&8`3=lOW%n`Q@3Ms5`oMoyA4)YC#n`B$Q0$(c z9T`BOc>>xWEoQy@K4sN3>{HmlUS!LTo1RCUXVHcB zm(33Ufzu5Q#y5(~_8`RJ1nRher;)dPcgJXLoNb%MGq}4y&)Fz-Q7y`7(Hrpsk`xii z;5dL)UBWwHT}k>v`qTCe0^nC2>NBw;)apU!;D5mFLXj*dBx&N~QyHVxJ^QUV(CZ^8 zB?aLd-AmPfi*|te5y)5OIM13pLZ~%T&=JySbl|9VrTJr1C$iP5giHY_R$h z7}19D(p^at7}MEldBWS2IS`YE25sgtkcNi&V-$%GMSGvD`R}!tQoA`!`ZVV@$M4?% zHQ)E9^ECgl!1d;r;rEOyBgz8JKV|9_U;*$t6Fl$ZJNs(43aFa@_8J!_^g46?NXrP2 z@4H_#WeWkL6aasP4T6?B0Pc}8V{?r}b+XKQGsM}&1 zFc`s%60dhmFUfij|b}6<* zlg$yfM!Qx20EuY7Z&CDDoyOA(pc)iTuC2c8C^c5#+U5B^`TLlvB_MEyPn(f( zY)z}JVkDHw@mn~olvrC~SU$A9IR2U6>83^7+L;{|hEir{p4r&ibX9lsrE0CI18c?~ zG@Y-ntLX0jU5ChfbphuA{Ca(Qy}p3;@PHJ(&eSFxJUB*|`?vGrei_5U)LC-BZ|oc& zmUn;Tbm*jlC>b~UCC#72lpL4mf|5;x<#|~GnF95@PJ#tJYDfn?%KD{}k>Ye{8fpT) zD&#D|1Txk_4D0t+v-(D?7;g6yc&3bK(tf5xd5Y5B!QlE-Ij#o}PzJ%Wl_73|+!9vx z%O|`fKdu#BH!IivzL8ihZaDVl>CAz2z2Y_=XK>+On7>P1QDXQH1x!SV($?))lQJ1BVoVIy4x+0c=BX;0Ywr;KXvDGCY&CKH@Ns< zQ+zqIgjGGcfL>J}z~hz~a^~b5Z9la+u?q2Kgoj#wKXh=779PtzZ#!si<1gpj+0!mW zvt`R6S_5=H-@|uhzqXEaWhXQeUNk)EV)+R>_FcTqKxw%Bc4Dxd;0Sz6Qy-_*RNOEw zr&w`#YUSB}<2<2sZ6f*vgI(#g)O0$3jT1f5Mu5@0RHQT=P(OZy9h)V=QZCsC~U!nkALP)KipT z(nW?c1wms0gfx9hlb0c4e@&dB{dF(iD@Wr%SD@`t-2Z|l9A}oy#87mej;nTf+2iQ3iXm>@Do$U!qbcW2WMN{Q0WaaPH>dd z=E>Ygs>JtPT31iKab(Hgd26ngjp14>2FyYZ22M9*|PrIuWu>B(=Tzyl0 z${#Jj_&s-L$^L=oZ%{(&CRMU|jzqHw52XV&c#0o=eZWjw 
zZRspiw~!*uEeJ|Hcwlw>mzwxZzOYqs|MeLt(WeL$E-;?)zbR1ZG2WNohkTmr5nBTD z`iBv3v^auv*$oeCZ2x!w(L>3%99Y5Xd*uMR!?E|a+IBINtBEiihemDYpf4X9B;<4M zEQL&o4&k$x&_P9;Px=6v!;1G!Ij?N|r8n#Vk;7Z)&4RzkFgW)->>4^tNtLljlUK7u zkm1Sq3xT7%$Cl!*cu`bLr9&qB<$-|p1lomJqSHf&_91gn3u-YpwZd2KSzOGCH=EWy zs2!&$i-bqcas%cFjPJ4h><*DTbmqN~h+=tc;GdKL3BfUPr32YR%y+bk)(O!O-{&R{5< z&w1}kv?gn6M!+y$)_`M@8W9F3Sd|+I@)*ZHh!s?l5g2Z}$H0vMEgSCDeCva}I#P2+f_?VxJAO1jgkX`Bqg;B--;1BHKGrMnf_{Am@J?gC#xECQC4N~ z4u~6Q9uhwAN@J%=EVNq8#}6Nk2c8Em4afJAMfI>P1~vn`&tpYa_cyI|PhOYZhctWYnhODv(%D z-X`%;54AetMCSDHn*1n8CEhQj6mAvLO?n42%r2!aN8JIHlF{zEy14lj&3;wJ~9~6v>c{dY`Fq8@l$=I zCVaRg3m|;ahIf1TLeP2}hh0HUet@w4B?7MuNz6-~lhk~U*I=%tg4X6%5C5RD%g4t& z*e#BRjwuaJ$pA1yy_+(ADk%Tu6+%~OPU8ywNLKh2CzPEi;5%t*awtK(`AaaWL*nRw zP1u_CJ7N?b9x0AgLO*1|ONOs@#0HjnjFE+)ovXuh_HhD-@bMu-_0w zQ%btKMgtn+KfjA29m*doh2-H6o?#szV<7>%W`vlXqYClG$BZ$L+lq1|#F6*hG|UuhudmseFtaNM7u zCaq5ITs<(;qnp}^M3-c>^7^h6wMnF2NPNU4*`w?FIzefi!+o9bS-NsouTgyR8Js|$ z$Bmk4b#N))NGBUx4$boD(5IVG8u~$HpLD$={iu2e^qoO42v3yq3F+SpWOY_ zFMvWvsYgB%^3*?iyWkn@o`@#|BYL>pfV@ChJk8S|@H(Ojkpx}RupTv%?j#sGnp`I> z)VFWhX>9>75uZKjDJHHsI02&S8tjtj?2SV;ZB-!Gk3HbjIa~kG6Q8mkyMi0+m%Az3 zE0=nZeZEtID8YxH6uC_hOnsqB7m9<9B;ZbD#qgJIcW_SisWpxQ%ocnuI@>bJ|4}iy54^r6wFJV& zEqijzdS3`3e;`I-o;w99i=6YOP`ov%x=NMC-o9f{<3suUJXzd8sZV~)?(sQA6sV_b zn3_L;&+D#}z@lM-F_LvcylTb%qUYv93x31?h(|cMU2M_v)^iwbh5C~7U>zgSQvPhR zNB4A1sd)j<%P4xx**bI^=;xxx?jMyMv(j$g%`5tEQe^A&xyDxSH=@f&@4{rzV1w4V zc#DT$TswyBW)+Q6XqvP0H*i#$0B-v@isw3x=Nl}2Qw z&N}>i-CnV)xz!J|^!&9A&l+hHj@s($csjf~K69d_#*?mZ`A}9trP#Jpd!f}j^VL0+ z=PH~pn)t4=tjlh02f}jdAK9#KS-bApYJIe#8EXaQ`5*AV@XF#T#O=5g08J9RwRauX zWs2GMoway_aE`Y$=B^7hRc~kFXru#1!Do0nfta20*C18DPLvn_0?Vleh(IVLufaU> zR)n)I9KB6z=4&yb8_<*b67^EjOi!@25a?^BXHa`Ew%9CWB@#Ap!>rZ}hhnN`3%p8& zeoN_LZCC;JbKh3Nzq?YmK=9$*nZ*YT(mz+Th1YYUFVbxgd50r$H`D@2&PNuWVRkoK z&UyPDlywcG69FSrbd(LORq8+7@|4i;aNT;cb3qko*~3A8tuOvR^ zTw~ZgVMiy!4w&;(^ODF?lO{;~xFKiSSakbv=YOBTT!f(7*19_K0Rv&Q&P3=8p@SNs zQs`LEao(VrNwjL!O3|V*+_dTp|@SS%jCf%X-0(=JU&jFa_+54-jh>qBdfp3qMWE z5=#(fRaHTW&ALCrJekvhF%U#bSX)%H1Xp`u8jm0eAVQwpz{Q!_v;72leY)O(O_3nD zkWAwT$_HuIXxI+ZYQs3(-0Z}#4;+seC##hK17!x)^K ze)!W3&#nVAftv~6-d&g|5eN4r_M=32c(z_Z#cr6iX}|I%?(94?R=7d&ICJe5t%d}g z=0~1hZ1)5;u+2`xLh-3ivh<9>)rVQ+1J|1#XJdpdB30A)&o4C(QY~0EDZjn{=Wpm7 zWbj#fu8TUUjX{I8e$dB(`>`j=#QDJfzp78Uh0~HKZ?0ZW;2dpM?ZrEv5MSgzzopoK zu>Ah@ zBTbd7MP6epvo^j_ECX+}W$31B$&KOROaxcB(#0NK9$RKv7n%pMM<1YzAin(h#HX?X z*S{1aRaFrfoAKDVl+LS|Gt3#{$^I4B(@8Ip{IQAWp=1fj)2|*wr1TZO+D)sOp#E6t zE~}mZYOC{HGHLiCvv?!%%3DO$B$;B5R%9Hqp|GcLuK4ZTSVc3v%l!|fF8f&Ci ziC_NmTpWr?+WI!oDJ9^3_P0)&vZnnC=|OXsUw|yV6J;7y&=LI(&F>Z|1ImFW{LRRQ zNGb1P>9DQj4z^p9!(t!}H)FnU;$>GkZ#ZsNnh^D0&^&jgALow;xclGOcm_N&h~2UP zTuT_y1Z)aF`vv$nInbO!%fSd}di$Yi;(zyESy*Pt5g|Zy32iQ$BAbk z!n37YTg616pojDJ_wLu)(7&Oo8riV^ ztSlFas6;fT&?cQx;G1@cvc30JN`N2^3Z(aNoJ~dBQotNsi*~{tjg5PLLU2-YIxHc?R z3lGuWeA-JQ3V)>m6!WU;wtBA4=$j<>SgRyvm;xvBxf?wqYwh*-QZhM@gDAXsKjrY4 z!Ul8hGJW25!YHdKx%lG_n*{4NNds#e9kzArxSa4Zi74mD#&k7A>%0*yRXo~-I{a1m zK~iqRW9bT4(k=N`GB0oAvv`W;{Tk}xZ9S~`x?$b*+sY)MiGY2-S?aAcGzEo#$kw%- zk~|lsgAG@XvxGN7@)z`_!E(cx+=}#iX}v##aQ+W9{G?QS+j7*KLZap_l$Y@@jmdZ` zgT&@eYYP_884p$yE$LvDgp*h;Wtak$J0c2nyD@oK4%3+6IzB!qP8zE*4hZ}+wMH=O z4 zahPD@ohXE$Nj>2qB}zc`p5=ubtJ z^pqWG6<{9m9o|Rlg~AF-Ygw|E!Gh0Ue;n#kJ06yYceL_|PHW9L8ry&T7%Z{35kt3N zw+OJ-#cX&Ob1N-nZJ)WY+32O`#UDI&Xi*n&nAlbyuGS0LPAKR$OU|Av_Ubq! 
zr!mj0mo={$;7hgMs2}P$qtEU@(ruPj_UB@S#2?$k=;`ZLUt_-B!t$?Y zL1f!9Jl6&0KV9jGc(fr>(vu!rb*ov1f&wKHBs$3q)xX@-M=<;#(7T!fXLFS|2W@aq zRdvTFcFerjm>tjc(og7T%?Xi}Y`$X-GdxTaf3vaYTRywjdQSzjV~Utq z!{CROf;YeqJ~pdJ6@fz)Zz)k_)yPy9{B9uYt$F#wvpxDN3^BCppj~;j?y`WnunchB zvL7*F(1PpF>oht6^E?Y4q(Tix&tbRc&uR1CFLyTwYd#0{ci=D@u)2AiERIA6jYvT% z-Kg-{wO^QOuB3Y;?%L0qVKB-6>jiz$IojUC@y)l16eWS9obr1E`NBS6DX$rFs~?h$ zemJmb@w!{h^XWtSpD)#|G$*D3?mCd3!#J4kH*6?3HAQ1(4w3Wk*0Pm08t;Nw+a?ya z)}&3Eo-Nu>(o0jJfn!#;vdECfp6D-9W%WS?lF;Mov>YOdXE|pQ?^4Tn-ubpzAF}^b zAJ`oE4Z{bH2sT-ELzQD@Xr*JWn702Cncp+GR_?hJ7I@Wkx#a*n-7lN~g-g)|&u5h@RKWa^vBa~QvOsQxLF_Te-O!AQi zmOp>-m*_oV=yp@Y_M1)PxgN;x|I=mKe z^p^O|mVYcdekgy(F5DXl9iGFlVU_%RtSrt{gN}D9W5gYK1EV~7#+~#Q`i1vo=y(| z`}k+INxbx~3s1O`&o(0AU_l7R_;f3^)~&AW_IRly|Dh?MUq6>G1R>#*dAlQe)MFXNK^NxNcIaln;G%%YB~uZV1n=S50Q2BVUT7_92X@7Lgk;nO z^Hq7pElB>LKoe+)2`Lah%yEUe>0HAV7(((hj;(5Nw6TK_*%?Fm9-6;u&RC4^hdy7F z^1Whg)FYF=9AU8BhDK8OcIX~z=o7!1en9ee;O{4NBI&=?V=W0fJ zTe81?6?i2?`8vU=mm(@g*=$9FksagV=uaGX#C4z*zs~+tAyWf3tb_ar{*r-{Kk>(n zq;V$e$0suR5loSZk%<*Zj8%r70Q>0xshrw;IWmElW&yqe9P zX$XWzLvpcjB=vmoOK$iehxlVuLw(L_vR`!ezn&Ky1dqOd{IAiqaIAdc``@=RY)BB0 zAN2n@UHB=d0z`lv+^XXzNSO?EnJ#QL(g;=#lDrV3P4?b)BfPn>^@KysKT^kzd9mV! zVOhpc znkOgpkoW2v&d$qp#8Cgz#jd+ahFAZ6Ry(t~8q9VagBe&kEkeEk7ZN_&QZpZGHAz~| zm&)McfJ@Ce7dHG*Gwk$=*Cc^BTi)1{Urz5KOUYU)9T##bT|7LqCVnZ1)<7PBuKuuE&`)NRCR8 zOaU*K-4Vyjvx%PuR! z$3aa^;hl#mvqQj{0Xd{R;H7&-_^>SNl4EALZG^9%--6fJuQtJIo-hu@!xb5SK?dbS zUN@5ZQ$#0oh?Z6`PM;?{bv%dkwKz~(1^x$xEnPC0o^%8jmK$@NDMU9gTbfIib=@vB z;0ZVyVqvsOeNFCcm=6$L6A+HcUktNKSW6>lm_YK&_{S_pQP6S@Gp(``Xc^XnwH}}M zAF=GqgcpwZeY@^Xn2OD9`zFWZk=xPNi5i4_Wfc zojPk9<~tVW)Ooh&R&eH)&bjIr&VS@HlENKX7xB?AdNnBs=NCPq7*wnBGp4MilxZ`_ zS0?^+sVmU5@{5*hSUxB9tA-EgpL1VqSnLGyHUD-BTdYFAR!j{3;z@HVDOi&Rx<=)B z=ntQ9I4@iA#*Cdp*l^3ZMbGS#>*xb^=tyFwa!^YXu2k%8Ow+#wXNIp#x($wNgjc*&Kj0>P*r=wBA9~6Hmh6TF znaZyHuYw8#Q;lK)6N(g#Wa`E)V|idZy?n;9!NG35r^A7kL*wW_adjxwZowu4cvQaVo*mO+8r5LR+Mgz=HkI=ob4Qw_IRllR%n$ zh_{Y?KT>^d$ALWMawfCNtH!xx-sukl(Wx$SeAsoGoMqbY1if3viKKOg-N~c61WzqF z)a*g#8g6v^7L*)$xoC)kYYVfQEa)j;pLtu)2;)xddP?3l$X6fVK^A*kcP?vIde1bY zoTZ_{y#0E$!PcRBE&EQ>Cnums!i}WdYA(-`M$kqju@Y>Ia?qaIdp9|fDb90zjIP^a zs$7DOdRBjN(VjuCxs@EXt_N_dx&SJkQ#_LORf zlPv0}BE>Inpgd$7P{!4aHS;yE8!f=cV7=~*t}DgzlwQ{wh^OM>(~aSg2tl6q;5C;( zQ-e*uS1aCD`KfI6{GxT;wo+vAi42v3#C{fEp||h|L9fMQJO+%H;qb?a>I~{LFDZ~a zy?v6-Od`#}0hM(?wG*8dTi>QnKdiq{kD*38~F*ez-=W&9~!# z@PfeC7*2M|aauscu$f4eVcbJ;{P*GznbHVyHNm@e5DMkAcJ|8TOyd!Ng+h3HVcCf> z-frSi%xDWUkeoN)}RDAVE| z^GAy|AdQ7TvMRTj(UG3tbH_u*wJe{+=aFI{T>iPJ_zIyOEm2DBq4ja;=!(iA#^$3SsQ*CzF2%kqG zZaw$|a}2B+W9!H!$X!M*(1_Y*%~uOx=xj!C7SQF)aYfIJ;Z!~PU?LPW>nT+6TaX=f znHpy=2fvTeF?d|k(}ET(E`Ymr1~Wvi*n?R-8|P^bclWKRa}b z5(aDCEv$Ka&MxIabc zjp&OHJT@4$`a}rnn|Q_P$+%^G3kVR(_J@3ZoPk7~r) z9|aCPkkA|K!%c-*S0l_}N>4k6`@ILk-RggC+#6A{2+v=L>m)ouV4AHx&xoe6OmBs^ zxiZ_`1qc}3h45M3iTV*PWkr~i_p!$AN_qo=CC@SlyL;Vl8!%%Km4Jpo*~3 z`p%nUQUNi9oM#Fj#RI!1w^*OxYLnZ#Wq=)QdkqyqtY?=!f=4!!!x&6i)1nq_|8*CI z%?m{LOrA#LOtXpbX6%a;GV&IBTlZ<&=<=F42~KObJZ>C%?&Zfd6X&0lNYj#S%uqak zmvpd&3pTOSveTmZLN%oUClnJ(F<&O{2s@Z;n8x(@5TOhnhTr^uvLYpm2ziraq5<+; z6Nh|gjA{DBkjh(;fkiWGI#i_)mAuJR*4$s_zFo8M)HQ)}jSA>7rWEi2$&M2KY_SdU zRhjtlI_o?Vxi{2m=tB$b3`tD?k!##fw%;aqte>?5yJ;1tMnXQ?ej<)=V~YWF;Q5m1 z&KWp0LJFU*y3Hc* zOe?*T5t@2y9v8RRO+9_>0a50ZVV;m0V50qgc9~{ff^n>;B}eW0yBL~4`c}@8aLRr0 z+l^C|E!zHBKMmdStyp5L>xzAb{M&VaL&a8-KG&E0oOQ|2$Gv{n4i>;1Zw4V10J+Zg zVWQl_aW+0mML=pq0;Pd5xlVKnnqn7wN3M_R5z>y>vmL8;>GYO)cFSyAHo|fu%Gf1{ zqGUd}l=2O2uhpwBqlk-5zrgu5AK!rAD^J5<>o$^zxT~)3(LUIgOWl>CcyVt2Y`SL2 zSXlj&F>lkQ4}b2iC=5tJSRlYP?CVuX!(PT*#xmg@wfU3wWp~{xWb>-3x6bo%%jd}A 
zC(wEsL_TIkXKTsXZit?+>GY$;k{`AiV^i*rNf3<63wu@-R&>ZP4=QQPmZN9*Z!+4s z!Ke1)%412!n7I$83zR5lbB3-|IOJ1;;V1=n`w1^HbKQg6k~ZMzBWC4N>=U%}Q|D06 zS{@X9ED~9ves;NIzGD4{Sf1O_Ur9V>CF_b!)wbyhdKZ^qRE~FIk;J?x-EZmSc+$MU$yw`&Sp5gmoAZdqXT_X+erbn6=li? zdpXPpIoJ4$@eO!#N>N#pB;FBv`gKE#W4YyOt5vk!uto=&&#By`?$Kv>;Dl73L}xbm z!+~`H6tM}ZHjUGZv|5U4HrSgiPe{9dLBgO~{){8#@(_TjAvO^=#>Z2~YVo;#y0l_7 zKa7N|$jWYK00n=01U}qtH|)Ww`hm z@%^bkE{~BXgsH@he5@MCP#P0?ZqjqS;6Rfc1ILFsK_7m>STdy!K`-Y)&7bMrEl~@Uplx_)OX#`nPkOiefLKbjI0f7Mnkya#?5a|$6LSR>r zlnx06q*H$T^490~o%5SHbDrzE=bC4pndgs*bKh4W&3K%dz-QVzI;2t^M}6aPAjQtq z(~0EhwU3XvB!-yv$B^OM_k6A0=85z@c7(SL(zPM`81?=3iH0#3EkEw&EN9_umS7K# zpkhSQnov}d#4c^4)i|Fex4kKBH(4J2>l}>w z&i%Xhm8!E0Xwk%_Sq2*u29e=Mv73kJoobaP+BEn2_aB3!G9S$@e5K(RP`O+X(P~m? zu+90r8dGW-FHKhN@1QA`-A{2Q3;B`6*Jp{(RFkW{>3+_q@nJu4scc&s5}CG|F}n%r z$JG=cevr(Gk`I@)3`=DTBi69G_g&}-d0`RWK=!`VD2dt5)cys-Dy=c%+*c0fR|8Ok zl0*1$L&>kyrcGMAf)irTU-iqh_((j{nj~cwGr-vD?0&Yv)kA_=Kxxtnr36vjOO3ok zsh}{a(JOlk>2I|1LQgPg#tA9v4W9HoqvLu>cdQTvkC>06Yy;osmn@@`&IBRmw z7ffk^TrW1vk!IbYMZ?InU56LcKbNJinmx$efM*8=lEzS{@0#?0*Ca6myicxqzM4_J zC60-v#)YzY+tS1;Ur{Bh1aKwK&+=^RR}m*EP^8ytbr#EhS}rkZP5shOyeJJZ?aiS; zL*?Y@(lL~kHbHEgQJ+gbDMnnzkCSe|bF;N59>{A{<|r~Yap@f{Nyut{GO;}-{bFWH zhkKXw)(Y-}PpBedLp7AT#J+f6bPK)hV245LxrO@aU3iUfh}xh)Te@*$!VHvZWbVsd z`7GaxN10L{B8e{nub1K1d>IbF)*%Gj8f^ZhQp3uY2y+WHnq4tbfJ$!La%p903~{jw zltpI-9VB`$9GWi9orKwy)n;W2^fS`uu=b&3w7aL>c&N1A$l$1bFQoHq=emkdWBBwq zScQ9=K?CUS^~IVAA()EgC(hEs)NLPTcE^S$ z{1QJ(1!UTjDzUzb#P>sqGMvazkEAQPnZYy5h zF3rsS@;bQp{f5u`X3;t|!!B2843GEdtilr^{Ko3~&Y5<$GAAvZ|B=+3q7z?e$&;0@h>-ova6lV z=g8j6$(1-<5)S973ze(S>K&m$$1#+N7Kjuzv*fgePy46G?dZ513N5a&&voA{pSi5E z9!QPf&Bm`e{(wvgE9Y)5e(Y#cZvXX2Wr=KVD60)27Cw5P@>+9P@Gr)iAa=V@GQ;CH z6)upex`c)B&%0^eX%T}EjF**64{7^-&jsF5(mG{gD>YxamnXEWO~&JG!fssq3y9T1+)4guHMPDS);3YJE6-iMZy&ag`1hTFyw<@QY>0G*HX7k zo{6BF=sJ!s%;yJgj6IT^zQR*XS~03SuS0|PD}5Cd(WQ9dra8~hGs7HD3n?F-Fc}q_ z&ZjcQP2V#06xOROUWfct%**Y{^2E8xuJ%bL+eBA3hR-bNvrUUha;@nt{V1tSbDZ~R zea&j=THU42`MsFvuo+`fGURM#qlYm>ux=;v^#z;ebX+{n$2{Fh#pL%KBg2? 
z6ke~u)UekcS*xxR%OQf6{7DA3+w zK^v3WMAUg#=3rcVuD-I|^^5~OzF9n&vV6IjvP0c8E~Y$wbJ29ikR%u_;=SxInr$G8 z)gW)j72tL^Wb48NaFGhh{&~&tV>1Pv!k{yrHo77gJ63Rgmr}VrUdp-3FL%h;v(Oc2 zE{Yr!zuW&}`5a@bkI^frDqdQn{zmpv zmeCYUE#8ytiLbL{P&7C4MBa4Wqsv57OTyC)6zBYyDt z#8a0Lvp0yT-bslq@)lW|@PYu@p2GChf}{s}E{w=Lb_ERT*C^<``6=U^O?p~Q>Ms(c z^R_Q#dh)qdCVfuWgNCC_ME0tfbQbE-U~>>k~Kl;$#`$CRxR zhXV}Eago`Tq$Kn-Fxpi?)V&d-D_6HFqZ`pFQj1Qawm_M@YMbR0^!-A}PVKE7j&bLN zT(N84wDsNyM(kGpw~-o}E?;6xI6Vqr7mOAoy{P8!PZm43*ltS_R3Y z?V9Da>;sn^sh2oIgssCKkfAgg_5=9(w9JL~lnqG;Md+aD2&~f2JOiLxVzhglNK8bu zM)++n3ld-F1KUQ}Kub$n41VxV^MyUbVm9a`lPZ&{AVM&r>Gs(3aTr*q|E15^kd*6) zNLe>yoTVHQBPQYFyznVw>?sNtZ%}%GTH8 zFBE#oES>WkabUajM$xL^M(wi>S%-D{THQpABM zJ!UTZaST23>D(LkML$>_3AYLg3w^dvKx8Y5V_Gwx2y+El)}) z3cLUwTS;R?__}Aw+I3!+pJ}Hm7w%-yp-Pp_*QkzV7M9=EdPdZ%4eJKAB^(~UUoxO_ zqY*hY*4=%$`r^EC98JjDQ@kQt?}6z_;GR-2$#q+9_Ej>RC2( zD~2n{(O)i_TGNAmkqGX4cBFhfv}|KTigQrI8j^Q* zl6Lm`o}6|_LMRyXBk6V20>o;_07VI2R|hteKB{;-}~?=aD5;OWqbEv zc%O{ZW==)fc}0NNhI-mb+Lmhi3)F^Y+HVJ={{AW8{`B*vH`-cCM7?L^VUZhyz)_pZqM0osX=I9hvWZ^8NkB*P~m`2PI)015W!z8Hi3Raj7fBRWmQc zcEnKby`Y>#0SBgJ@z3$Fat@eNilAdmpy|P0WuBV=1Rm(QizSHoa*~FS=_*yP~>$ zJn$Z+MX7TwsqRcBqLl+u>Sd-(e1107=6y+&P8*Uj7B`K`tM%T%-Ky8@cXiZCUTTgd zjRg2E`Y4z`54pzVcO2DLZEXoyybb8yw zf1ZP$|tCEc7Yhyc;m(inMR>IkzFe9uLJLxWb~sK;2kZi zfK(H+a+dh*jQ-nvuhw~)52`C*(;SRiKdZ5?W|XJ|ys~1lbhT$Ws91l-V57xF>=_~W zx6M)w*sN(3)g|u%GJj*8G1tOuHpb9irREkfohPYS+n=|Xnjfy8tq#2(Kn6eANbAFh z8^rEC!%ogBGGLOD+N+3c{g1FQ%DQ`JehE*D?GyDg=r8ObtXe<8H2* zvC4?+O@5m5?4M{B!Q~#(8J+dH9CYSU8;8Cmo4{qgx~uOsOl*u;Xw6!}7bbnwAsF;Z z#>}s*(7Bzs_)Wb}XL20t3!2^3X}l~vgVU9z-=joaET}6qr-AbOXal)x>$r&W`1%T& zhv*=MpUypv<(HaW7l&$SuX98Ek)#`P4#GwtYthgNF0d9`P`=b8Bi)rXO} ziOE-{M5i-t64KPh8s1;(qp_LtdSlR4@_ibAzlC z{3V#l!rybPzg3oDYw}$aVh1zidFac>`smkdy=GOJtH>!)Kyd+gzuta%i<|=y;V5qO zB0&BHJV@lIZ=JH?%Hf@F+WFsDf+|VxeDqDh9Z2Jv=LC!?TxEy-^5YhR{7FdklG}XE zuPAx(a^;R~@Wc%ztr5fea45nDqVCKs@&z7>)U4H9kBiC)RoI} zHPMt#)IX}4jmpOTs@y}`RL^ueQn6BYwjD!8m#W`e5JPsUMmxoWbqcOYcV6+?@>LEO z7y^%f>6y_=;dYHaWWzBesaYEA{ze82T{&J>jeg{fcbFb9Xb}aMe`f+UxLAFDUbeR4 zIbvmCc=VVaC9fdYXTrv^YRapIP4VDiIw2BSiS6*byKw!R$!3?(B{iqH0aMpapPChr z11rWj%t!i5kOS}NYxfBL?$A0zZrY?{2ofXfTh)IUVUj`JG?Pjta7-@LGwX89RcY_w z!T=_Z!7AnY+5g)x(Qe=z!7xz({*T)Z!S05Su>HN{heK&V*D)jzMg!K5nE{HgV6AS=@S~j3HK?Sk7Wd-hoE3VJe2m|VQlb$s*^5&w&1CzcM=Kg zBTnHY2nTJZ5Wu-hr?hlR6X26Nh4eY(AS9E8uonudPs0E~*}uXpVAl*3d`Sq&%AbZf z^I5@P(+EIH@syU$(1MmT7XfjVzo-_#t$qsGXXOE3%~NPq#(COv!7L0Y(JbA>B>(y-{kNIX->1tD*ZKdt`OVtsbUrQ+ z|L1%}-v*PR%%A}=9Lyd-00|y{Q6_ME;3BZ=OQ0N}#uqlSQ$rZg{tGiO>USC}q~Ztb zzd+%?`8fPNDngqdemm$?NIED4|E)OuH<4W^LBt1&4MV|@K^V}H1meQ_ihmjp`q7)e|ePD*K0L-`AIlv3WVTwiu*9K?I2tPO;%fK#=i@=j8;c_sV>VpJ7ghL*sO$(WwOL+Zq?!0CulF)v8%z zn~m(J+ztvpiToqI*%Gt}2Ru#cLh_^=%cTyzi`OKnmG|02$Eh@0-?wNCwms76R>qM#VQcf1}C(YR0wWiI)#OX0chV;GYnaoS1 z&}-mlCQ5Fp=~;V-?m=#8mtk)fx7V`38l?C(wwht<$1O5~(qD~=!`7_X6u@6&yc7%$ zC_kZA`GMDIr3>{B0FCh)&U(C7`ietj(^;74>FZRq+X|-Z6#@7(f4mDXe!XxmgLkHV+pJell+Ny}Wr(JqC1KL*|Zh$&RI$%a~QG~e< z0XAjqoM&EBlZ8uH+5uRH&Q#4_e)=;?43w5qS#1?|_+3kI`mVSm!oCFs+6r*`z-ebI zEBNt_0(QBVZ!Vz+BL3+_gCh1^9Z=b42GZmU-O$v9ROH^GZXW4Y5z2S&cfSB{-v|j` zaVbMog0r$O;CzheMQ04HUa30@x0p}nG|tXv_#7tIRpsAzhL>W2;1}#EHBB2&NS^Vm zHY9TMa_$G@XaL{`jk)6~@t)y=B)*z$_<{&7ad*!AAHDG($@M>qQiA!2f(H;8XX+P{Me&e zwI#!H=D!D6ex#pyt_L1wcTf^{X)?!L2{vR802wvPC~3+(jhJ!!;xu(>=9VOB08Ist z75FKfz7~8k<~CAGHw6{#IeuJ9^9^neXE#@7ZBJ7qf0fmbQPDFp`t>co&Y$j3j8PXD zdaa0nO>kl(*SKtO~+E>k|~vB5w<dSZcb` zdb3s!Ji5c$AP7EOT8S#L(RljAP0}Fks{i1w($#g?sMWv?A2F1 z+lSSSX1)5q_#wdk)YeA`KONiVebp$?j3CSi@ZBW`A4D9 z4#zZ}7_-bK|C&Jkur`7kdhk!5LiA>7En{eEtQ5PEnfXY0aQCXEWz9UD$wfIazH_wx 
zN*#(w2U596kBiX6lLO){Zs}?rD`O7WxNu}GPpv?aXkgqm5KpLL(q%%y$6qKvwOA!R z=nJi75ucAK^dBLapd%VD#(xT8Lg>Cn5BG>``=%&=^$ok}B#ja8QvNL{A3y#}dfzWk zU^IpAaSdMFd$1soJzM6?Fz-*A_$_>zN9M#vlp~l*Vf0J&&-rPy&L2bD_PQ-@=KR~q zD|-{3((TB5)^?1v*79et+JSy}@X692kpCqpne=vQ9uWkDhX@3O`2P_^3artF_QV}^ z^NRy%kahI>ok%6zNT)?PyqM^g*l3baNG8=S7N1P4otV~_7z|;uKP(c4x|x8w?qu>F zo+ArL=D3t&512Wd+?>?%e_v#1i+(w=77QTar{LIM`e2_A~nf5>Hr~C}b z0%d@ubFbaS^Lak!jAx8JPk;~Fd0)f3uNJGH5n0}I2lNl#5Wl^W;ip#v9kG9VC28k` z!%v9fkBT(kO=$+jK;<*T;m2LE$J;_LdA5K1C241FjNg>sX;wd7Krrurk|qD17vj!F zVZOym0F2A5DM@cteAG!9A~!UeK0oZcM~S?c!4EIR1Ds{1iC4l3^q zDV$Z^;>R@uBX%O6?kOJSTcyrj6TIrZy2tu7f5qv#CHJ>sJeUCeVgvO&hu$>i#pxP1 z%o5M8TRWz?(nIq68x9$!sR=S}qbjYS5)_0|mb2Fin}-quz*uqDO^61I@>e5=-TfWZ zL4EK7BSXfuPQ{C|=au$cEF1WB4LatP8MS2qg-UB~eb}?-hEhjTMWdQH+MeZj6Si9{b!@}fsaI%)ZHu_+{h+~7_^klVV#cz5(UToarJ}IqLX|z?Ph$Uz6 z6bes^&WbA6GS2;D#Xx?laT8=VJEKKnacgo>o;MoUv6Cgc&NOv&!&b!9SjwBqWCHw3>AWGx&GbEBmgL72CT8$~)of6RbGo%W&!ermUVTC_Ro4#`E2j4Y%w^L7<) z>OTio(3o8&sPcO+tjFNEEs-!^G&!S&Zu77qxi$|?t@Jds6SGr!v(d&_ttMBxWi0cy04)=)1uqaNj!f*zV0>vAWvc+o>=teimK!NTX5Ru2z|)aQs>bb-b~Gvm9kY!Lh>GIuE#Y@;L=8ri@K1N zxr5oLPY91JDq`Vv1aa@5Qp-JIR@J7eWl{|p+SY&&KZMT!(3*ssK#Cp3my3_hpMmjH zXO^*f2k+F*do{9qCmU&h@=>*(z|R1Mq>h4cdV+Q;=Ec2Xkg?jX&16-G($=~MwsBtP z+)7j5o2U3CL-q_pKG|+gMR5=vr4EG&7S6z*swL9SBS!{(v8*y^=MkXUB%15v1JFW1 zG`Pr*c4;%~Mxjf}nrkGw`noFW5bNZ~U3 zi(ujad^5~3iZES*{bfjd>3z(!In6QU15PoNSogZ=VHv%?EQd!kfZy5kssVu^^aEGH z1EnLTzg4H;w2hi<-@W!z?*}Wqr;Vo3}$KBg4Me`-a-(h8i&6&#qGcXH#iz`OV9Z zaIYF-0pj)O)}8hQd_JcAdnV6FE7>PUhb+w+p8vn#A{3ROjE`r*E|>bGJ9gpC*_U=5(+z5by(vT+mOelok>&KSW z=`qDBdqMe6&HqxxA=>uU32aE5Sjl%fhs?kVvv!xhx4GXoE$?|(Z{Oe4d4cI{Q+aY; z>G82uLVG~b1Se7dlE5E7R&z_AZxF|_vM67rdER7>A!g!A-@1}G`LR#e2YK>aJ(;_I zt#Vq|!l38(OVz$(9p_RjS`oe^=Xu?H6hO9>uFvOX+)-@v@O+vpE!r{di@#4$pvJfMj3;D#9g~WmKJ=TZucB0M3@i zF1H~}WOcfi&0}=tO0fNGW986^L&ny%D^yKB6Ek^v;8!vU?l&89$~-_v!t%`4&llkk z3UJ?@pYa%`tV*``b3b6O%_NLlR?6bZzX9_nHB2!uO=*CUsh$d3h6R^b-Ezq#B+7C!~;Z$>w$BvB`0B-1;zLa|E26 z8{n8lqlZzPKVU*zm0w6S_)94&ySW7)uZ=CbmrsS;Rgjm)l~#`h0-f$6yC?u%T90x;5A8Oj*H@moj# z*vF_ z>b3t+*lY*X(T!BmCqAxcDOhhq?0YOxH2eqzzZhvl80~{;TQQ7U#e|E6U1{7rgx4FBwNz zaxYZ=K3(czULx(IS*QfctTQ00=SF!xlk1;w2I9fQxgTR8amc5f(mc^w*cj@M-|p%W z9c0ySRO-fzyM=jWhC-n|3RUNaPf>*k9rS5&XWZ}M1{{Qwp1e=rC>=)Nyf%|wu-_~_ zAdKoRsDBa#Laxx*A2!vv;my@yYLu+XMDjGwSYdj~=VW1es^^#?4NQQX@fMjEr^hDa zEuHo_Bedl2{$7h~rt@Uns?~G)${_;0F<`ay!+T!=kkf1&UbOg-%jJ=E5(u??5Q4Bz zfZW2I{ZP2=X)E{rSB$Tleg{rJ{G=tX4xh-*f!>~&q~_aDxXLs`|^g#ZxjMs|xUx8!C`*s55Zc{Ju(h(&&w+ZFlbBDw_jo{xd1#( zDoNZ4Nqqk3az`ywMlD=?LaAI$%&3$%tcY`B&$ z{CXm2jbVaFc%t5$S6;@*Plo`{T(D3^%w2gW@7btRVzue#hzpqP&E$e?e>tfHR7|>6 z3jO6vyqadkBr4h?-)qb)a5chp^{|Z#)-b(2m5yk6CZ*f4d(Aj%#&u==gg-s^$B63z zoyQ%URxv!~6|?TCs{_p=OE**&^DM$Mh4}27>g|x~ISjezFg6Zn4_l<8f~u~BstN;b z6(M%ic`Yv8biS_+nK8ul7p?q74f;uzI9w%1Y^)o%uN2;Drghm#EFOy zV3?LLj}*Qe@AJ5y{wCkSDOmZ^cJxfb>S5b$bRjqikk&oJfSEQ1CCfyV#=p-Gw>!$`VI|CJQBi44rq zg7QQgMgM`yX)aqPDL}op5-=5_R1T*86=gvTE$v7o1V-ZMf7~nu<LG@S}O!gH7P|wNDG85djI4 zTVTSPTOl&sbaZFSy;ZZvO+!Q00S25^zvF|PeLaNq>sCUUsq#cNxEhuH@~jB-QCpH3 z(b0>KVpP3%?iT5%RiAPluT#0V-l8?WO&YX0y3;{_J#>RHxE;m)@+^W0;H36!iVX3L ziiGs63T&&;q657d1&1McI=rSC@C=LeIM9E%+;;Yi!`rzW6&GZvC?EPf`T~B_2>2sb zju~kU|0YnmXOckomFhP~zjP8G)^EQU4Lc5vd%IVLBuvU9OpD4>x|jB?gvlGRMB^jj z7NjMX{=pMq3}Y;RBk3(Zn0$*2tgBp$t%IJrSle8{00=hLmHoL*n7PThmhAL+b$7c( z`7Ne!R`y)lo{ML7(NLr1Yy=GIThd_7XnZd2F^nsN4^SF^X?@vAt(Ef8MJQvKY_v4g z^l^ygsq@!qtS~X9!*1e)O%B0*fqm1N_LHfK7)l(ebv;Noe!dtz2vu8%zPSJHL{EC8 zo3}(9Q2~=BDP^ByGdllvDmsrYL4?QFPz__{-q^FPTYp+QTE& z&6sKnO_MmR(g#?lky>!rMGeE(Y9MM;U|y#uB%*;1v&eVROYB4v|Mwt#X{Fa$vJu!= zv!g=uuQSGMU*92>vshCoqDt9o>2?>K>P>K~@R>Di|lD?w4 
z-w@H9rQ6!_VsL7?VR~ow&c45g?UCB4^|0axGNNlPWSmnBl!kVc@MbouMc!FTLuA*! zoGeO~=+ls;2kkf7+M;X%olh{4d}he)zHMg+9)cfG?9$e8R)MM&9EWcltT|T>ZFHkQ z75uFP{2i)<&dvt?oDdozl(E(kmAVuy=MuzfK2y!;>|0N{+RnWiQUP+{h~XTMaxC^2 z-#9OYl7pxTD@LRx`&4JEb7Ey8gPiyDAAjGJSi&e=?yzAR6`@5dKGh3!GOnQ3(SKUTHioCUU4lr zN|!`KG>s(s{!JSsgt*{OZ)?;ju~jJko9?aXXa;`q6-wXCpdTJ94E$*K8?t?& z0~hZ+u(yDFnW4Y~oXNqQDOj5XA@%-L;Qp@jt|`n<(Z17{W&siL5Sn;0V1RN0UAX{S z{4GO~lS}w%$y4D>` ztW0}u@ij4ev5XI{@vjhKgy2#vCeuvOo$<`B0KLG&Z}cj%!o#Rp%FA@B16&8BHc%^4 z2UETuJZ*{Wn7J|2mfBTJ%^0aXcpl7Z?rk%?HnR`u^KA^y}m9^A;{dJ-X^d5E+jRZ{~BlB`cdeqlbR z0~d@ucNeXr%#+;TLi-FL-KuH5YB7=(CH+mfH0GW}4FHGK12yZpHU{LY2`*8m>ZNOt zF$D3$c9lukXla^FJf$Z?9;FLI&MG=>o_O2onkOf6oefV6-Q=`p>m`%CsTSCxPWn5T z)oAalVU+0lj8h-ub|sqDBNYlYZt`XGK#obaOYyApUJ#5}>O2#vn)E-l73r3PTIanC zVV4B&22Qu)z8A^)v2`d8FV8Dl@8M|U`s6N{@@<}y9B3GizhTquc*J|cRSkG8PkF06 z=2&LOe2Nb7v3qmR^7r*NV^jhB3Q*Hyryif{p`k%$d-IBCa9h^|JAy|IqqNSfK)x3l z`K{O#vy<_WD?E-$6%HdVx4(16%&?Cc(D+m(DuB(Y-rj7vl;VS4RTaZ?%$J-7f};xN z?S}a%0psf#w+iy4!=tFcNXxt}1I(7#z*Ws5HpS8~&mL$(+qJkX;(uuncf{ep-9?Nr zEmf6RHY zK;haczzfv(q$HE3!U?-318)D91$#M1tckE;y)#;%>0YUj6&7cliLv3FV6<*%gB4m7 zbTe7dhf`A_4r2UR$_}pL=f8SrmB)Da&sTWvy zoYD4sM}!8Ma3t%K1*Q^HPZCD(Rpf{L?aD==Qs;aDOLCYn7%BdbK({4knH0v}>b{os z=1P;V)Q*1)804$5-A9$qUtl6@+~KKoyL6KKaH)?D;`!9|EJ3h(^|CXqTU<2!}6KX!Ce$@% z21n;Pbe&(!VJ2^d(~;VF=&JmY*J5=A<;GW3u`}a*?H6>}Ml`auW&aS9g0kKqxNXr6 zl7QKaKrJs{G!OKDKaHbwNuUc#BA8ZLI<_v1`!vCWA|lLoC`81;5XCuH2wB8Ute01G z0p3b>HIhA-Dc*Tn;w5XgBJ(4kLN+}P^BOgh{Fj6;s^WhfEI8M<>8P3WW`AZpzIQ%* zUq9t%zE2CnK&uA?PmICo>=U=T<8iaH&^TkGff&W)cnQb@;lV{LX2o94(UNUpcO*B4 zQ?!ixCnZ~WrzZ&5(A{zpoCY(~IggH*2K_}{=G`cDCW)Gpp71x&`z>-Gok#|=jXOk# zF`lS(-5q$Z2lR4p8o9kSc*@;9c+A~FS@TFYhsPcho|rrIrtvjWd;DA7nggFAp1|LP zz~B2p#J*Azr~*^CgvJ0$GGDb3o-M{jXhDkoLlgy>w_u@RP>TBe z!+LMAm@|#wQ(VZ242sgSY>sUVt>mor52KBF`leNm(sa4$h$r_p)J1bSjFu@;Z$7&! 
zIZCBX~N)<#aW$A;(N83>%U=hl&n*EFUbk5OP5=GAirufqPu(R zMLAn)T}_mlUdw~`64F}TDeIX4~?${v>N4X() z`#8z@3iot9)%x3*srPwddZTWkAu_W1lN_B1`^`VZfLErGlBKf5Ff>3~JJX=C>RLa(jHxP*MlJ6`C&ns-oN%Kb@i zNr8fgjAD9V>A~e1w01+4@{<(`S#6i&)-w6lqlF4=(7~PT?B*HtWY2ZJdw=(DVR8qG z`xXGVZe{Y4idL#3>zb~?Iaf!=P2{uy#*yg0l%_z5y$@NrdA&JcQ%Tf>yD^W_e1?IQ z2OEuEYR+S#jdE^Us(~JL0f)6s<>5(fUua@Vt65C8a-J`{9@*(AyJgx$*~^1ap;ubw zTx65u2Zzx*MM}a*CW^VohziEZ5SBBYgY@1;ri%IBcE7aCidMWiJ(mi5$me9sQ;|lO zOQQ*vh1gbEx6m;lvo%{~$yuR}w5Gc6jHc2)^NCVMlXaH1-Jq>CEcZJb_m29ME>CKS zSCnZ+*j276wPaD%R@u7y4`f4>!pYn_8+(G~v-L{1*GwiXaYK19f{03@<*$7&C+cD) z{~%@tDzqtc@+HMxO_W{ro>ql6C;5Hwg4Q>?WZsobOE)XvIhKdkeLL(5n4=|Q`g$LJ zZ#h$Bu<>x2yzXSFU1k^Hnmf$4TPi2ZU5ko?y}NVFG^B5zD46P%diPc9sgX`*(l@-; z$Gs^k-BN%+LCW8&i)_Si{-7QFaY-Pi{p)c?{55+|eDzwaznRwL z2}Y~4wS5ZH3^5SE+OA-PmgVKzlYeSnPOep49GV;) zgCsv>Fyd*bS%nRKOch0a=s`p4Y)&>$l-oC7rwJaXoJe>O331{R^Q{N78)vQ4zcA&X z<=|WP;Ifvp0>sZhY`A3IqtLsg!4HSQx4h8Xl+e3n(A#k+y@aGw{LM11ga&;510we*h?m4td*dR>tvLPzJnv!BHOiSHL%smed$gA*;DLX;zplORIwb`ya4wO_9FdT`poYa?8&Z_JGaQd9wc2VQVNpK zubMD&wr^OdpP1ju!lWF6&^&4lDGLlYCHN%vIDyVwLC<9}7Ig>6W{OPYp&P_$2BXGp zgrWO4!3#*&Vv5NBkPzq8j0ZLI$uv#8kN21vo3j?A?bl&Hq;gi2|7BAxZv+5E|GJ_Iz@Ak-kU-ehrEw?s= zkU!N6op-HzH=hHRYf}blrxWmX){qp{hy)HCA(kP@AqF_h^Q}0aYG&0}OT(A$c8Z=3 z@3~ca?6x-=?WbdW-Q}w@a+iLat<=VAW4X7E=@<8u3flmF^Yq&wIIBt#poMN2f;wJl z;Qkx>J~veCnq|0!%Pp1)FTFjX!=uGoa=$tyY}GJ{D6x-uyP3EEMwfpgge@HCnIMj3 zK0EXV9**k|1o`ZS${#lt^My4)Gr#Uw<2HCJ2{DEJL287u9;-2l`GC;E5ZcX!mZer_ zAt@?qJN=+&sO#!xRu8E$unmjiUz~E5T0dNmGPL(S$(R%r+oTjP8wC}mM?R{|IuF}_ zv6AADhxQNd)2s};0yD#HJ+xj~2X^%U{4kSQQqXTN4u2-MSfuudv0tJC4Ccx>qvc;x ze(z{Hy^fJ*X$Tae}2IL$`)e8ybk+A|PDC5o;*pN&5 zUm^C%PG&{3v7w`zW^HFv)3uG+^=HG+uSEXe zX>Z0jnb+^P&$p1zovzm{m)Qrw(_Ej^uzwx5SjhDIf1v=3r?RR<)7ZcTxr9`UQ%z0#>^WD~5LzmK=r)H2@sU4Nk&;@oSenx9&d_==twfQ?K{rZ%0*4H4yrKzsDfNBdALL57NOqN?!ebG<5S{QH*yQB5}j4- z^%u!c=x~y)mrp8n&R~Er8JZBqToA9Aa|m94p}Swx>I*rh&TIkqKzcJ=;<9t9mH&d( zUdK(DG=nWkt^nxvd}-6lY2W3jFZ$S81K+aQ#%^oh=_oth3NHuwUqhXSqpnQ4qrGga zo8WnBT?*|6y^q=Efi<6uz4u4%$EvVtu{qs>jiP#{Qeo06E>oR9b$;7UM?J`A`Ws0Bv+?h#Dbz)gpIG?FHMjqmz^rrQaDeuA64XnL)uWabTfJvtq5Y z2?rMmg<>b+8fGkh#Jhdo(nWH&CAXS_Zz%dlWe}0 zAm-~7I zFDBv`!nYaK)1!xYxj^LVvLwn@=sqzG9se?_Vh1kI6W32|>Q5e>v-r#t1~~981$r?< zjMIe)FWAazBX`wVqs~EF)ke8yO5=9(;e!PYTvfu~-z3MR$dOXauWUfw;l7l_>?HeHypmUA<82 zDGfYyHL$l2{4L&Lq-9C5SuNFeXzJiI6xf=C|AQ5$!7h=E9f)uVAAtqO>1NjVC-jMv1zMG8c-f2U5?{Es#<#?APg%-c?~3xuAC>;D&}6m=DbP{8OF>BUd6Hjp^{## zy@h^Er-@Nsou-&!BxmIA2-?)xQ&Ka)K@%}RqiQQWB zyIYw|@b27RoMe{y1cR0s(7}No!INDdv4Cv~Z8n65jF8o&+{JSVWG^C)C9hHAj0Wb) zvh;(h=#yZu9)t_xi$T-kyWtqZCP^gnk~-O%&Mm8|bi!JbC(4I{IOug@vy*S@FuZ^k zq{>xXL#iCm&aFVY(V_MUn6~NW-htfUeB&@E48Ka`-)Gc1rCx&OVYk)vR-+~dFt@;H zR~tx>)%1g};}Q2K8aHy68L|}BM*@F0Mmnc2kE$}WW^M4x&^dHbe@3-(CecKV)ZPds z3KrZ`f-bj;c^E2`n&@a9WcF{FyX60B@UQdD*C zh%eBkT8!7DO^U(1!gG>_T+*!bP!C#lFzL8_L_1%W!_}cvp1LjAChfPVcmmaP5$f~@ zlV(%LZ}~#T%d!BE-f0MRjWQbAe>SAX;LGj-8c~=zt)1!kwi?(w5fO5by-H)Q-q8fB z=N?Z!IRyDoZr&LN^XEQK^$MDElCB>}f3(W{aOTc&dJ$mrG?r#;VWPZQ%8?i`1G-2#!?(D2t2c=i-3U)SCk z!MOZc{Ap+tNB>%1UZC(P<3pP34~t>hqwo)mbDR%$;k~BY4-QcORMa|G+~wh%Lefk$y1cZME)05&oScHkPC6+4#liit*JGkj=Jq8rf+1K7G6T-+40|M6si z+lso6VD?bI9%aY9foYIka;Y0mW2)6YU4vPqd)oC*kQ3+lcLv-SY7vqWjikpNMV4%! 
zp^Y+NM)KVt=62C5{_k+}{Si*oC7f)!g&Shm6xiwKd%8ki*`}MHKjG5*CKI*Cb$lb` zLaDP4*Ze*Q`<8KE2k_b@@^JVb!+$e{!s02UD_VBiu?jFU*ou6aNSqS<;I#C-+`-vQdZ_m| zj$-Y927~xj0)5O!(VW-2bC`30Ly9m(WySCJze^fZ@@5pH#Jf!tBO3F}#w0WxdH$)5 zS^RZFF1w#~^$P{oJ~k!tMva`LH$zWcpj23OMdBrQg!jwk;NB20xDMh&SMkujpJL(& zmOVZmV9_V&VMz{`-NW+y9b*K1HO|!CRq^~w1cvMr0LuiH-dDeHbAXe7MWruvkk6ki ze|lzsNRmGZ|B@T-(i`PZSIM5gwYT_Ono&98%1=v=OZT^k8~zy%;RBb?37-^m{*4%d z!6;^bWng5R;0HHyv(oiPCpwLv6OOP7Xx2`6Cjmxu@%&w!5_J_$95+_X$-T1=cZU*| z;jEvSf%nV<%G@msi)d{sYB?aqR}28F__D12Eqie#Fpta^scXu#u(?VmJ zh|6V>H#2w3&O6uIfSr{bnSQE_O;`W{3o(iUW5E<{kxW~RyF%* zmF??L`W8$cys`I!8V}~a`;-TheN()fCshh zWTpu(H_R+m1+Hvf4_75ugS-6^C~rc7XcXC{={AQ#%@0BgX?fBjmPOa7&YJp{?R2je z(E7ziZkpeP}-h^n3g z=PcvekCqjWT>DRI@oaL@n4&)BUoYqj z4IuWK3P~>>lh4>3%+7@G%9MdaU5KE1pTtk8A^~ zpLm#~8PHouXV8LN%>{wy?#SAaN4PlyQ9|)gW?y{-1OvX)6!^LJMq!v%wCLF~$zM`} zBB&D+aq$`&PQhR1DUwzP#w`P*^9q!L8y)G`rPQt%X6cem6<7|B=Q`2AWNyB5)F)|@ zX9|`EZ`6v1rL3`I;afLXes8hNA~Z7i*_PvPHUMhoxV-u6oLKjfrjO{3A5Hno_+hlQ z;0I;!R}foC3EdImJIw?wRC#?~seqKYM{Hsf8c%9~3#3_1F?KUjBBP44y`#qDL`SN9 zzM?P*VNK!p6TlIAfu$3O{PJvMkNqdqKGcFW|J3r_B|lS^^&F%7{aj<}#pJ{;J?OB# z2yr*27ewL(Q;t`rKt71Ar^MIag3U|A^O16~4Q#nr#9^m~t%>F6vMhtPJ7h;U^hXEz z!3(C~s5(gWZ{=0lI+S$X%cShsKOu3!9PMlTV#i64>53OGjW<}}H}P%5CO}yLKBPP3 zr-YD8#-*wyenrvJ%c1C=c=t&Bo6W&;j8cD?x9;w-zkt%L>hqk!MTy2mkv&Lr z+zzZzFJb*i}oikZz`{GAnGv``kV`_UGZu8 zAr%>XV2l9W%(bz*2u)48q>s22jALTu1r;8RdXO!=KzO(;-icGuB$w3%-AJ&QaRf^=Nk$;hBeo>z?LEhmV1SD)=?hd=hhMNW)5cOy#!Oc z5=KbRO8e%BJEaROsBC^-w+ns3i5!6OSI>_o=H!tX)U7o{KYE8-O}wR z6>Ybb^#aw2RR3V%T-d(|__N)7b0ha-f|_BJHR%C>XHcD>(M$@fo@SM7FWV5*=#bt` zKrF-o3gz!vZXs`Rsjjq@lLS;>MvZ9qo7Mh*NN|2oXy3o&!TmA82dap4=yy0lg0W8- zN_~Oozi_D1=3mc|sX)dXBkkpQ`TR2uPP(gIa&kZCNnz5GEPqSOO+t(?$N2dRhDKPx z&hf8<9-By)#U1=JeQIyA>+|;`{}5f{nBf=N84OZeQZcxT1+82kk`GebJouY;p~DAx zx%V9C{f1X-xXUMW%I)O~{=ns|Gzh9AD1{5u{dN9@@otZoFAu(?7Sr8}-MRyY`|)|6 zL%$plRzX#aZtG@3j%IaJc9Js|XL4XC4+SV4bcZr%P{k|pcvg*ay+T0@Y0$I^YR1Le z)pryY?2$AImlV`rQ^dp%bhPvxde9Gb^@@0tkQ?@jo{a7}K*^_J=lx0b1KZaH-YTykugF1=?-P!{s_#b$ z101oUUP2fa?pq|F2dUhf0rtfk1|k$#Z%=e+`n3#DtD1krv0I8)I^&v_$OGxmk`?>! 
ztjTyOgZp0~y_xmJT=!*7ti1!Al67tiTm7sZ=opzkD@bpnjvfgf@Fqsw15jSE!+%`T z|4aA2xy_?15W<>qc;t)I)TScGKt;TBDNv5aZ)wlf;|7otZ#7b@m~_J>Pbq-28`|h= z2&c)^LK;&#U}6aInyo8YW2fA%AyC9`C{k(gQEZySud?-DlUi@+Ln~Ex!;A?Qz>u07 zbR7}kn1m5?xxsKcCfJPXW-^q^NzlWpOhAgZV0G~>m0&m+>e!=0X8F8>*GiUABIRGq z64~Ig`gXB}=b$C>Vc{X+hhbpj;8g-XMPM2gxH~hY#Q{{gRB}~(9K_OPnSJoUVvsBJ z!I(8RGl$SPXrnTHNcTbt%sbSXN<>9&5L^+P6h}aNMtNW7$up~tP5GuMS(0>8`m5$@|#B%W}sN)QW#%r|TWl#R@ zWcLnIqQmn_8aamVzw8ClMZhRy;{=}i7Fn1?ObImNn|y&&IKCjSTSL|kzppui%O97Y zS+rK)?Z~0h(x~^WdN`i3gSc=ss?OdSl*?!##OIBw3S6=!Q5NWA$USliTFx@g=oDhT z^onX1VyVyASkHP`(;*V+iv26Qs2!vkd5D#@bDedfIWjJwgxUkv90Ce1nPur}Qc_GE z5aBde{91|<9bMz+Wr8EeQcMD{Fc zhLjIcxGFm_QWRy0EGb(=X-u~4k$vq`;|uv;^DX6n=b7i8bMEh)`_6mc_j%4U_kE9= zn!8C<42C>=E)wfBHS-=zaUc7Xlh+j>{fMG#9-{k?OwUA2?{V zsObP4UtD$rP-dwb8D!R$sS`rwEYcG9= zi!LP@3_VC76gja)uF-Bw1YEXjEfIX#-JOwfG{}Rpyz_-ilcvj-D4!D@!|Ute&|Poi z9xH1-IU|r z;}+Kms#AB874av^zOH3UJqnPU7qPBrllSlzJy+~jl+$tCWeriN|InqwPDV>+_=`dvjd?CDp}fe;@B6Tihr=M;4W&!eqg~nh&bYAvPmO9D0Y<}PnYMjMU!ivloY_X zUy9mg&I-lo9-{SECr;McxJYa}@-r!5wE6pD2vdeS798|y#f|}A%DrrgOpQrB=hmv} z6C!2e8d&sxRJbuPIhg%nf*09kzB@km>aE{pCk#ui_F5UtnOBI$C_QE z;y1!P(2XAo2H%AFnP{FLF#k*3=1^V>S?p1_X8G_3z7F|ysiB`|UhdxTaqbm$UonNL}{Qnxx1bT4#vXKPty9PmH5o9eI8uO@%cq}kFQ)W@uw&1msBuYISN%Vl#m zOn&-ajlgYm`7okqKs2uQ^>7x}Cw{Z`dQ6yAv-p|;Q=ZF>vsj+?Vo~hv9{rBVO6gI~ z^%J=oa?Xr$Yik8@a&en3haFS>IUNyGQjfPS`Jtsfju+5PTJGwD8d|IOy+zc#uz zK7IYX(fq0H#nQ&7&%N3n->*e=bX$I@OT|b=C{*jiBGw;Al}YN<_~_PHr==DioXd4g zzY-kb{Rm%D?}@pQTIcgi;QOJ}~AK?)A( zmz=dA1Q~SRv3TAyZOw_Bs&3+aI-umDuIQTHmUaOX(qf^kY;jbnd~k8LvsBS5(``=E zr`+p#Kh9#gaoy%-b#uf>R_CzPIg$cbzq;CwGOOE7!V{>uWqMY~vgb)wnKr##QZC zlMV!Bqi+RcB(XIX3@w`mry$#ki0C_$j*g!+-u7{@*>r>~@>Yg*IG8{%wN3hyiI*cx zznwRmYh9SvYulzB3^Q!U%(!C{q(_%WI~4KPtlg&1=G%>RyY!L_>H5;vas7Ys;?HOq zbZ*j>i>(>)Ho44OHlQayZplToZ&t^RcMvvZ@Zz5-iZk-2JSVg8W~~Bl+iYCC<&&n0 zKNAP&HeM{fy`GWzF@bl~>Zb8Bi&eVj9k0FvD4(-506kwl{MMz5?jZwtI=EOI=2S?_ ztuoweJ*BHH&kE0XCLaCn8<=&yX~Kkec(XLI>DlnGkL-_2m*8{0D>;f|%|}W&nk0J( zIUx@Z-QqchYaMsrj^R*>CYTQe-pGzO=h9P>T9_LMo}yo0_C0>|UVX{Tvbx<({G+_E z+mv*~8@(*}HzyozcJRe=x&DfYtUE&lbkBp}l9FeX;GG@wDGv`mwUele3-4s2tQzW1 zEq71N$zc(cDr{3dd3+RY=#>!IX%<&U@RYRv=#Jy?Cw_b9cglFIQEX(F{Gy>OHJ~Xu zydW}g7q&e7qj^N5VTC6yDR38lf{(6Etk!Ait4KXJX0u=WSz--YpsUK*siE&e{%$dvp$8>=!Vt;m z3Our_qOXFdHJqHEIApiQYI|Shy@dK$)Me(0nBy!o0tJ?QpR0bZm4&3@KOQwekol|4 zkUgXYd?sCH%sTS=fPLTX4-zD1X5lE|uym0jTywZ_Z#N8g@|7^LvyPp%mBTOpNp!DC zRg}bilrDpwJy9q3`R!JAvJ1Nx!FVg(4-`$A$yz_fwXAfYV%?49_0XnGLF|W&SIVuH zKc(z--Y66Mkg8sy)+Da^2hM_;I$Y{X8@Ws?bNqvad_#qchMn#AKPV&05C<5vl{PFF z#+cEkT$#7cR_?&R{qHQLyv8A}GZVax^H=L43)dr*4%h3Vw$<0O2QW3O4XkS;2 zu;OLDDJ3E%TfONq7A=q;7w3XEzo@`vd*Gl>5eiJSLRZTepio@06(-_PMuYg0q%DmxihFyzy)3AfWCX!g+OOFA9op7A1@FK=V$)%i-#p65u*Ev$8Z5qdjwgj z@`-n^>=P379_9gG`f-A5Gm3yPElBF%f>b29HOdOog|IS1-qeE_oy173+9Dsq2yV`> zqNtUkpkgkAP&T0TF9n^6(f&+SaCuf4wZH1iFNzp_5b;k97)jLr!XCdvA*>K&0^AU! 
zvMlqOK!^~vh4U$bpf1$Hw0R^DDM2-@r6a-dc~;cE?LfbRLG99%`Ul+G9V7&0BUcwsTEYKCYBurL+|tBRi;Az5cK=95Se{Y`AI7DY5{}#@AW)H2=0R}YyA_6 z0Yop#1J+hl717AN_hV4hyq5+BTZPu09N*_hYYSyshN%6?*Drz?J#&tlib*cX1I{i` zP(?6l2?GRN04Lwcfy0Ze;N}t%@bQLJ6ez#U3Z5QCp#B?WP$1!7!Jv+M69h`bMZx)D z3?Lp1X2YdG%@GXX9}N18r~!GwU><@Olwn4KBajsn3aRRl$O>AHB7xy6V8EyY5O5Vd ztBwMfMo$1FJXM^XMuCoFNPy=CqzZ%OD2BaJ1bQ9$eTV_EAZhFZ@E{&KNq{cnDnLde z6@NLd3#=wnMTrShARnZP*%Nv|S0+_tnA8Q{=0Kt(=&8gFCQV|1o_wn6dz=+CnP3I= z)PBFycp>%TRW&w2JtA3~xW7^(TU>#->$@TL?p34@M0e> zCcZMNi9DqbSU!V9G^jBx50tln0n_F{Ll>yvDhry9L7naK`<*A=g92YXfT|Cmq^H_m zze)W6eE#=Y5TpO--8Z{G4lAJcrt`lCF?wh}@EFd=ZDvFVtyzM>l%dZ8G?w|vd)og1 D86?w= diff --git a/js/react_native/android/gradle/wrapper/gradle-wrapper.properties b/js/react_native/android/gradle/wrapper/gradle-wrapper.properties index 51d930a381f3a..012d6d90445b4 100644 --- a/js/react_native/android/gradle/wrapper/gradle-wrapper.properties +++ b/js/react_native/android/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionSha256Sum=7faa7198769f872826c8ef4f1450f839ec27f0b4d5d1e51bade63667cbccd205 -distributionUrl=https\://services.gradle.org/distributions/gradle-6.8.3-bin.zip +distributionSha256Sum=cb87f222c5585bd46838ad4db78463a5c5f3d336e5e2b98dc7c0c586527351c2 +distributionUrl=https\://services.gradle.org/distributions/gradle-7.5-bin.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists diff --git a/js/react_native/android/gradlew b/js/react_native/android/gradlew index fbd7c515832da..a69d9cb6c2065 100755 --- a/js/react_native/android/gradlew +++ b/js/react_native/android/gradlew @@ -1,7 +1,7 @@ -#!/usr/bin/env sh +#!/bin/sh # -# Copyright 2015 the original author or authors. +# Copyright © 2015-2021 the original authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,67 +17,101 @@ # ############################################################################## -## -## Gradle start up script for UN*X -## +# +# Gradle start up script for POSIX generated by Gradle. +# +# Important for running: +# +# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is +# noncompliant, but you have some other compliant shell such as ksh or +# bash, then to run this script, type that shell name before the whole +# command line, like: +# +# ksh Gradle +# +# Busybox and similar reduced shells will NOT work, because this script +# requires all of these POSIX shell features: +# * functions; +# * expansions «$var», «${var}», «${var:-default}», «${var+SET}», +# «${var#prefix}», «${var%suffix}», and «$( cmd )»; +# * compound commands having a testable exit status, especially «case»; +# * various built-in commands including «command», «set», and «ulimit». +# +# Important for patching: +# +# (2) This script targets any POSIX shell, so it avoids extensions provided +# by Bash, Ksh, etc; in particular arrays are avoided. +# +# The "traditional" practice of packing multiple parameters into a +# space-separated string is a well documented source of bugs and security +# problems, so this is (mostly) avoided, by progressively accumulating +# options in "$@", and eventually passing that to Java. +# +# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, +# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; +# see the in-line comments for details. 
+# +# There are tweaks for specific operating systems such as AIX, CygWin, +# Darwin, MinGW, and NonStop. +# +# (3) This script is generated from the Groovy template +# https://github.com/gradle/gradle/blob/master/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# within the Gradle project. +# +# You can find Gradle at https://github.com/gradle/gradle/. +# ############################################################################## # Attempt to set APP_HOME + # Resolve links: $0 may be a link -PRG="$0" -# Need this for relative symlinks. -while [ -h "$PRG" ] ; do - ls=`ls -ld "$PRG"` - link=`expr "$ls" : '.*-> \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG=`dirname "$PRG"`"/$link" - fi +app_path=$0 + +# Need this for daisy-chained symlinks. +while + APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path + [ -h "$app_path" ] +do + ls=$( ls -ld "$app_path" ) + link=${ls#*' -> '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac done -SAVED="`pwd`" -cd "`dirname \"$PRG\"`/" >/dev/null -APP_HOME="`pwd -P`" -cd "$SAVED" >/dev/null + +APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit APP_NAME="Gradle" -APP_BASE_NAME=`basename "$0"` +APP_BASE_NAME=${0##*/} # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' # Use the maximum available, or set MAX_FD != -1 to use that value. -MAX_FD="maximum" +MAX_FD=maximum warn () { echo "$*" -} +} >&2 die () { echo echo "$*" echo exit 1 -} +} >&2 # OS specific support (must be 'true' or 'false'). cygwin=false msys=false darwin=false nonstop=false -case "`uname`" in - CYGWIN* ) - cygwin=true - ;; - Darwin* ) - darwin=true - ;; - MINGW* ) - msys=true - ;; - NONSTOP* ) - nonstop=true - ;; +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; esac CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar @@ -87,9 +121,9 @@ CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar if [ -n "$JAVA_HOME" ] ; then if [ -x "$JAVA_HOME/jre/sh/java" ] ; then # IBM's JDK on AIX uses strange locations for the executables - JAVACMD="$JAVA_HOME/jre/sh/java" + JAVACMD=$JAVA_HOME/jre/sh/java else - JAVACMD="$JAVA_HOME/bin/java" + JAVACMD=$JAVA_HOME/bin/java fi if [ ! -x "$JAVACMD" ] ; then die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME @@ -98,7 +132,7 @@ Please set the JAVA_HOME variable in your environment to match the location of your Java installation." fi else - JAVACMD="java" + JAVACMD=java which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. Please set the JAVA_HOME variable in your environment to match the @@ -106,80 +140,101 @@ location of your Java installation." fi # Increase the maximum file descriptors if we can. -if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then - MAX_FD_LIMIT=`ulimit -H -n` - if [ $? -eq 0 ] ; then - if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then - MAX_FD="$MAX_FD_LIMIT" - fi - ulimit -n $MAX_FD - if [ $? -ne 0 ] ; then - warn "Could not set maximum file descriptor limit: $MAX_FD" - fi - else - warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" - fi +if ! "$cygwin" && ! "$darwin" && ! 
"$nonstop" ; then + case $MAX_FD in #( + max*) + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac fi -# For Darwin, add options to specify how the application appears in the dock -if $darwin; then - GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" -fi +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. # For Cygwin or MSYS, switch paths to Windows format before running java -if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then - APP_HOME=`cygpath --path --mixed "$APP_HOME"` - CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` - - JAVACMD=`cygpath --unix "$JAVACMD"` - - # We build the pattern for arguments to be converted via cygpath - ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` - SEP="" - for dir in $ROOTDIRSRAW ; do - ROOTDIRS="$ROOTDIRS$SEP$dir" - SEP="|" - done - OURCYGPATTERN="(^($ROOTDIRS))" - # Add a user-defined pattern to the cygpath arguments - if [ "$GRADLE_CYGPATTERN" != "" ] ; then - OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" - fi +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) + # Now convert the arguments - kludge to limit ourselves to /bin/sh - i=0 - for arg in "$@" ; do - CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` - CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option - - if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition - eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` - else - eval `echo args$i`="\"$arg\"" + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) fi - i=`expr $i + 1` + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. 
+ shift # remove old arg + set -- "$@" "$arg" # push replacement arg done - case $i in - 0) set -- ;; - 1) set -- "$args0" ;; - 2) set -- "$args0" "$args1" ;; - 3) set -- "$args0" "$args1" "$args2" ;; - 4) set -- "$args0" "$args1" "$args2" "$args3" ;; - 5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; - 6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; - 7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; - 8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; - 9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; - esac fi -# Escape application args -save () { - for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done - echo " " -} -APP_ARGS=`save "$@"` +# Collect all arguments for the java command; +# * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of +# shell script including quotes and variable substitutions, so put them in +# double quotes to make sure that they get re-expanded; and +# * put everything else in single quotes, so that it's not re-expanded. + +set -- \ + "-Dorg.gradle.appname=$APP_BASE_NAME" \ + -classpath "$CLASSPATH" \ + org.gradle.wrapper.GradleWrapperMain \ + "$@" + +# Stop when "xargs" is not available. +if ! command -v xargs >/dev/null 2>&1 +then + die "xargs is not available" +fi + +# Use "xargs" to parse quoted args. +# +# With -n1 it outputs one arg per line, with the quotes and backslashes removed. +# +# In Bash we could simply go: +# +# readarray ARGS < <( xargs -n1 <<<"$var" ) && +# set -- "${ARGS[@]}" "$@" +# +# but POSIX shell has neither arrays nor command substitution, so instead we +# post-process each arg (as a line of input to sed) to backslash-escape any +# character that might be a shell metacharacter, then use eval to reverse +# that process (while maintaining the separation between arguments), and wrap +# the whole thing up as a single "set" statement. +# +# This will of course break if any of these variables contains a newline or +# an unmatched quote. +# -# Collect all arguments for the java command, following the shell quoting and substitution rules -eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" +eval "set -- $( + printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | + xargs -n1 | + sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | + tr '\n' ' ' + )" '"$@"' exec "$JAVACMD" "$@" diff --git a/js/react_native/android/gradlew.bat b/js/react_native/android/gradlew.bat index 5093609d512a9..f127cfd49d402 100644 --- a/js/react_native/android/gradlew.bat +++ b/js/react_native/android/gradlew.bat @@ -14,7 +14,7 @@ @rem limitations under the License. @rem -@if "%DEBUG%" == "" @echo off +@if "%DEBUG%"=="" @echo off @rem ########################################################################## @rem @rem Gradle startup script for Windows @@ -25,7 +25,7 @@ if "%OS%"=="Windows_NT" setlocal set DIRNAME=%~dp0 -if "%DIRNAME%" == "" set DIRNAME=. +if "%DIRNAME%"=="" set DIRNAME=. set APP_BASE_NAME=%~n0 set APP_HOME=%DIRNAME% @@ -40,7 +40,7 @@ if defined JAVA_HOME goto findJavaFromJavaHome set JAVA_EXE=java.exe %JAVA_EXE% -version >NUL 2>&1 -if "%ERRORLEVEL%" == "0" goto init +if %ERRORLEVEL% equ 0 goto execute echo. echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 
@@ -54,7 +54,7 @@ goto fail set JAVA_HOME=%JAVA_HOME:"=% set JAVA_EXE=%JAVA_HOME%/bin/java.exe -if exist "%JAVA_EXE%" goto init +if exist "%JAVA_EXE%" goto execute echo. echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% @@ -64,21 +64,6 @@ echo location of your Java installation. goto fail -:init -@rem Get command-line arguments, handling Windows variants - -if not "%OS%" == "Windows_NT" goto win9xME_args - -:win9xME_args -@rem Slurp the command line arguments. -set CMD_LINE_ARGS= -set _SKIP=2 - -:win9xME_args_slurp -if "x%~1" == "x" goto execute - -set CMD_LINE_ARGS=%* - :execute @rem Setup the command line @@ -86,17 +71,19 @@ set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar @rem Execute Gradle -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* :end @rem End local scope for the variables with windows NT shell -if "%ERRORLEVEL%"=="0" goto mainEnd +if %ERRORLEVEL% equ 0 goto mainEnd :fail rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of rem the _cmd.exe /c_ return code! -if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 -exit /b 1 +set EXIT_CODE=%ERRORLEVEL% +if %EXIT_CODE% equ 0 set EXIT_CODE=1 +if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% +exit /b %EXIT_CODE% :mainEnd if "%OS%"=="Windows_NT" endlocal diff --git a/js/react_native/e2e/android/app/build.gradle b/js/react_native/e2e/android/app/build.gradle index 8a84b0d5065a8..526259e3f8d8f 100644 --- a/js/react_native/e2e/android/app/build.gradle +++ b/js/react_native/e2e/android/app/build.gradle @@ -193,7 +193,7 @@ dependencies { implementation "com.facebook.react:react-native:+" // From node_modules implementation "androidx.swiperefreshlayout:swiperefreshlayout:1.0.0" - implementation 'androidx.test.ext:junit:1.1.3' + implementation 'androidx.test.ext:junit:1.1.5' debugImplementation("com.facebook.flipper:flipper:${FLIPPER_VERSION}") { exclude group:'com.facebook.fbjni' } @@ -213,9 +213,9 @@ dependencies { implementation jscFlavor } - androidTestImplementation 'androidx.test.espresso:espresso-core:3.4.0' - androidTestImplementation 'androidx.test:runner:1.4.0' - androidTestImplementation 'androidx.test:rules:1.4.0' + androidTestImplementation "androidx.test.espresso:espresso-core:3.5.0" + androidTestImplementation "androidx.test:runner:1.5.2" + androidTestImplementation "androidx.test:rules:1.5.0" implementation project(':onnxruntime-react-native') // specify ORT dependency here so it can be found in libs flatDir repository diff --git a/tools/ci_build/github/android/build_aar_package.py b/tools/ci_build/github/android/build_aar_package.py index 19f66245a45e2..1b34b3d302e57 100644 --- a/tools/ci_build/github/android/build_aar_package.py +++ b/tools/ci_build/github/android/build_aar_package.py @@ -23,11 +23,11 @@ # Onnx Runtime native library is built against NDK API 21 by default # It is possible to build from source for Android API levels below 21, but it is not guaranteed -DEFAULT_ANDROID_MIN_SDK_VER = 21 +DEFAULT_ANDROID_MIN_SDK_VER = 24 # Android API 24 is the default target API version for Android builds, based on Microsoft 1CS requirements # It is possible to build from source using API level 21 and higher as the target SDK version -DEFAULT_ANDROID_TARGET_SDK_VER = 24 
+DEFAULT_ANDROID_TARGET_SDK_VER = 34 def _parse_build_settings(args): diff --git a/tools/ci_build/github/android/default_full_aar_build_settings.json b/tools/ci_build/github/android/default_full_aar_build_settings.json index b0eff75812673..1c7769c623d41 100644 --- a/tools/ci_build/github/android/default_full_aar_build_settings.json +++ b/tools/ci_build/github/android/default_full_aar_build_settings.json @@ -5,8 +5,8 @@ "x86", "x86_64" ], - "android_min_sdk_version": 21, - "android_target_sdk_version": 24, + "android_min_sdk_version": 24, + "android_target_sdk_version": 34, "build_params": [ "--enable_lto", "--android", diff --git a/tools/ci_build/github/azure-pipelines/templates/react-native-ci.yml b/tools/ci_build/github/azure-pipelines/templates/react-native-ci.yml index d8ea1c35c89c4..29c5f6bb34d7a 100644 --- a/tools/ci_build/github/azure-pipelines/templates/react-native-ci.yml +++ b/tools/ci_build/github/azure-pipelines/templates/react-native-ci.yml @@ -261,8 +261,6 @@ stages: publishJUnitResults: true testResultsFiles: '**/TEST-*.xml' testRunTitle: 'React Native Android Instrumented Test results' - javaHomeOption: 'path' - jdkDirectory: '$(JAVA_HOME_11_X64)' sonarQubeRunAnalysis: false spotBugsAnalysis: false displayName: Run React Native Android Instrumented Tests From 5c644d3747db64ea12d9987991af68a70df8fbae Mon Sep 17 00:00:00 2001 From: Prathik Rao Date: Tue, 3 Dec 2024 14:40:57 -0800 Subject: [PATCH 03/22] [WebGPU EP] Flatten implementation (#22964) Implements flatten operator for native webgpu. --- .../core/providers/webgpu/tensor/flatten.cc | 52 ++++++++++++++++ .../core/providers/webgpu/tensor/flatten.h | 62 +++++++++++++++++++ .../webgpu/webgpu_execution_provider.cc | 13 ++-- 3 files changed, 122 insertions(+), 5 deletions(-) create mode 100644 onnxruntime/core/providers/webgpu/tensor/flatten.cc create mode 100644 onnxruntime/core/providers/webgpu/tensor/flatten.h diff --git a/onnxruntime/core/providers/webgpu/tensor/flatten.cc b/onnxruntime/core/providers/webgpu/tensor/flatten.cc new file mode 100644 index 0000000000000..81d28bd3c0fa7 --- /dev/null +++ b/onnxruntime/core/providers/webgpu/tensor/flatten.cc @@ -0,0 +1,52 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +#include "core/providers/webgpu/tensor/flatten.h" +#include "core/providers/webgpu/webgpu_execution_provider.h" +#include "core/providers/webgpu/webgpu_supported_types.h" + +namespace onnxruntime { +namespace webgpu { + +ONNX_OPERATOR_VERSIONED_KERNEL_EX( + Flatten, + kOnnxDomain, + 1, 8, + kWebGpuExecutionProvider, + (*KernelDefBuilder::Create()).TypeConstraint("T", WebGpuSupportedFloatTypes()).InputMemoryType(OrtMemTypeCPU, 1), + Flatten); + +ONNX_OPERATOR_VERSIONED_KERNEL_EX( + Flatten, + kOnnxDomain, + 9, 10, + kWebGpuExecutionProvider, + (*KernelDefBuilder::Create()).TypeConstraint("T", WebGpuSupportedFloatTypes()).InputMemoryType(OrtMemTypeCPU, 1), + Flatten); + +ONNX_OPERATOR_VERSIONED_KERNEL_EX( + Flatten, + kOnnxDomain, + 11, 12, + kWebGpuExecutionProvider, + (*KernelDefBuilder::Create()).TypeConstraint("T", WebGpuSupportedFloatTypes()).InputMemoryType(OrtMemTypeCPU, 1), + Flatten); + +ONNX_OPERATOR_VERSIONED_KERNEL_EX( + Flatten, + kOnnxDomain, + 13, 20, + kWebGpuExecutionProvider, + (*KernelDefBuilder::Create()).TypeConstraint("T", WebGpuSupportedFloatTypes()).InputMemoryType(OrtMemTypeCPU, 1), + Flatten); + +ONNX_OPERATOR_KERNEL_EX( + Flatten, + kOnnxDomain, + 21, + kWebGpuExecutionProvider, + (*KernelDefBuilder::Create()).TypeConstraint("T", WebGpuSupportedFloatTypes()).InputMemoryType(OrtMemTypeCPU, 1), + Flatten); + +} // namespace webgpu +} // namespace onnxruntime \ No newline at end of file diff --git a/onnxruntime/core/providers/webgpu/tensor/flatten.h b/onnxruntime/core/providers/webgpu/tensor/flatten.h new file mode 100644 index 0000000000000..5fc49a844b404 --- /dev/null +++ b/onnxruntime/core/providers/webgpu/tensor/flatten.h @@ -0,0 +1,62 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#pragma once + +#include "core/framework/op_kernel.h" +#include "core/providers/cpu/nn/flatten.h" +#include "core/framework/data_transfer_manager.h" + +namespace onnxruntime { +namespace webgpu { + +class Flatten final : public OpKernel { + public: + explicit Flatten(const OpKernelInfo& info) : OpKernel{info} { + axis_ = info.GetAttrOrDefault("axis", 1); + } + + Status Compute(OpKernelContext* context) const override { + const Tensor* input_tensor = context->Input(0); + const TensorShape& input_shape = input_tensor->Shape(); + int64_t input_rank = input_shape.NumDimensions(); + + // Handle negative axis + int64_t axis = axis_; + if (axis < 0) { + axis += input_rank; + } + + if (axis > input_rank) { + return Status(common::ONNXRUNTIME, common::FAIL, "Invalid value for axis, must be less than or equal to input_rank"); + } + + int64_t first_dim = 1; + for (int64_t i = 0; i < axis; i++) { + first_dim *= input_shape[i]; + } + + int64_t second_dim = 1; + for (int64_t i = axis; i < input_rank; i++) { + second_dim *= input_shape[i]; + } + + TensorShape output_shape({first_dim, second_dim}); + Tensor* output_tensor = context->Output(0, output_shape); + + const void* source = input_tensor->DataRaw(); + void* target = output_tensor->MutableDataRaw(); + // If source and target pointers are not equal (non-inplace operation), we need to copy the data. 
+ if (target != source) { + ORT_RETURN_IF_ERROR(Info().GetDataTransferManager().CopyTensor(*input_tensor, *output_tensor)); + } + + return Status::OK(); + } + + private: + int64_t axis_; +}; + +} // namespace webgpu +} // namespace onnxruntime \ No newline at end of file diff --git a/onnxruntime/core/providers/webgpu/webgpu_execution_provider.cc b/onnxruntime/core/providers/webgpu/webgpu_execution_provider.cc index 821c60ab602ea..90b6862758bd7 100644 --- a/onnxruntime/core/providers/webgpu/webgpu_execution_provider.cc +++ b/onnxruntime/core/providers/webgpu/webgpu_execution_provider.cc @@ -347,7 +347,8 @@ class ONNX_OPERATOR_KERNEL_CLASS_NAME(kWebGpuExecutionProvider, kOnnxDomain, 13, class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kWebGpuExecutionProvider, kOnnxDomain, 1, 8, Flatten); class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kWebGpuExecutionProvider, kOnnxDomain, 9, 10, Flatten); class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kWebGpuExecutionProvider, kOnnxDomain, 11, 12, Flatten); -class ONNX_OPERATOR_KERNEL_CLASS_NAME(kWebGpuExecutionProvider, kOnnxDomain, 13, Flatten); +class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kWebGpuExecutionProvider, kOnnxDomain, 13, 20, Flatten); +class ONNX_OPERATOR_KERNEL_CLASS_NAME(kWebGpuExecutionProvider, kOnnxDomain, 21, Flatten); class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kWebGpuExecutionProvider, kOnnxDomain, 6, 12, Tile); class ONNX_OPERATOR_KERNEL_CLASS_NAME(kWebGpuExecutionProvider, kOnnxDomain, 13, Tile); @@ -667,10 +668,12 @@ std::unique_ptr RegisterKernels() { // BuildKernelCreateInfo, // BuildKernelCreateInfo, - // BuildKernelCreateInfo, - // BuildKernelCreateInfo, - // BuildKernelCreateInfo, - // BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, BuildKernelCreateInfo, From d3bc3180d891a4ec1d49d437bfad0a95471e5c7d Mon Sep 17 00:00:00 2001 From: Yulong Wang <7679871+fs-eire@users.noreply.github.com> Date: Tue, 3 Dec 2024 16:07:43 -0800 Subject: [PATCH 04/22] [js/node] fix CUDA artifact installation script for Linux/x64 (#22984) ### Description This PR updates installation script to fix it for CUDA v12. However, it may be difficult for CUDA v11 since the steps are quite complicated to automate. Added a few lines of instructions instead. fixes #22877 --- js/node/script/install.js | 44 +++++++++++++++++++++++++++++++++------ 1 file changed, 38 insertions(+), 6 deletions(-) diff --git a/js/node/script/install.js b/js/node/script/install.js index b15bc03840599..fef93f9169a2c 100644 --- a/js/node/script/install.js +++ b/js/node/script/install.js @@ -21,6 +21,7 @@ const os = require('os'); const fs = require('fs'); const path = require('path'); const tar = require('tar'); +const { execFileSync } = require('child_process'); const { Readable } = require('stream'); // commandline flag: @@ -58,10 +59,23 @@ if (NO_INSTALL || !shouldInstall) { // Step.2: Download the required binaries const artifactUrl = { - 11: `https://github.com/microsoft/onnxruntime/releases/download/v${ORT_VERSION}/onnxruntime-linux-x64-gpu-${ - ORT_VERSION - }.tgz`, - 12: `https://github.com/microsoft/onnxruntime/releases/download/v${ORT_VERSION}/onnxruntime-linux-x64-gpu-cuda12-${ + get 11() { + // TODO: support ORT Cuda v11 binaries + throw new Error(`CUDA 11 binaries are not supported by this script yet. + +To use ONNX Runtime Node.js binding with CUDA v11 support, please follow the manual steps: + +1. 
Use "--onnxruntime-node-install-cuda=skip" to skip the auto installation. +2. Navigate to https://aiinfra.visualstudio.com/PublicPackages/_artifacts/feed/onnxruntime-cuda-11 +3. Download the binaries for your platform and architecture +4. Extract the following binaries to "node_modules/onnxruntime-node/bin/napi-v3/linux/x64: + - libonnxruntime_providers_tensorrt.so + - libonnxruntime_providers_shared.so + - libonnxruntime.so.${ORT_VERSION} + - libonnxruntime_providers_cuda.so +`); + }, + 12: `https://github.com/microsoft/onnxruntime/releases/download/v${ORT_VERSION}/onnxruntime-linux-x64-gpu-${ ORT_VERSION }.tgz`, }[INSTALL_CUDA_FLAG || tryGetCudaVersion()]; @@ -108,9 +122,27 @@ Use "--onnxruntime-node-install-cuda=skip" to skip the installation. You will st function tryGetCudaVersion() { // Should only return 11 or 12. - // TODO: try to get the CUDA version from the system ( `nvcc --version` ) + // try to get the CUDA version from the system ( `nvcc --version` ) + let ver = 12; + try { + const nvccVersion = execFileSync('nvcc', ['--version'], { encoding: 'utf8' }); + const match = nvccVersion.match(/release (\d+)/); + if (match) { + ver = parseInt(match[1]); + if (ver !== 11 && ver !== 12) { + throw new Error(`Unsupported CUDA version: ${ver}`); + } + } + } catch (e) { + if (e?.code === 'ENOENT') { + console.warn('`nvcc` not found. Assuming CUDA 12.'); + } else { + console.warn('Failed to detect CUDA version from `nvcc --version`:', e.message); + } + } - return 11; + // assume CUDA 12 if failed to detect + return ver; } function parseInstallCudaFlag() { From 4497c97d54de76fdb9bf652c08442b9295700878 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 18:48:22 -0800 Subject: [PATCH 05/22] Bump cross-spawn from 7.0.3 to 7.0.6 in /js/node (#22998) Bumps [cross-spawn](https://github.com/moxystudio/node-cross-spawn) from 7.0.3 to 7.0.6.
Changelog (sourced from cross-spawn's changelog):

- 7.0.6 (2024-11-18) - Bug Fixes: update cross-spawn version to 7.0.5 in package-lock.json (f700743)
- 7.0.5 (2024-11-07) - Bug Fixes: fix escaping bug introduced by backtracking (640d391)
- 7.0.4 (2024-11-07) - Bug Fixes: disable regexp backtracking (#160) (5ff3a07)

Commits:

- 77cd97f chore(release): 7.0.6
- 6717de4 chore: upgrade standard-version
- f700743 fix: update cross-spawn version to 7.0.5 in package-lock.json
- 9a7e3b2 chore: fix build status badge
- 0852683 chore(release): 7.0.5
- 640d391 fix: fix escaping bug introduced by backtracking
- bff0c87 chore: remove codecov
- a7c6abc chore: replace travis with github workflows
- 9b9246e chore(release): 7.0.4
- 5ff3a07 fix: disable regexp backtracking (#160)
- Additional commits viewable in compare view

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- js/node/package-lock.json | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/js/node/package-lock.json b/js/node/package-lock.json index 239c0b1ba557b..1f95b0c206ffa 100644 --- a/js/node/package-lock.json +++ b/js/node/package-lock.json @@ -455,9 +455,9 @@ "dev": true }, "node_modules/cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", @@ -1725,9 +1725,9 @@ "dev": true }, "cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", "requires": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", From e84b8e7bd57ceca32ae34d6b44b334697d223874 Mon Sep 17 00:00:00 2001 From: Yulong Wang <7679871+fs-eire@users.noreply.github.com> Date: Tue, 3 Dec 2024 19:25:22 -0800 Subject: [PATCH 06/22] allow specify a custom local source path for Dawn (#22999) ### Description Allows to build ONNX Runtime with a custom local path of Dawn's source code. Usage: ```sh build --use_webgpu --cmake_extra_defines "onnxruntime_CUSTOM_DAWN_SRC_PATH=C:/src/dawn" ``` --- cmake/CMakeLists.txt | 1 + .../external/onnxruntime_external_deps.cmake | 23 ++++++++++++++----- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/cmake/CMakeLists.txt b/cmake/CMakeLists.txt index 70ac62954ad6d..7710ab2f4cac7 100644 --- a/cmake/CMakeLists.txt +++ b/cmake/CMakeLists.txt @@ -148,6 +148,7 @@ option(onnxruntime_USE_XNNPACK "Build with XNNPACK support. Provides an alternat option(onnxruntime_USE_WEBNN "Build with WebNN support. Enable hardware acceleration in web browsers." OFF) option(onnxruntime_USE_WEBGPU "Build with WebGPU support. Enable WebGPU via C/C++ interface." OFF) option(onnxruntime_USE_EXTERNAL_DAWN "Build with treating Dawn as external dependency. Will not link Dawn at build time." 
OFF) +option(onnxruntime_CUSTOM_DAWN_SRC_PATH "Path to custom Dawn src dir.") # Options related to reducing the binary size produced by the build # XNNPACK EP requires the internal NHWC contrib ops to be available, so this option must be OFF when onnxruntime_USE_XNNPACK is ON diff --git a/cmake/external/onnxruntime_external_deps.cmake b/cmake/external/onnxruntime_external_deps.cmake index d9e833a2d8cd4..5e619af619f18 100644 --- a/cmake/external/onnxruntime_external_deps.cmake +++ b/cmake/external/onnxruntime_external_deps.cmake @@ -615,12 +615,23 @@ if (onnxruntime_USE_COREML) endif() if (onnxruntime_USE_WEBGPU) - FetchContent_Declare( - dawn - URL ${DEP_URL_dawn} - URL_HASH SHA1=${DEP_SHA1_dawn} - PATCH_COMMAND ${Patch_EXECUTABLE} --binary --ignore-whitespace -p1 < ${PROJECT_SOURCE_DIR}/patches/dawn/dawn.patch - ) + if (onnxruntime_CUSTOM_DAWN_SRC_PATH) + # use the custom dawn source path if provided + # + # specified as: + # build.py --use_webgpu --cmake_extra_defines "onnxruntime_CUSTOM_DAWN_SRC_PATH=" + FetchContent_Declare( + dawn + SOURCE_DIR ${onnxruntime_CUSTOM_DAWN_SRC_PATH} + ) + else() + FetchContent_Declare( + dawn + URL ${DEP_URL_dawn} + URL_HASH SHA1=${DEP_SHA1_dawn} + PATCH_COMMAND ${Patch_EXECUTABLE} --binary --ignore-whitespace -p1 < ${PROJECT_SOURCE_DIR}/patches/dawn/dawn.patch + ) + endif() # use dawn::dawn_native and dawn::dawn_proc instead of the monolithic dawn::webgpu_dawn to minimize binary size set(DAWN_BUILD_MONOLITHIC_LIBRARY OFF CACHE BOOL "" FORCE) From 06526af346adc5bd344809085cbd7862e7438511 Mon Sep 17 00:00:00 2001 From: Yulong Wang <7679871+fs-eire@users.noreply.github.com> Date: Tue, 3 Dec 2024 20:21:08 -0800 Subject: [PATCH 07/22] [js/webgpu] fix a bug in transpose shader (#22997) ### Description Fix a bug in transpose shader, when input/output rank is 1. ### Motivation and Context Fixes #22994 --- js/web/lib/wasm/jsep/webgpu/ops/transpose.ts | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/js/web/lib/wasm/jsep/webgpu/ops/transpose.ts b/js/web/lib/wasm/jsep/webgpu/ops/transpose.ts index 21225a77b189b..5059645211aea 100644 --- a/js/web/lib/wasm/jsep/webgpu/ops/transpose.ts +++ b/js/web/lib/wasm/jsep/webgpu/ops/transpose.ts @@ -29,7 +29,9 @@ const permFunctionBody = (perm: number[], rank: number, input: IndicesHelper, ou let reverseFunc = `fn perm(i: ${output.type.indices}) -> ${input.type.indices} { var a: ${input.type.indices};`; for (let i = 0; i < rank; ++i) { - reverseFunc += input.indicesSet('a', perm[i], `i[${i}]`); + // input indices and output indices should always be larger or equal to 2, + // so indexer is always valid to be used on `a` and `i`. 
+ reverseFunc += `a[${perm[i]}]=i[${i}];`; } return (reverseFunc += 'return a;}'); }; @@ -71,7 +73,7 @@ export const createTransposeProgramInfo = (inputTensor: TensorView, permAttr: nu const outputShape = getOutputShape(inputTensor.dims, perm); let newInputShape = inputTensor.dims; let newOutputShape = outputShape; - const transposeAsReshape = isTransposeReshape(perm, inputTensor.dims); + const transposeAsReshape = inputRank < 2 || isTransposeReshape(perm, inputTensor.dims); let getShaderSource; if (transposeAsReshape) { getShaderSource = (shaderHelper: ShaderHelper) => { From bd701e4f33a4f56f4e04affbab68c9b95f6b8bff Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Dec 2024 05:07:21 +0000 Subject: [PATCH 08/22] Bump cross-spawn from 7.0.3 to 7.0.6 in /js (#23003) --- js/package-lock.json | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/js/package-lock.json b/js/package-lock.json index 594d0584ad80e..f4401c6e98c75 100644 --- a/js/package-lock.json +++ b/js/package-lock.json @@ -1573,9 +1573,9 @@ "dev": true }, "node_modules/cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", "dev": true, "dependencies": { "path-key": "^3.1.0", @@ -5922,9 +5922,9 @@ "dev": true }, "cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", "dev": true, "requires": { "path-key": "^3.1.0", From 9b9f881475a12991d0abba0095c26dad8a4de5e9 Mon Sep 17 00:00:00 2001 From: Chi Lo <54722500+chilo-ms@users.noreply.github.com> Date: Tue, 3 Dec 2024 21:58:43 -0800 Subject: [PATCH 09/22] [TensorRT EP] Use TRT/CUDA/ORT version from runtime instead of build time to generate hash value (#22921) Use TensorRT and CUDA version fetched at **runtime** to get the hash value which determines the cache name. The old way to get the version is at compile/build time that might have some issues in some cases, ex: TRT EP uses the TRT version which we or users built against at compile time. However, users can change different TRT version at run time, that can cause issue because TRT EP always checks the "fixed" TRT version, not the TRT version it uses now. This can cause TRT EP to use incompatible TRT engine cache. 
see the github issue here: https://github.com/microsoft/onnxruntime/issues/22382#issuecomment-2404140754 --- .../providers/tensorrt/tensorrt_execution_provider.cc | 6 ++++-- .../providers/tensorrt/tensorrt_execution_provider.h | 1 + .../tensorrt/tensorrt_execution_provider_utils.h | 7 +++---- .../test/providers/tensorrt/tensorrt_basic_test.cc | 10 +++++++--- 4 files changed, 15 insertions(+), 9 deletions(-) diff --git a/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc b/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc index d979d53347c4f..1b432dad44263 100644 --- a/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc +++ b/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc @@ -1726,8 +1726,10 @@ TensorrtExecutionProvider::TensorrtExecutionProvider(const TensorrtExecutionProv } trt_version_ = getInferLibVersion(); + CUDA_CALL_THROW(cudaRuntimeGetVersion(&cuda_version_)); LOGS_DEFAULT(VERBOSE) << "[TensorRT EP] TensorRT version is " << trt_version_; + LOGS_DEFAULT(VERBOSE) << "[TensorRT EP] CUDA version is " << cuda_version_; LOGS_DEFAULT(VERBOSE) << "[TensorRT EP] TensorRT provider options: " << "device_id: " << device_id_ @@ -2466,13 +2468,13 @@ TensorrtExecutionProvider::GetCapability(const GraphViewer& graph, // So, simply return the ComputeCapability here. if (graph.NumberOfNodes() == 1 && GraphHasCtxNode(graph)) { SubGraph_t supported_node_vector = {{0}, true}; - std::unique_ptr sub_graph = GetSubGraph(supported_node_vector, graph, TRTGenerateId(graph), 0); + std::unique_ptr sub_graph = GetSubGraph(supported_node_vector, graph, TRTGenerateId(graph, std::to_string(trt_version_), std::to_string(cuda_version_)), 0); result.push_back(ComputeCapability::Create(std::move(sub_graph))); return result; } // Generate unique kernel name for TRT graph - HashValue model_hash = TRTGenerateId(graph); + HashValue model_hash = TRTGenerateId(graph, std::to_string(trt_version_), std::to_string(cuda_version_)); // Get supported node list from TensorRT parser const int number_of_ort_nodes = graph.NumberOfNodes(); diff --git a/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.h b/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.h index 9e3a03417d917..d3e0b0fba8891 100644 --- a/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.h +++ b/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.h @@ -333,6 +333,7 @@ class TensorrtExecutionProvider : public IExecutionProvider { // The format is as for TENSORRT_VERSION: (MAJOR * 100 + MINOR) * 100 + PATCH int32_t trt_version_; + int32_t cuda_version_; // The OrtAllocator object will be get during ep compute time // and should be kept for the lifetime of TRT EP object. diff --git a/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider_utils.h b/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider_utils.h index 95abcd1bad2b8..5a7b135fd92cd 100644 --- a/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider_utils.h +++ b/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider_utils.h @@ -520,7 +520,7 @@ void RemoveCachesByType(const std::string& root, std::string file_extension) { * compiled kernels, so the name must be unique and deterministic across models and sessions. 
* */ -HashValue TRTGenerateId(const GraphViewer& graph_viewer) { +HashValue TRTGenerateId(const GraphViewer& graph_viewer, std::string trt_version, std::string cuda_version) { HashValue model_hash = 0; // find the top level graph @@ -583,12 +583,11 @@ HashValue TRTGenerateId(const GraphViewer& graph_viewer) { #endif #ifdef CUDA_VERSION - hash_str(std::to_string(CUDA_VERSION)); + hash_str(cuda_version); #endif #if defined(NV_TENSORRT_MAJOR) && defined(NV_TENSORRT_MINOR) - std::string TRT_VERSION = std::to_string(NV_TENSORRT_MAJOR) + "." + std::to_string(NV_TENSORRT_MINOR); - hash_str(TRT_VERSION); + hash_str(trt_version); #endif model_hash = hash[0] | (uint64_t(hash[1]) << 32); diff --git a/onnxruntime/test/providers/tensorrt/tensorrt_basic_test.cc b/onnxruntime/test/providers/tensorrt/tensorrt_basic_test.cc index 63327a028c6f4..0022d7fc0e184 100644 --- a/onnxruntime/test/providers/tensorrt/tensorrt_basic_test.cc +++ b/onnxruntime/test/providers/tensorrt/tensorrt_basic_test.cc @@ -342,8 +342,12 @@ TEST(TensorrtExecutionProviderTest, TRTModelIdGeneratorUsingModelHashing) { Graph& graph = model->MainGraph(); GraphViewer viewer(graph); + std::string trt_version = std::to_string(NV_TENSORRT_MAJOR) + "." + std::to_string(NV_TENSORRT_MINOR); + std::string cuda_version = std::to_string(CUDA_VERSION); + std::string ort_version = ORT_VERSION; + // get the hash for the model when loaded from file - HashValue model_hash = TRTGenerateId(viewer); + HashValue model_hash = TRTGenerateId(viewer, trt_version, cuda_version); ASSERT_NE(model_hash, 0); // now load the model from bytes and check the hash differs @@ -358,7 +362,7 @@ TEST(TensorrtExecutionProviderTest, TRTModelIdGeneratorUsingModelHashing) { // Test loading same model from file and byte steam. Hash values should be different Graph& graph2 = model2->MainGraph(); GraphViewer viewer2(graph2); - HashValue model_hash2 = TRTGenerateId(viewer2); + HashValue model_hash2 = TRTGenerateId(viewer2, trt_version, cuda_version); ASSERT_NE(model_hash, model_hash2); // Test loading same model from different path, see if hash values are same as well @@ -367,7 +371,7 @@ TEST(TensorrtExecutionProviderTest, TRTModelIdGeneratorUsingModelHashing) { ASSERT_TRUE(Model::Load(model_path, model3, nullptr, DefaultLoggingManager().DefaultLogger()).IsOK()); Graph& graph3 = model3->MainGraph(); GraphViewer viewer3(graph3); - HashValue model_hash3 = TRTGenerateId(viewer3); + HashValue model_hash3 = TRTGenerateId(viewer3, trt_version, cuda_version); ASSERT_EQ(model_hash, model_hash3) << "model 1&3 are same models and they have same hash, no matter where they are loaded"; } From 50b38ca9d508d81e49524ca0c028c592fd9c9c79 Mon Sep 17 00:00:00 2001 From: Yulong Wang <7679871+fs-eire@users.noreply.github.com> Date: Wed, 4 Dec 2024 09:46:45 -0800 Subject: [PATCH 10/22] [js/web] update default export to include webgpu (#22754) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Description This PR changes the following exports: - `onnxruntime-web` now is same to `onnxruntime-web/webgpu`. - `onnxruntime-web/webgpu` is deprecating. ### Migration instructions: - use `onnxruntime-web` instead of `onnxruntime-web/webgpu`. - use `onnxruntime-web/wasm` if want to use onnxruntime-web without webgpu/webnn. ### Export table | file name | export entry | includes WASM | includes JSEP (WebGPU & WebNN) | includes WebGL | ------------- | ------------- | ----- | ----- | ----- | ort.all.min.js
ort.all.js<br/>ort.all.min.mjs<br/>ort.all.mjs | `onnxruntime-web/all` | ✔️ | ✔️ | ✔️ |
| ort.min.js<br/>ort.js<br/>ort.min.mjs<br/>ort.mjs | `onnxruntime-web` | ✔️ | ❌ --> ✔️ | ✔️ --> ❌ |
| ort.webgpu.min.js<br/>ort.webgpu.js<br/>ort.webgpu.min.mjs<br/>ort.webgpu.mjs | `onnxruntime-web/webgpu` | ✔️ | ✔️ | ❌ |
| ort.wasm.min.js<br/>ort.wasm.js<br/>ort.wasm.min.mjs<br/>
ort.wasm.mjs | `onnxruntime-web/wasm` | ✔️ | ❌ |❌ --- js/web/package.json | 2 +- js/web/script/build.ts | 11 ++++++-- .../e2e/browser-test-wasm-binary-override.js | 2 +- ...r-test-wasm-path-override-filename-jsep.js | 28 +++++++++++++++++++ js/web/test/e2e/run-data.js | 16 +++++------ js/web/test/e2e/run.js | 4 +++ 6 files changed, 51 insertions(+), 12 deletions(-) create mode 100644 js/web/test/e2e/browser-test-wasm-path-override-filename-jsep.js diff --git a/js/web/package.json b/js/web/package.json index 656cd7b56b039..181d6127f5455 100644 --- a/js/web/package.json +++ b/js/web/package.json @@ -83,7 +83,7 @@ "types": "./types.d.ts" }, "./wasm": { - "import": "./dist/ort.wasm.min.mjs", + "import": "./dist/ort.wasm.bundle.min.mjs", "require": "./dist/ort.wasm.min.js", "types": "./types.d.ts" }, diff --git a/js/web/script/build.ts b/js/web/script/build.ts index 408f9e00a5cbd..529e9d1065e69 100644 --- a/js/web/script/build.ts +++ b/js/web/script/build.ts @@ -591,14 +591,14 @@ async function main() { // ort[.min].[m]js await addAllWebBuildTasks({ outputName: 'ort', - define: { ...DEFAULT_DEFINE, 'BUILD_DEFS.DISABLE_JSEP': 'true' }, + define: { ...DEFAULT_DEFINE, 'BUILD_DEFS.DISABLE_WEBGL': 'true' }, }); // ort.bundle.min.mjs await buildOrt({ isProduction: true, outputName: 'ort.bundle', format: 'esm', - define: { ...DEFAULT_DEFINE, 'BUILD_DEFS.DISABLE_JSEP': 'true', 'BUILD_DEFS.DISABLE_DYNAMIC_IMPORT': 'true' }, + define: { ...DEFAULT_DEFINE, 'BUILD_DEFS.DISABLE_WEBGL': 'true', 'BUILD_DEFS.DISABLE_DYNAMIC_IMPORT': 'true' }, }); // ort.webgpu[.min].[m]js @@ -619,6 +619,13 @@ async function main() { outputName: 'ort.wasm', define: { ...DEFAULT_DEFINE, 'BUILD_DEFS.DISABLE_JSEP': 'true', 'BUILD_DEFS.DISABLE_WEBGL': 'true' }, }); + // ort.wasm.bundle.min.mjs + await buildOrt({ + isProduction: true, + outputName: 'ort.wasm.bundle', + format: 'esm', + define: { ...DEFAULT_DEFINE, 'BUILD_DEFS.DISABLE_JSEP': 'true', 'BUILD_DEFS.DISABLE_WEBGL': 'true' }, + }); // ort.webgl[.min].[m]js await addAllWebBuildTasks({ outputName: 'ort.webgl', diff --git a/js/web/test/e2e/browser-test-wasm-binary-override.js b/js/web/test/e2e/browser-test-wasm-binary-override.js index 471c26f6990b5..27cce2ca06236 100644 --- a/js/web/test/e2e/browser-test-wasm-binary-override.js +++ b/js/web/test/e2e/browser-test-wasm-binary-override.js @@ -7,7 +7,7 @@ const documentUrl = document.currentScript.src; it('Browser E2E testing - WebAssembly backend', async function () { // preload .wasm file binary - const wasmUrl = new URL('./node_modules/onnxruntime-web/dist/ort-wasm-simd-threaded.wasm', documentUrl).href; + const wasmUrl = new URL('./node_modules/onnxruntime-web/dist/ort-wasm-simd-threaded.jsep.wasm', documentUrl).href; const response = await fetch(wasmUrl); // make sure the .wasm file is loaded successfully diff --git a/js/web/test/e2e/browser-test-wasm-path-override-filename-jsep.js b/js/web/test/e2e/browser-test-wasm-path-override-filename-jsep.js new file mode 100644 index 0000000000000..d325a5ca7187d --- /dev/null +++ b/js/web/test/e2e/browser-test-wasm-path-override-filename-jsep.js @@ -0,0 +1,28 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +'use strict'; + +it('Browser E2E testing - WebAssembly backend (path override filename)', async function () { + // check base URL port from test args + if (typeof __ort_arg_port === 'undefined') { + throw new Error('test flag --port= is required'); + } + const base = `http://localhost:${__ort_arg_port}/`; + + ort.env.wasm.wasmPaths = {}; + + if (typeof __ort_arg_files === 'string' && __ort_arg_files.includes('wasm')) { + const overrideWasmUrl = new URL('./test-wasm-path-override/jsep-renamed.wasm', base).href; + console.log(`ort.env.wasm.wasmPaths['wasm'] = ${JSON.stringify(overrideWasmUrl)};`); + ort.env.wasm.wasmPaths.wasm = overrideWasmUrl; + } + + if (typeof __ort_arg_files === 'string' && __ort_arg_files.includes('mjs')) { + const overrideMjsUrl = new URL('./test-wasm-path-override/jsep-renamed.mjs', base).href; + console.log(`ort.env.wasm.wasmPaths['mjs'] = ${JSON.stringify(overrideMjsUrl)};`); + ort.env.wasm.wasmPaths.mjs = overrideMjsUrl; + } + + await testFunction(ort, { executionProviders: ['wasm'] }); +}); diff --git a/js/web/test/e2e/run-data.js b/js/web/test/e2e/run-data.js index 04079b042bc23..dbc3ca0bd2460 100644 --- a/js/web/test/e2e/run-data.js +++ b/js/web/test/e2e/run-data.js @@ -14,7 +14,7 @@ const NODEJS_TEST_CASES = [ // [test_for_same_origin, test_for_cross_origin, main_js, ort_main_js, [test_args]] const BROWSER_TEST_CASES = [ // IIFE - [true, true, './browser-test-webgl.js', 'ort.min.js'], // webgl + [true, true, './browser-test-webgl.js', 'ort.all.min.js'], // webgl [true, true, './browser-test-webgl.js', 'ort.webgl.min.js'], // webgl [true, true, './browser-test-wasm.js', 'ort.wasm.min.js'], // wasm, ort.wasm [true, true, './browser-test-wasm-multi-session-create.js', 'ort.min.js'], // wasm, multi-session create @@ -24,7 +24,7 @@ const BROWSER_TEST_CASES = [ [true, true, './browser-test-wasm.js', 'ort.min.js', ['num_threads=1', 'proxy=1']], // wasm, 1 thread, proxy // ort.min.mjs - [true, true, './browser-test-webgl.js', 'ort.min.mjs'], // webgl + [true, true, './browser-test-webgl.js', 'ort.webgl.min.mjs'], // webgl [true, true, './browser-test-wasm.js', 'ort.min.mjs', ['num_threads=1']], // wasm, 1 thread [true, true, './browser-test-wasm.js', 'ort.min.mjs', ['num_threads=2']], // wasm, 2 threads [true, true, './browser-test-wasm.js', 'ort.min.mjs', ['num_threads=2', 'proxy=1']], // wasm, 2 threads, proxy @@ -41,22 +41,22 @@ const BROWSER_TEST_CASES = [ // path override: // wasm, path override filenames for both mjs and wasm, same origin - [true, false, './browser-test-wasm-path-override-filename.js', 'ort.min.js', ['port=9876', 'files=mjs,wasm']], + [true, false, './browser-test-wasm-path-override-filename-jsep.js', 'ort.min.js', ['port=9876', 'files=mjs,wasm']], [true, false, './browser-test-wasm-path-override-filename.js', 'ort.wasm.min.js', ['port=9876', 'files=mjs,wasm']], // wasm, path override filenames for both mjs and wasm, cross origin - [false, true, './browser-test-wasm-path-override-filename.js', 'ort.min.js', ['port=8081', 'files=mjs,wasm']], + [false, true, './browser-test-wasm-path-override-filename-jsep.js', 'ort.min.js', ['port=8081', 'files=mjs,wasm']], [false, true, './browser-test-wasm-path-override-filename.js', 'ort.wasm.min.js', ['port=8081', 'files=mjs,wasm']], // wasm, path override filename for wasm, same origin - [true, false, './browser-test-wasm-path-override-filename.js', 'ort.min.js', ['port=9876', 'files=wasm']], + [true, false, './browser-test-wasm-path-override-filename-jsep.js', 'ort.min.js', ['port=9876', 'files=wasm']], 
[true, false, './browser-test-wasm-path-override-filename.js', 'ort.wasm.min.js', ['port=9876', 'files=wasm']], // wasm, path override filename for wasm, cross origin - [false, true, './browser-test-wasm-path-override-filename.js', 'ort.min.js', ['port=8081', 'files=wasm']], + [false, true, './browser-test-wasm-path-override-filename-jsep.js', 'ort.min.js', ['port=8081', 'files=wasm']], [false, true, './browser-test-wasm-path-override-filename.js', 'ort.wasm.min.js', ['port=8081', 'files=wasm']], // wasm, path override filename for mjs, same origin - [true, false, './browser-test-wasm-path-override-filename.js', 'ort.min.js', ['port=9876', 'files=mjs']], + [true, false, './browser-test-wasm-path-override-filename-jsep.js', 'ort.min.js', ['port=9876', 'files=mjs']], [true, false, './browser-test-wasm-path-override-filename.js', 'ort.wasm.min.js', ['port=9876', 'files=mjs']], // wasm, path override filename for mjs, cross origin - [false, true, './browser-test-wasm-path-override-filename.js', 'ort.min.js', ['port=8081', 'files=mjs']], + [false, true, './browser-test-wasm-path-override-filename-jsep.js', 'ort.min.js', ['port=8081', 'files=mjs']], [false, true, './browser-test-wasm-path-override-filename.js', 'ort.wasm.min.js', ['port=8081', 'files=mjs']], // wasm, path override prefix, same origin [true, false, './browser-test-wasm-path-override-prefix.js', 'ort.min.js', ['port=9876']], diff --git a/js/web/test/e2e/run.js b/js/web/test/e2e/run.js index 93f9d4a144bf2..3361bbece64ed 100644 --- a/js/web/test/e2e/run.js +++ b/js/web/test/e2e/run.js @@ -146,6 +146,10 @@ function prepareWasmPathOverrideFiles() { fs.copyFileSync(`${sourceFile}.wasm`, path.join(folder, 'ort-wasm-simd-threaded.wasm')); fs.copyFileSync(`${sourceFile}.mjs`, path.join(folder, 'renamed.mjs')); fs.copyFileSync(`${sourceFile}.wasm`, path.join(folder, 'renamed.wasm')); + fs.copyFileSync(`${sourceFile}.jsep.mjs`, path.join(folder, 'ort-wasm-simd-threaded.jsep.mjs')); + fs.copyFileSync(`${sourceFile}.jsep.wasm`, path.join(folder, 'ort-wasm-simd-threaded.jsep.wasm')); + fs.copyFileSync(`${sourceFile}.jsep.mjs`, path.join(folder, 'jsep-renamed.mjs')); + fs.copyFileSync(`${sourceFile}.jsep.wasm`, path.join(folder, 'jsep-renamed.wasm')); } async function testAllNodejsCases() { From a615bd66882c3267a9483377478c983bf3c0c5b6 Mon Sep 17 00:00:00 2001 From: Yulong Wang <7679871+fs-eire@users.noreply.github.com> Date: Wed, 4 Dec 2024 09:47:16 -0800 Subject: [PATCH 11/22] Bump version of Dawn to 12a3b24c4 (#23002) ### Description Upgrade version of Dawn. Removed dawn.patch, because all patches are included in upstream. 
Updated code that affected by API changes (`const char*` -> `WGPUStringView`) ### Motivation and Context --- cgmanifests/generated/cgmanifest.json | 2 +- cmake/deps.txt | 2 +- .../external/onnxruntime_external_deps.cmake | 4 +- cmake/patches/dawn/dawn.patch | 81 ------------------- .../webgpu/quantization/matmul_nbits.cc | 9 ++- .../core/providers/webgpu/webgpu_context.cc | 36 ++++----- .../templates/download-deps.yml | 4 +- 7 files changed, 32 insertions(+), 106 deletions(-) delete mode 100644 cmake/patches/dawn/dawn.patch diff --git a/cgmanifests/generated/cgmanifest.json b/cgmanifests/generated/cgmanifest.json index df27fa5ab1b95..07dff50f9a3bd 100644 --- a/cgmanifests/generated/cgmanifest.json +++ b/cgmanifests/generated/cgmanifest.json @@ -346,7 +346,7 @@ "component": { "type": "git", "git": { - "commitHash": "511eb80847afe6bded34ec491a38d5d78ba2d604", + "commitHash": "12a3b24c456cebd9fd11f23ac0164f78129b00c6", "repositoryUrl": "https://github.com/google/dawn.git" }, "comments": "dawn" diff --git a/cmake/deps.txt b/cmake/deps.txt index 9cf92bf417fcb..21f9ee1701c46 100644 --- a/cmake/deps.txt +++ b/cmake/deps.txt @@ -58,5 +58,5 @@ extensions;https://github.com/microsoft/onnxruntime-extensions/archive/94142d839 composable_kernel;https://github.com/ROCmSoftwarePlatform/composable_kernel/archive/204da9c522cebec5220bba52cd3542ebcaf99e7a.zip;1827348efd47831c13074245274d41b7cae8a557 directx_headers;https://github.com/microsoft/DirectX-Headers/archive/refs/tags/v1.613.1.zip;47653509a3371eabb156360f42faf582f314bf2e cudnn_frontend;https://github.com/NVIDIA/cudnn-frontend/archive/refs/tags/v1.7.0.zip;d0753d8d5b39947ca0729d7773cb84653a129eb1 -dawn;https://github.com/google/dawn/archive/511eb80847afe6bded34ec491a38d5d78ba2d604.zip;c493f5aca5586f6634e25d0121c85df71189fb99 +dawn;https://github.com/google/dawn/archive/12a3b24c456cebd9fd11f23ac0164f78129b00c6.zip;ad428f6dc16f1336d584f7bad5714e1097dafc43 kleidiai;https://gitlab.arm.com/kleidi/kleidiai/-/archive/v0.2.0/kleidiai-v0.2.0.zip;B1E3173992FD91F20DB904AB77D6E901778C2681 diff --git a/cmake/external/onnxruntime_external_deps.cmake b/cmake/external/onnxruntime_external_deps.cmake index 5e619af619f18..ee7abcbad025c 100644 --- a/cmake/external/onnxruntime_external_deps.cmake +++ b/cmake/external/onnxruntime_external_deps.cmake @@ -629,7 +629,9 @@ if (onnxruntime_USE_WEBGPU) dawn URL ${DEP_URL_dawn} URL_HASH SHA1=${DEP_SHA1_dawn} - PATCH_COMMAND ${Patch_EXECUTABLE} --binary --ignore-whitespace -p1 < ${PROJECT_SOURCE_DIR}/patches/dawn/dawn.patch + # All previous patches are merged into the upstream dawn project. We don't need to apply any patches right now. + # if we need to apply patches in the future, we can uncomment the following line. 
+ # PATCH_COMMAND ${Patch_EXECUTABLE} --binary --ignore-whitespace -p1 < ${PROJECT_SOURCE_DIR}/patches/dawn/dawn.patch ) endif() diff --git a/cmake/patches/dawn/dawn.patch b/cmake/patches/dawn/dawn.patch deleted file mode 100644 index 7a2a01d55be46..0000000000000 --- a/cmake/patches/dawn/dawn.patch +++ /dev/null @@ -1,81 +0,0 @@ -diff --git a/src/dawn/native/CMakeLists.txt b/src/dawn/native/CMakeLists.txt -index 9c0bd6fa4e..bf8a57aeac 100644 ---- a/src/dawn/native/CMakeLists.txt -+++ b/src/dawn/native/CMakeLists.txt -@@ -857,6 +857,11 @@ if (DAWN_ENABLE_SWIFTSHADER) - target_compile_definitions(dawn_native PRIVATE "DAWN_ENABLE_SWIFTSHADER") - endif() - -+if (IOS) -+ target_compile_options(dawn_native_objects PRIVATE -fno-objc-arc) -+ target_compile_options(dawn_native PRIVATE -fno-objc-arc) -+endif() -+ - if (DAWN_BUILD_MONOLITHIC_LIBRARY) - ############################################################################### - # Do the 'complete_lib' build. -diff --git a/src/dawn/native/Surface_metal.mm b/src/dawn/native/Surface_metal.mm -index ce55acbd43..2cfd363479 100644 ---- a/src/dawn/native/Surface_metal.mm -+++ b/src/dawn/native/Surface_metal.mm -@@ -33,10 +33,18 @@ - - #import - -+#include "dawn/common/Platform.h" -+ - namespace dawn::native { - - bool InheritsFromCAMetalLayer(void* obj) { -- id object = static_cast(obj); -+ id object = -+#if DAWN_PLATFORM_IS(IOS) -+ (__bridge id)obj; -+#else // DAWN_PLATFORM_IS(IOS) -+ static_cast(obj); -+#endif // DAWN_PLATFORM_IS(IOS) -+ - return [object isKindOfClass:[CAMetalLayer class]]; - } - -diff --git a/src/dawn/native/metal/SharedFenceMTL.mm b/src/dawn/native/metal/SharedFenceMTL.mm -index bde8bfea07..8906185d6f 100644 ---- a/src/dawn/native/metal/SharedFenceMTL.mm -+++ b/src/dawn/native/metal/SharedFenceMTL.mm -@@ -25,6 +25,8 @@ - // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -+#include "dawn/common/Platform.h" -+ - #include "dawn/native/metal/SharedFenceMTL.h" - - #include "dawn/native/ChainUtils.h" -@@ -39,8 +41,13 @@ ResultOrError> SharedFence::Create( - const SharedFenceMTLSharedEventDescriptor* descriptor) { - DAWN_INVALID_IF(descriptor->sharedEvent == nullptr, "MTLSharedEvent is missing."); - if (@available(macOS 10.14, iOS 12.0, *)) { -- return AcquireRef(new SharedFence( -- device, label, static_cast>(descriptor->sharedEvent))); -+ return AcquireRef(new SharedFence(device, label, -+#if DAWN_PLATFORM_IS(IOS) -+ (__bridge id)(descriptor->sharedEvent) -+#else // DAWN_PLATFORM_IS(IOS) -+ static_cast>(descriptor->sharedEvent) -+#endif // DAWN_PLATFORM_IS(IOS) -+ )); - } else { - return DAWN_INTERNAL_ERROR("MTLSharedEvent not supported."); - } -diff --git a/src/tint/api/BUILD.cmake b/src/tint/api/BUILD.cmake -index 0037d83276..6372c4ee77 100644 ---- a/src/tint/api/BUILD.cmake -+++ b/src/tint/api/BUILD.cmake -@@ -57,6 +57,7 @@ tint_target_add_dependencies(tint_api lib - tint_lang_wgsl_ast_transform - tint_lang_wgsl_common - tint_lang_wgsl_features -+ tint_lang_wgsl_inspector - tint_lang_wgsl_program - tint_lang_wgsl_sem - tint_lang_wgsl_writer_ir_to_program diff --git a/onnxruntime/contrib_ops/webgpu/quantization/matmul_nbits.cc b/onnxruntime/contrib_ops/webgpu/quantization/matmul_nbits.cc index 29f328264bf3f..31f95ee64df5d 100644 --- a/onnxruntime/contrib_ops/webgpu/quantization/matmul_nbits.cc +++ b/onnxruntime/contrib_ops/webgpu/quantization/matmul_nbits.cc @@ -1,6 +1,8 @@ // Copyright (c) Microsoft Corporation. 
All rights reserved. // Licensed under the MIT License. +#include + #include "contrib_ops/webgpu/quantization/matmul_nbits.h" #include "contrib_ops/webgpu/webgpu_contrib_kernels.h" #include "core/providers/cpu/math/matmul_helper.h" @@ -352,8 +354,11 @@ Status MatMulNBits::ComputeInternal(onnxruntime::webgpu::ComputeContext& context const uint32_t components_a = GetMaxComponents(K); const uint32_t components_b = GetMaxComponents(blob_size_in_words); uint32_t components = GetMaxComponents(N); - const bool is_intel = !std::strcmp(context.AdapterInfo().vendor, "intel") && !std::strcmp(context.AdapterInfo().architecture, "gen-12lp"); - const bool use_block32 = is_intel && block_size == 32; + + // Use block32 for Intel Gen12LP architecture. + const bool use_block32 = context.AdapterInfo().vendor == std::string_view{"intel"} && + context.AdapterInfo().architecture == std::string_view{"gen-12lp"} && + block_size == 32; const bool has_zero_points = zero_points != nullptr; // TODO: Support output_number > 1. Some cases are failed when output_number > 1. // const uint32_t output_number = M > 1 && (N / components) % 2 == 0 ? 2 : 1; diff --git a/onnxruntime/core/providers/webgpu/webgpu_context.cc b/onnxruntime/core/providers/webgpu/webgpu_context.cc index 36aab2e628a16..ea0cbddb0205d 100644 --- a/onnxruntime/core/providers/webgpu/webgpu_context.cc +++ b/onnxruntime/core/providers/webgpu/webgpu_context.cc @@ -58,16 +58,15 @@ void WebGpuContext::Initialize(const WebGpuExecutionProviderInfo& webgpu_ep_info adapter_toggles_desc.enabledToggleCount = enabled_adapter_toggles.size(); adapter_toggles_desc.enabledToggles = enabled_adapter_toggles.data(); - wgpu::RequestAdapterCallbackInfo req_adapter_callback_info = {}; - req_adapter_callback_info.mode = wgpu::CallbackMode::WaitAnyOnly; - req_adapter_callback_info.callback = [](WGPURequestAdapterStatus status, - WGPUAdapter adapter, const char* message, - void* userdata) { - ORT_ENFORCE(status == WGPURequestAdapterStatus_Success, "Failed to get a WebGPU adapter: ", message); - *static_cast(userdata) = wgpu::Adapter::Acquire(adapter); - }; - req_adapter_callback_info.userdata = &adapter_; - ORT_ENFORCE(wgpu::WaitStatus::Success == instance_.WaitAny(instance_.RequestAdapter(&req_adapter_options, req_adapter_callback_info), UINT64_MAX)); + ORT_ENFORCE(wgpu::WaitStatus::Success == instance_.WaitAny(instance_.RequestAdapter( + &req_adapter_options, + wgpu::CallbackMode::WaitAnyOnly, + [](wgpu::RequestAdapterStatus status, wgpu::Adapter adapter, wgpu::StringView message, wgpu::Adapter* ptr) { + ORT_ENFORCE(status == wgpu::RequestAdapterStatus::Success, "Failed to get a WebGPU adapter: ", std::string_view{message}); + *ptr = adapter; + }, + &adapter_), + UINT64_MAX)); ORT_ENFORCE(adapter_ != nullptr, "Failed to get a WebGPU adapter."); } @@ -103,14 +102,15 @@ void WebGpuContext::Initialize(const WebGpuExecutionProviderInfo& webgpu_ep_info std::cerr << "WebGPU device lost (" << int(reason) << "): " << message; }); - wgpu::RequestDeviceCallbackInfo req_device_callback_info = {}; - req_device_callback_info.mode = wgpu::CallbackMode::WaitAnyOnly; - req_device_callback_info.callback = [](WGPURequestDeviceStatus status, WGPUDevice device, char const* message, void* userdata) { - ORT_ENFORCE(status == WGPURequestDeviceStatus_Success, "Failed to get a WebGPU device: ", message); - *static_cast(userdata) = wgpu::Device::Acquire(device); - }; - req_device_callback_info.userdata = &device_; - ORT_ENFORCE(wgpu::WaitStatus::Success == 
instance_.WaitAny(adapter_.RequestDevice(&device_desc, req_device_callback_info), UINT64_MAX)); + ORT_ENFORCE(wgpu::WaitStatus::Success == instance_.WaitAny(adapter_.RequestDevice( + &device_desc, + wgpu::CallbackMode::WaitAnyOnly, + [](wgpu::RequestDeviceStatus status, wgpu::Device device, wgpu::StringView message, wgpu::Device* ptr) { + ORT_ENFORCE(status == wgpu::RequestDeviceStatus::Success, "Failed to get a WebGPU device: ", std::string_view{message}); + *ptr = device; + }, + &device_), + UINT64_MAX)); ORT_ENFORCE(device_ != nullptr, "Failed to get a WebGPU device."); } diff --git a/tools/ci_build/github/azure-pipelines/templates/download-deps.yml b/tools/ci_build/github/azure-pipelines/templates/download-deps.yml index 4fa36a1ff548b..949479fb8b5e4 100644 --- a/tools/ci_build/github/azure-pipelines/templates/download-deps.yml +++ b/tools/ci_build/github/azure-pipelines/templates/download-deps.yml @@ -11,7 +11,7 @@ steps: packageType: upack feed: '/7424c8e4-5c62-490e-95c4-79446f31017c' definition: '517c4f6f-5437-4392-a70d-4f15ec5be2f0' - version: 1.0.200 + version: 1.0.201 downloadPath: $(Build.BinariesDirectory)/deps # The private ADO project @@ -22,7 +22,7 @@ steps: packageType: upack feed: '/4c7631f5-24c0-4307-8822-1aa8f180c325' definition: 'fd9dd5ad-b73e-4678-890e-edcf680dbc1a' - version: 1.0.200 + version: 1.0.201 downloadPath: $(Build.BinariesDirectory)/deps # You can add more ADO accounts at here. From c19617a24afcfc51559d651d587ba6bb76845fc7 Mon Sep 17 00:00:00 2001 From: Xu Xing Date: Thu, 5 Dec 2024 01:57:32 +0800 Subject: [PATCH 12/22] [js/webgpu] Add GatherND (#22847) ### Description ### Motivation and Context --- js/web/docs/webgpu-operators.md | 1 + .../lib/wasm/jsep/webgpu/op-resolve-rules.ts | 2 + js/web/lib/wasm/jsep/webgpu/ops/gather-nd.ts | 179 ++++++++++++++++++ js/web/test/data/ops/gather-nd.jsonc | 147 ++++++++++++++ js/web/test/suite-test-list.jsonc | 1 + .../providers/js/js_execution_provider.cc | 8 + .../core/providers/js/operators/gather_nd.cc | 41 ++++ .../core/providers/js/operators/gather_nd.h | 24 +++ 8 files changed, 403 insertions(+) create mode 100644 js/web/lib/wasm/jsep/webgpu/ops/gather-nd.ts create mode 100644 js/web/test/data/ops/gather-nd.jsonc create mode 100644 onnxruntime/core/providers/js/operators/gather_nd.cc create mode 100644 onnxruntime/core/providers/js/operators/gather_nd.h diff --git a/js/web/docs/webgpu-operators.md b/js/web/docs/webgpu-operators.md index f63cf17aa4df3..5c8748d75c2bc 100644 --- a/js/web/docs/webgpu-operators.md +++ b/js/web/docs/webgpu-operators.md @@ -50,6 +50,7 @@ Do not modify directly.* | Gather | ai.onnx(1-10,11-12,13+) | | | GatherBlockQuantized | com.microsoft(1+) | | | GatherElements | ai.onnx(11-12,13+) | | +| GatherND | ai.onnx(11,12,13+) | | | Gelu | ai.onnx(20+); com.microsoft(1+) | | | Gemm | ai.onnx(7-8,9-10,11-12,13+) | | | GlobalAveragePool | ai.onnx(1+); com.ms.internal.nhwc(1+) | | diff --git a/js/web/lib/wasm/jsep/webgpu/op-resolve-rules.ts b/js/web/lib/wasm/jsep/webgpu/op-resolve-rules.ts index 28af5d461abe0..6c7afbc7365bb 100644 --- a/js/web/lib/wasm/jsep/webgpu/op-resolve-rules.ts +++ b/js/web/lib/wasm/jsep/webgpu/op-resolve-rules.ts @@ -16,6 +16,7 @@ import { einsum, parseEinsumAttributes } from './ops/einsum'; import { expand } from './ops/expand'; import { fastGelu } from './ops/fast-gelu'; import { gather, parseGatherAttributes } from './ops/gather'; +import { gatherND, parseGatherNDAttributes } from './ops/gather-nd'; import { gatherBlockQuantized, parseGatherBlockQuantizedAttributes } from 
'./ops/gather-block-quantized'; import { gatherElements, parseGatherElementsAttributes } from './ops/gather-elements'; import { gemm, parseGemmAttributes } from './ops/gemm'; @@ -100,6 +101,7 @@ export const WEBGPU_OP_RESOLVE_RULES: Map = new ['Gather', [gather, parseGatherAttributes]], ['GatherElements', [gatherElements, parseGatherElementsAttributes]], ['GatherBlockQuantized', [gatherBlockQuantized, parseGatherBlockQuantizedAttributes]], + ['GatherND', [gatherND, parseGatherNDAttributes]], ['Gelu', [unaryOps.gelu]], ['Gemm', [gemm, parseGemmAttributes]], ['GlobalAveragePool', [pool.globalAveragePool, pool.parseGlobalAveragePoolAttributes]], diff --git a/js/web/lib/wasm/jsep/webgpu/ops/gather-nd.ts b/js/web/lib/wasm/jsep/webgpu/ops/gather-nd.ts new file mode 100644 index 0000000000000..43b51f6e94a66 --- /dev/null +++ b/js/web/lib/wasm/jsep/webgpu/ops/gather-nd.ts @@ -0,0 +1,179 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +import { DataType } from '../../../wasm-common'; +import { TensorView } from '../../tensor-view'; +import { ShapeUtil } from '../../util'; +import { AttributeWithCacheKey } from '../attribute-with-cache-key'; +import { ComputeContext, ProgramUniform } from '../types'; + +import { createTensorShapeVariables, inputVariable, outputVariable, ShaderHelper, UniformsArrayType } from './common'; + +export interface GatherNDAttributes extends AttributeWithCacheKey { + readonly batchDims: number; +} + +const computeSliceOffsets = ( + context: ComputeContext, + indicesData: TensorView, + sizesFromSliceDimsData: number[], + batchDims: number, + inputDims: readonly number[], + numSlices: number, + numSlicesPerBatch: number, + inputBatchStride: number, + numSliceDims: number, +) => { + const programUniforms: ProgramUniform[] = [ + { type: DataType.uint32, data: numSlices }, + { type: DataType.uint32, data: batchDims }, + { type: DataType.uint32, data: inputDims }, + { type: DataType.uint32, data: sizesFromSliceDimsData }, + { type: DataType.uint32, data: numSlicesPerBatch }, + { type: DataType.uint32, data: inputBatchStride }, + { type: DataType.uint32, data: numSliceDims }, + ]; + + const outputShape = [numSlices]; + programUniforms.push(...createTensorShapeVariables(indicesData.dims, outputShape)); + + const getShaderSource = (shaderHelper: ShaderHelper) => { + const indices = inputVariable('indices_data', indicesData.dataType, indicesData.dims.length); + const output = outputVariable('input_slice_offsets_data', DataType.uint32, 1, 1); + const variables = [indices, output]; + const uniforms: UniformsArrayType = [ + { name: 'output_size', type: 'u32' }, + { name: 'batch_dims', type: 'u32' }, + { name: 'input_dims', type: 'u32', length: inputDims.length }, + { name: 'sizes_from_slice_dims_data', type: 'u32', length: sizesFromSliceDimsData.length }, + { name: 'num_slices_per_batch', type: 'u32' }, + { name: 'input_batch_stride', type: 'u32' }, + { name: 'num_slice_dims', type: 'u32' }, + ]; + return ` + ${shaderHelper.registerUniforms(uniforms).declareVariables(...variables)} + ${shaderHelper.mainStart()} + ${shaderHelper.guardAgainstOutOfBoundsWorkgroupSizes('uniforms.output_size')} + let batch_idx = global_idx / uniforms.num_slices_per_batch; + let base_offset = batch_idx * uniforms.input_batch_stride; + + let slice_indices_base_offset = global_idx * uniforms.num_slice_dims; + var relative_slice_offset = 0; + for (var dim_idx = 0u; dim_idx < uniforms.num_slice_dims; dim_idx ++) { + var index = i32(indices_data[dim_idx + 
slice_indices_base_offset].x); + let input_dim_idx = uniforms.batch_dims + dim_idx; + if (index < 0) { + ${ + inputDims.length === 1 + ? 'index += i32(uniforms.input_dims);' + : 'index += i32(uniforms.input_dims[input_dim_idx]);' + } + } + ${ + sizesFromSliceDimsData.length === 1 + ? 'relative_slice_offset += index * i32(uniforms.sizes_from_slice_dims_data);' + : 'relative_slice_offset += index * i32(uniforms.sizes_from_slice_dims_data[dim_idx]);' + } + } + + input_slice_offsets_data[global_idx] = base_offset + u32(relative_slice_offset); + }`; + }; + + return context.compute( + { + name: 'computeSliceOffsets', + shaderCache: { hint: `${inputDims.length}_${sizesFromSliceDimsData.length}`, inputDependencies: ['rank'] }, + getRunData: () => ({ + outputs: [{ dims: outputShape, dataType: context.inputs[1].dataType }], + dispatchGroup: { x: Math.ceil(numSlices / 64) }, + programUniforms, + }), + getShaderSource, + }, + { inputs: [indicesData], outputs: [-1] }, + )[0]; +}; + +export const gatherND = (context: ComputeContext, attributes: GatherNDAttributes) => { + const inputs = context.inputs; + const inputShape = inputs[0].dims; + const inputType = inputs[0].dataType; + const indicesShape = inputs[1].dims; + const numSliceDims = indicesShape[indicesShape.length - 1]; + const numSlices = ShapeUtil.sizeToDimension(indicesShape, indicesShape.length - 1); + const sliceSize = ShapeUtil.sizeFromDimension(inputShape, attributes.batchDims + numSliceDims); + const numBatches = ShapeUtil.sizeToDimension(inputShape, attributes.batchDims); + const inputBatchStride = ShapeUtil.sizeFromDimension(inputShape, attributes.batchDims); + const numSlicesPerBatch = numSlices / numBatches; + const sizesFromSliceDims = new Array(numSliceDims); + let runningProduct = sliceSize; + for (let i = 0; i < numSliceDims; ++i) { + sizesFromSliceDims[numSliceDims - 1 - i] = runningProduct; + runningProduct *= inputShape[attributes.batchDims + numSliceDims - 1 - i]; + } + + const inputSliceOffsets = computeSliceOffsets( + context, + inputs[1], + sizesFromSliceDims, + attributes.batchDims, + inputShape, + numSlices, + numSlicesPerBatch, + inputBatchStride, + numSliceDims, + ); + + const lastIndicesDimension = attributes.batchDims + numSliceDims; + if (lastIndicesDimension > inputShape.length) { + throw new Error('last dimension of indices must not be larger than rank of input tensor'); + } + + const outputShape = indicesShape.slice(0, -1).concat(inputShape.slice(lastIndicesDimension)); + const outputSize = ShapeUtil.size(outputShape); + + const programUniforms: ProgramUniform[] = [ + { type: DataType.uint32, data: outputSize }, + { type: DataType.uint32, data: sliceSize }, + ...createTensorShapeVariables(inputs[0].dims, inputSliceOffsets.dims, outputShape), + ]; + + const getShaderSource = (shaderHelper: ShaderHelper) => { + const input = inputVariable('data', inputs[0].dataType, inputs[0].dims.length); + const indices = inputVariable('slice_offsets', DataType.uint32, inputSliceOffsets.dims.length); + + const output = outputVariable('output', inputs[0].dataType, outputShape.length); + return ` + ${shaderHelper + .registerUniform('output_size', 'u32') + .registerUniform('slice_size', 'u32') + .declareVariables(input, indices, output)} + ${shaderHelper.mainStart()} + ${shaderHelper.guardAgainstOutOfBoundsWorkgroupSizes('uniforms.output_size')} + let slice_offset = slice_offsets[global_idx / uniforms.slice_size]; + output[global_idx] = data[u32(slice_offset) + global_idx % uniforms.slice_size]; + }`; + }; + context.compute( + { + 
name: 'GatherND', + shaderCache: { hint: attributes.cacheKey, inputDependencies: ['rank', 'rank'] }, + getRunData: () => ({ + outputs: [{ dims: outputShape, dataType: inputType }], + dispatchGroup: { x: Math.ceil(outputSize / 64 /* workgroup size */) }, + programUniforms, + }), + getShaderSource, + }, + { inputs: [inputs[0], inputSliceOffsets] }, + ); +}; + +export const parseGatherNDAttributes = (attributes: Record): GatherNDAttributes => { + const batchDims = attributes.batch_dims as number; + return { + batchDims, + cacheKey: '', + }; +}; diff --git a/js/web/test/data/ops/gather-nd.jsonc b/js/web/test/data/ops/gather-nd.jsonc new file mode 100644 index 0000000000000..209c7d1f74087 --- /dev/null +++ b/js/web/test/data/ops/gather-nd.jsonc @@ -0,0 +1,147 @@ +[ + { + "name": "GatherND int32", + "operator": "GatherND", + "attributes": [], + "cases": [ + { + "name": "data[4] indices[]", + "inputs": [ + { + "data": [100, 101, 102, 777, 778, 779, 1000, 1001, 1002], + "dims": [9], + "type": "int32" + }, + { + "data": [0, 4, 8], + "dims": [3, 1], + "type": "int64" + } + ], + "outputs": [ + { + "data": [100, 778, 1002], + "dims": [3], + "type": "int32" + } + ] + } + ] + }, + { + "name": "GatherND float32", + "operator": "GatherND", + "attributes": [], + "cases": [ + { + "name": "data[4] indices[]", + "inputs": [ + { + "data": [100.1, 101.2, 102.3, 777.4, 778.5, 779.6, 1000.7, 1001.8, 1002.9], + "dims": [9], + "type": "float32" + }, + { + "data": [0, 4, 8], + "dims": [3, 1], + "type": "int64" + } + ], + "outputs": [ + { + "data": [100.0999984741211, 778.5, 1002.9000244140625], + "dims": [3], + "type": "float32" + } + ] + } + ] + }, + { + "name": "GatherND int32 [2 2 2], batch_dims", + "operator": "GatherND", + "attributes": [{ "name": "batch_dims", "data": 1, "type": "int" }], + "cases": [ + { + "name": "data[4] indices[]", + "inputs": [ + { + "data": [0, 1, 2, 3, 4, 5, 6, 7], + "dims": [2, 2, 2], + "type": "int32" + }, + { + "data": [1, 0], + "dims": [2, 1], + "type": "int64" + } + ], + "outputs": [ + { + "data": [2, 3, 4, 5], + "dims": [2, 2], + "type": "int32" + } + ] + } + ] + }, + { + "name": "GatherND float16", + "operator": "GatherND", + "attributes": [], + "cases": [ + { + "name": "data[4] indices[]", + "inputs": [ + { + "data": [100.1, 101.2, 102.3, 777.4, 778.5, 779.6, 1000.7, 1001.8, 1002.9], + "dims": [9], + "type": "float16" + }, + { + "data": [0, 4, 8], + "dims": [3, 1], + "type": "int64" + } + ], + "outputs": [ + { + "data": [100.0999984741211, 778.5, 1002.9000244140625], + "dims": [3], + "type": "float16" + } + ] + } + ] + }, + { + "name": "GatherND uint32 [2 2 2], batch_dims", + "operator": "GatherND", + "attributes": [{ "name": "batch_dims", "data": 1, "type": "int" }], + "cases": [ + { + "name": "data[4] indices[]", + "inputs": [ + { + "data": [0, 1, 2, 3, 4, 5, 6, 7], + "dims": [2, 2, 2], + "type": "uint32" + }, + { + "data": [1, 0], + "dims": [2, 1], + "type": "int64" + } + ], + "outputs": [ + { + "data": [2, 3, 4, 5], + "dims": [2, 2], + "type": "uint32" + } + ] + } + ] + } +] diff --git a/js/web/test/suite-test-list.jsonc b/js/web/test/suite-test-list.jsonc index 45fb771ee13bb..f179756967d49 100644 --- a/js/web/test/suite-test-list.jsonc +++ b/js/web/test/suite-test-list.jsonc @@ -1365,6 +1365,7 @@ "gather.jsonc", "gather-block-quantized.jsonc", "gather-elements.jsonc", + "gather-nd.jsonc", "gemm.jsonc", "global-average-pool.jsonc", "greater.jsonc", diff --git a/onnxruntime/core/providers/js/js_execution_provider.cc b/onnxruntime/core/providers/js/js_execution_provider.cc 
index c0d62bf47a0dd..4cb40ec8bf5fd 100644 --- a/onnxruntime/core/providers/js/js_execution_provider.cc +++ b/onnxruntime/core/providers/js/js_execution_provider.cc @@ -341,6 +341,10 @@ class ONNX_OPERATOR_KERNEL_CLASS_NAME(kJsExecutionProvider, kOnnxDomain, 13, Gat class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kJsExecutionProvider, kOnnxDomain, 11, 12, GatherElements); class ONNX_OPERATOR_KERNEL_CLASS_NAME(kJsExecutionProvider, kOnnxDomain, 13, GatherElements); +class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kJsExecutionProvider, kOnnxDomain, 11, 11, GatherND); +class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kJsExecutionProvider, kOnnxDomain, 12, 12, GatherND); +class ONNX_OPERATOR_KERNEL_CLASS_NAME(kJsExecutionProvider, kOnnxDomain, 13, GatherND); + class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kJsExecutionProvider, kOnnxDomain, 1, 9, Slice); class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kJsExecutionProvider, kOnnxDomain, 10, 10, Slice); class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kJsExecutionProvider, kOnnxDomain, 11, 12, Slice); @@ -667,6 +671,10 @@ std::unique_ptr RegisterKernels() { BuildKernelCreateInfo, BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, BuildKernelCreateInfo, BuildKernelCreateInfo, diff --git a/onnxruntime/core/providers/js/operators/gather_nd.cc b/onnxruntime/core/providers/js/operators/gather_nd.cc new file mode 100644 index 0000000000000..ee69100cc658e --- /dev/null +++ b/onnxruntime/core/providers/js/operators/gather_nd.cc @@ -0,0 +1,41 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#include "core/providers/js/js_kernel.h" +#include "core/providers/js/js_data_types.h" +#include "gather_nd.h" + +namespace onnxruntime { +namespace js { + +ONNX_OPERATOR_KERNEL_EX( + GatherND, + kOnnxDomain, + 13, + kJsExecutionProvider, + (*KernelDefBuilder::Create()) + .TypeConstraint("T", JsepSupportedDataTypes()), + GatherND); + +ONNX_OPERATOR_VERSIONED_KERNEL_EX( + GatherND, + kOnnxDomain, + 12, + 12, + kJsExecutionProvider, + (*KernelDefBuilder::Create()) + .TypeConstraint("T", JsepSupportedDataTypes()), + GatherND); + +ONNX_OPERATOR_VERSIONED_KERNEL_EX( + GatherND, + kOnnxDomain, + 11, + 11, + kJsExecutionProvider, + (*KernelDefBuilder::Create()) + .TypeConstraint("T", JsepSupportedDataTypes()), + GatherND); + +} // namespace js +} // namespace onnxruntime diff --git a/onnxruntime/core/providers/js/operators/gather_nd.h b/onnxruntime/core/providers/js/operators/gather_nd.h new file mode 100644 index 0000000000000..cdf7a52630dad --- /dev/null +++ b/onnxruntime/core/providers/js/operators/gather_nd.h @@ -0,0 +1,24 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +#pragma once + +#include "core/providers/js/js_kernel.h" + +namespace onnxruntime { +namespace js { + +class GatherND : public JsKernel { + public: + GatherND(const OpKernelInfo& info) : JsKernel(info) { + int64_t batchDims = info.GetAttrOrDefault("batch_dims", 0); + + JSEP_INIT_KERNEL_ATTRIBUTE(GatherND, ({ + "batch_dims" : Number($1), + }), + static_cast(batchDims)); + } +}; + +} // namespace js +} // namespace onnxruntime From fdf5ffe2cfe37a233e8eda5fdfec2eab0929fc10 Mon Sep 17 00:00:00 2001 From: Yulong Wang <7679871+fs-eire@users.noreply.github.com> Date: Wed, 4 Dec 2024 11:29:27 -0800 Subject: [PATCH 13/22] [js/node] fix TypeScript declaration in onnxruntime-node (#23000) ### Description fix TypeScript declaration in onnxruntime-node ### Motivation and Context Fixes #22978 --- js/node/tsconfig.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/js/node/tsconfig.json b/js/node/tsconfig.json index c154c3e148ed0..0401fb9609ad6 100644 --- a/js/node/tsconfig.json +++ b/js/node/tsconfig.json @@ -1,7 +1,8 @@ { "extends": "../tsconfig.json", "compilerOptions": { - "outDir": "dist" + "outDir": "dist", + "declaration": true }, "include": ["lib"] } From d0dde4f7d448f018aa5307b2aeb60d4a8d9d8d04 Mon Sep 17 00:00:00 2001 From: Yulong Wang <7679871+fs-eire@users.noreply.github.com> Date: Wed, 4 Dec 2024 13:08:13 -0800 Subject: [PATCH 14/22] [wasm/test] update packages versions (#23008) ### Description Upgrade packages version to resolve the following dependabot alerts: - https://github.com/microsoft/onnxruntime/security/dependabot/269 - https://github.com/microsoft/onnxruntime/security/dependabot/268 - https://github.com/microsoft/onnxruntime/security/dependabot/275 - https://github.com/microsoft/onnxruntime/security/dependabot/306 ``` # npm audit report braces <3.0.3 Severity: high Uncontrolled resource consumption in braces - https://github.com/advisories/GHSA-grv7-fg5c-xmjg fix available via `npm audit fix` node_modules/braces cookie <0.7.0 cookie accepts cookie name, path, and domain with out of bounds characters - https://github.com/advisories/GHSA-pxg6-pf52-xh8x fix available via `npm audit fix` node_modules/cookie engine.io 0.7.8 - 0.7.9 || 1.8.0 - 6.6.1 Depends on vulnerable versions of cookie Depends on vulnerable versions of ws node_modules/engine.io socket.io 1.6.0 - 4.7.5 Depends on vulnerable versions of engine.io node_modules/socket.io ws 8.0.0 - 8.17.0 Severity: high ws affected by a DoS when handling a request with many HTTP headers - https://github.com/advisories/GHSA-3h5v-q93c-6h6q fix available via `npm audit fix` node_modules/ws socket.io-adapter 2.5.2 - 2.5.4 Depends on vulnerable versions of ws node_modules/socket.io-adapter 6 vulnerabilities (1 low, 1 moderate, 4 high) ``` --- onnxruntime/test/wasm/package-lock.json | 300 ++++++++++++++---------- 1 file changed, 181 insertions(+), 119 deletions(-) diff --git a/onnxruntime/test/wasm/package-lock.json b/onnxruntime/test/wasm/package-lock.json index 522e96fc3188a..3bd5d173dbe79 100644 --- a/onnxruntime/test/wasm/package-lock.json +++ b/onnxruntime/test/wasm/package-lock.json @@ -27,9 +27,9 @@ } }, "node_modules/@socket.io/component-emitter": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@socket.io/component-emitter/-/component-emitter-3.1.0.tgz", - "integrity": "sha512-+9jVqKhRSpsc591z5vX+X5Yyw+he/HCB4iQ/RYxw35CEPaY1gnsNE43nf9n9AaYjAQrTiI/mOwKUKdUs9vf7Xg==", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@socket.io/component-emitter/-/component-emitter-3.1.2.tgz", + 
"integrity": "sha512-9BCxFwvbGg/RsZK9tjXd8s4UcwR0MWeFQ1XEKIQVVvAGJyINdrqKMcTRyLoK8Rse1GjzLV9cwjWV1olXRWEXVA==", "dev": true }, "node_modules/@types/cookie": { @@ -39,19 +39,22 @@ "dev": true }, "node_modules/@types/cors": { - "version": "2.8.13", - "resolved": "https://registry.npmjs.org/@types/cors/-/cors-2.8.13.tgz", - "integrity": "sha512-RG8AStHlUiV5ysZQKq97copd2UmVYw3/pRMLefISZ3S1hK104Cwm7iLQ3fTKx+lsUH2CE8FlLaYeEA2LSeqYUA==", + "version": "2.8.17", + "resolved": "https://registry.npmjs.org/@types/cors/-/cors-2.8.17.tgz", + "integrity": "sha512-8CGDvrBj1zgo2qE+oS3pOCyYNqCPryMWY2bGfwA0dcfopWGgxs+78df0Rs3rc9THP4JkOhLsAa+15VdpAqkcUA==", "dev": true, "dependencies": { "@types/node": "*" } }, "node_modules/@types/node": { - "version": "18.13.0", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.13.0.tgz", - "integrity": "sha512-gC3TazRzGoOnoKAhUx+Q0t8S9Tzs74z7m0ipwGpSqQrleP14hKxP4/JUeEQcD3W1/aIpnWl8pHowI7WokuZpXg==", - "dev": true + "version": "22.10.1", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.10.1.tgz", + "integrity": "sha512-qKgsUwfHZV2WCWLAnVP1JqnpE6Im6h3Y0+fYgMTasNQ7V++CBX5OT1as0g0f+OyubbFqhf6XVNIsmN4IIhEgGQ==", + "dev": true, + "dependencies": { + "undici-types": "~6.20.0" + } }, "node_modules/accepts": { "version": "1.3.8", @@ -162,12 +165,12 @@ } }, "node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", "dev": true, "dependencies": { - "fill-range": "^7.0.1" + "fill-range": "^7.1.1" }, "engines": { "node": ">=8" @@ -288,9 +291,9 @@ } }, "node_modules/cookie": { - "version": "0.4.2", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.2.tgz", - "integrity": "sha512-aSWTXFzaKWkvHO1Ny/s+ePFpvKsPnjc551iI41v3ny/ow6tBG5Vd+FuqGNhh1LxOmVzOlGUriIlOaokOvhaStA==", + "version": "0.7.2", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz", + "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==", "dev": true, "engines": { "node": ">= 0.6" @@ -409,9 +412,9 @@ } }, "node_modules/engine.io": { - "version": "6.4.2", - "resolved": "https://registry.npmjs.org/engine.io/-/engine.io-6.4.2.tgz", - "integrity": "sha512-FKn/3oMiJjrOEOeUub2WCox6JhxBXq/Zn3fZOMCBxKnNYtsdKjxhl7yR3fZhM9PV+rdE75SU5SYMc+2PGzo+Tg==", + "version": "6.6.2", + "resolved": "https://registry.npmjs.org/engine.io/-/engine.io-6.6.2.tgz", + "integrity": "sha512-gmNvsYi9C8iErnZdVcJnvCpSKbWTt1E8+JZo8b+daLninywUWi5NQ5STSHZ9rFjFO7imNcvb8Pc5pe/wMR5xEw==", "dev": true, "dependencies": { "@types/cookie": "^0.4.1", @@ -419,32 +422,32 @@ "@types/node": ">=10.0.0", "accepts": "~1.3.4", "base64id": "2.0.0", - "cookie": "~0.4.1", + "cookie": "~0.7.2", "cors": "~2.8.5", "debug": "~4.3.1", - "engine.io-parser": "~5.0.3", - "ws": "~8.11.0" + "engine.io-parser": "~5.2.1", + "ws": "~8.17.1" }, "engines": { - "node": ">=10.0.0" + "node": ">=10.2.0" } }, "node_modules/engine.io-parser": { - "version": "5.0.6", - "resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-5.0.6.tgz", - "integrity": "sha512-tjuoZDMAdEhVnSFleYPCtdL2GXwVTGtNjoeJd9IhIG3C1xs9uwxqRNEu5WpnDZCaozwVlK/nuQhpodhXSIMaxw==", + "version": "5.2.3", + "resolved": 
"https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-5.2.3.tgz", + "integrity": "sha512-HqD3yTBfnBxIrbnM1DoD6Pcq8NECnh8d4As1Qgh0z5Gg3jRRIqijury0CL3ghu/edArpUYiYqQiDUQBIs4np3Q==", "dev": true, "engines": { "node": ">=10.0.0" } }, "node_modules/engine.io/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", + "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", "dev": true, "dependencies": { - "ms": "2.1.2" + "ms": "^2.1.3" }, "engines": { "node": ">=6.0" @@ -456,9 +459,9 @@ } }, "node_modules/engine.io/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "dev": true }, "node_modules/ent": { @@ -516,9 +519,9 @@ "dev": true }, "node_modules/fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", "dev": true, "dependencies": { "to-regex-range": "^5.0.1" @@ -1304,35 +1307,60 @@ } }, "node_modules/socket.io": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/socket.io/-/socket.io-4.6.0.tgz", - "integrity": "sha512-b65bp6INPk/BMMrIgVvX12x3Q+NqlGqSlTuvKQWt0BUJ3Hyy3JangBl7fEoWZTXbOKlCqNPbQ6MbWgok/km28w==", + "version": "4.8.1", + "resolved": "https://registry.npmjs.org/socket.io/-/socket.io-4.8.1.tgz", + "integrity": "sha512-oZ7iUCxph8WYRHHcjBEc9unw3adt5CmSNlppj/5Q4k2RIrhl8Z5yY2Xr4j9zj0+wzVZ0bxmYoGSzKJnRl6A4yg==", "dev": true, "dependencies": { "accepts": "~1.3.4", "base64id": "~2.0.0", + "cors": "~2.8.5", "debug": "~4.3.2", - "engine.io": "~6.4.0", + "engine.io": "~6.6.0", "socket.io-adapter": "~2.5.2", - "socket.io-parser": "~4.2.1" + "socket.io-parser": "~4.2.4" }, "engines": { - "node": ">=10.0.0" + "node": ">=10.2.0" } }, "node_modules/socket.io-adapter": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/socket.io-adapter/-/socket.io-adapter-2.5.2.tgz", - "integrity": "sha512-87C3LO/NOMc+eMcpcxUBebGjkpMDkNBS9tf7KJqcDsmL936EChtVva71Dw2q4tQcuVC+hAUy4an2NO/sYXmwRA==", + "version": "2.5.5", + "resolved": "https://registry.npmjs.org/socket.io-adapter/-/socket.io-adapter-2.5.5.tgz", + "integrity": "sha512-eLDQas5dzPgOWCk9GuuJC2lBqItuhKI4uxGgo9aIV7MYbk2h9Q6uULEh8WBzThoI7l+qU9Ast9fVUmkqPP9wYg==", + "dev": true, + "dependencies": { + "debug": "~4.3.4", + "ws": "~8.17.1" + } + }, + "node_modules/socket.io-adapter/node_modules/debug": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", + "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", "dev": true, "dependencies": { - "ws": "~8.11.0" + "ms": "^2.1.3" + }, + 
"engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } } }, + "node_modules/socket.io-adapter/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true + }, "node_modules/socket.io-parser": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-4.2.3.tgz", - "integrity": "sha512-JMafRntWVO2DCJimKsRTh/wnqVvO4hrfwOqtO7f+uzwsQMuxO6VwImtYxaQ+ieoyshWOTJyV0fA21lccEXRPpQ==", + "version": "4.2.4", + "resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-4.2.4.tgz", + "integrity": "sha512-/GbIKmo8ioc+NIWIhwdecY0ge+qVBSMdgxGygevmdHj24bsfgtCmcUUcQ5ZzcylGFHsN3k4HB4Cgkl96KVnuew==", "dev": true, "dependencies": { "@socket.io/component-emitter": "~3.1.0", @@ -1343,12 +1371,12 @@ } }, "node_modules/socket.io-parser/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", + "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", "dev": true, "dependencies": { - "ms": "2.1.2" + "ms": "^2.1.3" }, "engines": { "node": ">=6.0" @@ -1360,9 +1388,9 @@ } }, "node_modules/socket.io-parser/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "dev": true }, "node_modules/socket.io/node_modules/debug": { @@ -1534,6 +1562,12 @@ "node": "*" } }, + "node_modules/undici-types": { + "version": "6.20.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.20.0.tgz", + "integrity": "sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg==", + "dev": true + }, "node_modules/universalify": { "version": "0.1.2", "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", @@ -1615,16 +1649,16 @@ "dev": true }, "node_modules/ws": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.11.0.tgz", - "integrity": "sha512-HPG3wQd9sNQoT9xHyNCXoDUa+Xw/VevmY9FoHyQ+g+rrMn4j6FB4np7Z0OhdTgjx6MgQLK7jwSy1YecU1+4Asg==", + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.17.1.tgz", + "integrity": "sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==", "dev": true, "engines": { "node": ">=10.0.0" }, "peerDependencies": { "bufferutil": "^4.0.1", - "utf-8-validate": "^5.0.2" + "utf-8-validate": ">=5.0.2" }, "peerDependenciesMeta": { "bufferutil": { @@ -1686,9 +1720,9 @@ "dev": true }, "@socket.io/component-emitter": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@socket.io/component-emitter/-/component-emitter-3.1.0.tgz", - "integrity": "sha512-+9jVqKhRSpsc591z5vX+X5Yyw+he/HCB4iQ/RYxw35CEPaY1gnsNE43nf9n9AaYjAQrTiI/mOwKUKdUs9vf7Xg==", + "version": "3.1.2", + "resolved": 
"https://registry.npmjs.org/@socket.io/component-emitter/-/component-emitter-3.1.2.tgz", + "integrity": "sha512-9BCxFwvbGg/RsZK9tjXd8s4UcwR0MWeFQ1XEKIQVVvAGJyINdrqKMcTRyLoK8Rse1GjzLV9cwjWV1olXRWEXVA==", "dev": true }, "@types/cookie": { @@ -1698,19 +1732,22 @@ "dev": true }, "@types/cors": { - "version": "2.8.13", - "resolved": "https://registry.npmjs.org/@types/cors/-/cors-2.8.13.tgz", - "integrity": "sha512-RG8AStHlUiV5ysZQKq97copd2UmVYw3/pRMLefISZ3S1hK104Cwm7iLQ3fTKx+lsUH2CE8FlLaYeEA2LSeqYUA==", + "version": "2.8.17", + "resolved": "https://registry.npmjs.org/@types/cors/-/cors-2.8.17.tgz", + "integrity": "sha512-8CGDvrBj1zgo2qE+oS3pOCyYNqCPryMWY2bGfwA0dcfopWGgxs+78df0Rs3rc9THP4JkOhLsAa+15VdpAqkcUA==", "dev": true, "requires": { "@types/node": "*" } }, "@types/node": { - "version": "18.13.0", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.13.0.tgz", - "integrity": "sha512-gC3TazRzGoOnoKAhUx+Q0t8S9Tzs74z7m0ipwGpSqQrleP14hKxP4/JUeEQcD3W1/aIpnWl8pHowI7WokuZpXg==", - "dev": true + "version": "22.10.1", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.10.1.tgz", + "integrity": "sha512-qKgsUwfHZV2WCWLAnVP1JqnpE6Im6h3Y0+fYgMTasNQ7V++CBX5OT1as0g0f+OyubbFqhf6XVNIsmN4IIhEgGQ==", + "dev": true, + "requires": { + "undici-types": "~6.20.0" + } }, "accepts": { "version": "1.3.8", @@ -1796,12 +1833,12 @@ } }, "braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", "dev": true, "requires": { - "fill-range": "^7.0.1" + "fill-range": "^7.1.1" } }, "bytes": { @@ -1890,9 +1927,9 @@ "dev": true }, "cookie": { - "version": "0.4.2", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.2.tgz", - "integrity": "sha512-aSWTXFzaKWkvHO1Ny/s+ePFpvKsPnjc551iI41v3ny/ow6tBG5Vd+FuqGNhh1LxOmVzOlGUriIlOaokOvhaStA==", + "version": "0.7.2", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz", + "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==", "dev": true }, "cors": { @@ -1986,9 +2023,9 @@ "dev": true }, "engine.io": { - "version": "6.4.2", - "resolved": "https://registry.npmjs.org/engine.io/-/engine.io-6.4.2.tgz", - "integrity": "sha512-FKn/3oMiJjrOEOeUub2WCox6JhxBXq/Zn3fZOMCBxKnNYtsdKjxhl7yR3fZhM9PV+rdE75SU5SYMc+2PGzo+Tg==", + "version": "6.6.2", + "resolved": "https://registry.npmjs.org/engine.io/-/engine.io-6.6.2.tgz", + "integrity": "sha512-gmNvsYi9C8iErnZdVcJnvCpSKbWTt1E8+JZo8b+daLninywUWi5NQ5STSHZ9rFjFO7imNcvb8Pc5pe/wMR5xEw==", "dev": true, "requires": { "@types/cookie": "^0.4.1", @@ -1996,34 +2033,34 @@ "@types/node": ">=10.0.0", "accepts": "~1.3.4", "base64id": "2.0.0", - "cookie": "~0.4.1", + "cookie": "~0.7.2", "cors": "~2.8.5", "debug": "~4.3.1", - "engine.io-parser": "~5.0.3", - "ws": "~8.11.0" + "engine.io-parser": "~5.2.1", + "ws": "~8.17.1" }, "dependencies": { "debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", + "integrity": 
"sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", "dev": true, "requires": { - "ms": "2.1.2" + "ms": "^2.1.3" } }, "ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "dev": true } } }, "engine.io-parser": { - "version": "5.0.6", - "resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-5.0.6.tgz", - "integrity": "sha512-tjuoZDMAdEhVnSFleYPCtdL2GXwVTGtNjoeJd9IhIG3C1xs9uwxqRNEu5WpnDZCaozwVlK/nuQhpodhXSIMaxw==", + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-5.2.3.tgz", + "integrity": "sha512-HqD3yTBfnBxIrbnM1DoD6Pcq8NECnh8d4As1Qgh0z5Gg3jRRIqijury0CL3ghu/edArpUYiYqQiDUQBIs4np3Q==", "dev": true }, "ent": { @@ -2072,9 +2109,9 @@ "dev": true }, "fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", "dev": true, "requires": { "to-regex-range": "^5.0.1" @@ -2651,17 +2688,18 @@ } }, "socket.io": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/socket.io/-/socket.io-4.6.0.tgz", - "integrity": "sha512-b65bp6INPk/BMMrIgVvX12x3Q+NqlGqSlTuvKQWt0BUJ3Hyy3JangBl7fEoWZTXbOKlCqNPbQ6MbWgok/km28w==", + "version": "4.8.1", + "resolved": "https://registry.npmjs.org/socket.io/-/socket.io-4.8.1.tgz", + "integrity": "sha512-oZ7iUCxph8WYRHHcjBEc9unw3adt5CmSNlppj/5Q4k2RIrhl8Z5yY2Xr4j9zj0+wzVZ0bxmYoGSzKJnRl6A4yg==", "dev": true, "requires": { "accepts": "~1.3.4", "base64id": "~2.0.0", + "cors": "~2.8.5", "debug": "~4.3.2", - "engine.io": "~6.4.0", + "engine.io": "~6.6.0", "socket.io-adapter": "~2.5.2", - "socket.io-parser": "~4.2.1" + "socket.io-parser": "~4.2.4" }, "dependencies": { "debug": { @@ -2682,18 +2720,36 @@ } }, "socket.io-adapter": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/socket.io-adapter/-/socket.io-adapter-2.5.2.tgz", - "integrity": "sha512-87C3LO/NOMc+eMcpcxUBebGjkpMDkNBS9tf7KJqcDsmL936EChtVva71Dw2q4tQcuVC+hAUy4an2NO/sYXmwRA==", + "version": "2.5.5", + "resolved": "https://registry.npmjs.org/socket.io-adapter/-/socket.io-adapter-2.5.5.tgz", + "integrity": "sha512-eLDQas5dzPgOWCk9GuuJC2lBqItuhKI4uxGgo9aIV7MYbk2h9Q6uULEh8WBzThoI7l+qU9Ast9fVUmkqPP9wYg==", "dev": true, "requires": { - "ws": "~8.11.0" + "debug": "~4.3.4", + "ws": "~8.17.1" + }, + "dependencies": { + "debug": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", + "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", + "dev": true, + "requires": { + "ms": "^2.1.3" + } + }, + "ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true + } } }, "socket.io-parser": { - "version": "4.2.3", - "resolved": 
"https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-4.2.3.tgz", - "integrity": "sha512-JMafRntWVO2DCJimKsRTh/wnqVvO4hrfwOqtO7f+uzwsQMuxO6VwImtYxaQ+ieoyshWOTJyV0fA21lccEXRPpQ==", + "version": "4.2.4", + "resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-4.2.4.tgz", + "integrity": "sha512-/GbIKmo8ioc+NIWIhwdecY0ge+qVBSMdgxGygevmdHj24bsfgtCmcUUcQ5ZzcylGFHsN3k4HB4Cgkl96KVnuew==", "dev": true, "requires": { "@socket.io/component-emitter": "~3.1.0", @@ -2701,18 +2757,18 @@ }, "dependencies": { "debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", + "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", "dev": true, "requires": { - "ms": "2.1.2" + "ms": "^2.1.3" } }, "ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "dev": true } } @@ -2817,6 +2873,12 @@ "integrity": "sha512-s8ax/CeZdK9R/56Sui0WM6y9OFREJarMRHqLB2EwkovemBxNQ+Bqu8GAsUnVcXKgphb++ghr/B2BZx4mahujPw==", "dev": true }, + "undici-types": { + "version": "6.20.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.20.0.tgz", + "integrity": "sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg==", + "dev": true + }, "universalify": { "version": "0.1.2", "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", @@ -2874,9 +2936,9 @@ "dev": true }, "ws": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.11.0.tgz", - "integrity": "sha512-HPG3wQd9sNQoT9xHyNCXoDUa+Xw/VevmY9FoHyQ+g+rrMn4j6FB4np7Z0OhdTgjx6MgQLK7jwSy1YecU1+4Asg==", + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.17.1.tgz", + "integrity": "sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==", "dev": true, "requires": {} }, From 7b0fa407ebe414051aa563ddee5f3450e110e154 Mon Sep 17 00:00:00 2001 From: Yulong Wang <7679871+fs-eire@users.noreply.github.com> Date: Wed, 4 Dec 2024 13:08:29 -0800 Subject: [PATCH 15/22] fix requirements.txt path (#22946) ### Description #22380 removes the file `tools/ci_build/github/linux/docker/inference/x86_64/python/cpu/scripts/requirements.txt` but it is still used in `dockerfiles/Dockerfile.cuda`. This change updates the file path of the requirements.txt fixes #22945. --- dockerfiles/Dockerfile.cuda | 2 +- docs/How_To_Update_ONNX_Dev_Notes.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/dockerfiles/Dockerfile.cuda b/dockerfiles/Dockerfile.cuda index ce4560e9b0c7c..40f11dca623a7 100644 --- a/dockerfiles/Dockerfile.cuda +++ b/dockerfiles/Dockerfile.cuda @@ -48,7 +48,7 @@ RUN cd /code \ && python3 -m venv /code/env \ && . 
/code/env/bin/activate \ && pip install --upgrade psutil setuptools wheel packaging \ - && pip install -r tools/ci_build/github/linux/docker/inference/x86_64/python/cpu/scripts/requirements.txt \ + && pip install -r /code/tools/ci_build/github/linux/python/requirements.txt \ && python /code/tools/ci_build/build.py --build_dir /code/build/Linux \ --allow_running_as_root --skip_submodule_sync \ --use_cuda --cuda_home /usr/local/cuda \ diff --git a/docs/How_To_Update_ONNX_Dev_Notes.md b/docs/How_To_Update_ONNX_Dev_Notes.md index 4d8a286bde66e..199e6671f6a1a 100644 --- a/docs/How_To_Update_ONNX_Dev_Notes.md +++ b/docs/How_To_Update_ONNX_Dev_Notes.md @@ -21,7 +21,7 @@ This file should be generated. See [cgmanifests/README](/cgmanifests/README.md) - [onnxruntime/test/python/requirements.txt](/onnxruntime/test/python/requirements.txt) - [tools/ci_build/github/linux/docker/scripts/requirements.txt](/tools/ci_build/github/linux/docker/scripts/requirements.txt) - [tools/ci_build/github/linux/docker/scripts/manylinux/requirements.txt](/tools/ci_build/github/linux/docker/scripts/manylinux/requirements.txt) -- [tools/ci_build/github/linux/docker/inference/x86_64/python/cpu/scripts/requirements.txt](/tools/ci_build/github/linux/docker/inference/x86_64/python/cpu/scripts/requirements.txt) +- [tools/ci_build/github/linux/python/requirements.txt](/tools/ci_build/github/linux/python/requirements.txt) - Run `git grep -rn "onnx==1" .` to find other locations and update this document if necessary. 1. If there is any change to `cmake/external/onnx/onnx/*.in.proto`, you need to regenerate OnnxMl.cs. From fbe22fdac70553b01dbb22ced4600919ec2be1de Mon Sep 17 00:00:00 2001 From: Jing Fang <126209182+fajin-corp@users.noreply.github.com> Date: Wed, 4 Dec 2024 22:55:52 +0000 Subject: [PATCH 16/22] [ARM CPU] Fix flaky hqnbitgemm UT (#23010) ### Description Increase fp16 qnbitgemm UT tol and use fixed seeds. 
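
For reference, a minimal sketch of the pattern the diff below applies: seed the `std::mt19937` with a fixed constant instead of `std::random_device`, so a failing tolerance check reproduces across runs, and compare fp16 results with slightly wider absolute/relative bounds. The class name, seed value, and comparison helper here are hypothetical stand-ins, not the actual MLAS test fixture.

```cpp
// Sketch of deterministic seeding for a flaky fp16 GEMM test, assuming a
// hypothetical fixture; it mirrors the idea of the patch, not its exact code.
#include <algorithm>
#include <cmath>
#include <random>

class Fp16GemmTestSketch {
 public:
  Fp16GemmTestSketch()
      : seed_(19287u),   // fixed seed: failures reproduce across runs
        gen_(seed_),     // mersenne_twister_engine seeded deterministically
        distrib_(0, 255) {}

  // Widened fp16 tolerance check: pass if within an absolute bound or a
  // relative bound, analogous to the 0.02f / 0.055f values in the diff.
  static bool FloatNear(float actual, float expected, float abs_tol, float rel_tol) {
    const float diff = std::fabs(actual - expected);
    const float scale = std::max(std::fabs(actual), std::fabs(expected));
    return diff <= abs_tol || diff <= rel_tol * scale;
  }

 private:
  unsigned int seed_;
  std::mt19937 gen_;
  std::uniform_int_distribution<> distrib_;
};
```
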
### Motivation and Context --- .../test/mlas/unittest/test_hqnbitgemm_neon.cpp | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/onnxruntime/test/mlas/unittest/test_hqnbitgemm_neon.cpp b/onnxruntime/test/mlas/unittest/test_hqnbitgemm_neon.cpp index a455007c2f6ae..b598c20e29280 100644 --- a/onnxruntime/test/mlas/unittest/test_hqnbitgemm_neon.cpp +++ b/onnxruntime/test/mlas/unittest/test_hqnbitgemm_neon.cpp @@ -81,7 +81,6 @@ class MlasNeonFp16CastTest : public MlasTestBase { class MlasNeonFp16PrepackTest : public MlasTestBase { private: - std::random_device rd_; // a seed source for the random number engine unsigned int seed_; std::mt19937 gen_; // mersenne_twister_engine seeded with rd() std::uniform_int_distribution<> distrib_; @@ -173,7 +172,7 @@ class MlasNeonFp16PrepackTest : public MlasTestBase { public: MlasNeonFp16PrepackTest() - : seed_(rd_()), gen_(seed_), distrib_(0, 255) { + : seed_(19287), gen_(seed_), distrib_(0, 255) { } static const char* GetTestSuiteName() { @@ -197,7 +196,6 @@ class MlasNeonFp16PrepackTest : public MlasTestBase { class MlasNeonFp16DequantBTest : public MlasTestBase { private: - std::random_device rd_; // a seed source for the random number engine unsigned int seed_; std::mt19937 gen_; // mersenne_twister_engine seeded with rd() std::uniform_int_distribution<> distrib_; @@ -318,7 +316,7 @@ class MlasNeonFp16DequantBTest : public MlasTestBase { public: MlasNeonFp16DequantBTest() - : seed_(rd_()), gen_(seed_), distrib_(0, 255), _distribFp(0.5f, 2.0f) { + : seed_(19287), gen_(seed_), distrib_(0, 255), _distribFp(0.5f, 2.0f) { } static const char* GetTestSuiteName() { @@ -353,7 +351,6 @@ class MlasNeonFp16DequantBTest : public MlasTestBase { class MlasNeonFp16HQ4BitGemmKernelTest : public MlasTestBase { private: - std::random_device rd_; // a seed source for the random number engine unsigned int seed_; std::mt19937 gen_; // mersenne_twister_engine seeded with rd() MatrixGuardBuffer A_, B_, C_, ref_, bias_; @@ -404,7 +401,7 @@ class MlasNeonFp16HQ4BitGemmKernelTest : public MlasTestBase { for (size_t m = 0; m < M; ++m) { for (size_t n = 0; n < N; ++n) { size_t i = m * Ldc + n; - ASSERT_TRUE(FloatEqual(target[i], ref[i], 0.015f, 0.03f)) + ASSERT_TRUE(FloatEqual(target[i], ref[i], 0.02f, 0.055f)) << " seed " << seed_ << " v0 " << target[i] << " v1 " << ref[i] << " m " << m << " n " << n; @@ -439,7 +436,7 @@ class MlasNeonFp16HQ4BitGemmKernelTest : public MlasTestBase { public: MlasNeonFp16HQ4BitGemmKernelTest() - : seed_(rd_()), gen_(seed_) { + : seed_(19287), gen_(seed_) { } static const char* GetTestSuiteName() { From cacd97dba3a1f2b9879e8bf5a60b23363c10b117 Mon Sep 17 00:00:00 2001 From: Wanming Lin Date: Thu, 5 Dec 2024 07:09:54 +0800 Subject: [PATCH 17/22] [WebNN] Improve the util function of creating WebNN constant MLOperand (#22935) Merge the util functions to create or retrieve: - A WebNN constant MLOperand filled with the specified value, data type, and shape. - A WebNN scalar constant MLOperand with the specified value and data type. 
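
As a quick orientation for the diff below, a hedged caller-side sketch of the merged helper: one `CreateOrGetConstant(data_type, value, shape = {})` call now covers both the scalar case and the shaped, value-filled case. The calling function, its parameters, and the include set are illustrative assumptions, not code from this patch.

```cpp
// Hypothetical usage sketch of ModelBuilder::CreateOrGetConstant as merged in
// this change; the calling function below is invented for illustration only
// and assumes the usual ORT/WebNN headers are available in the build.
#include <cstdint>
#include <vector>

#include <emscripten/val.h>
#include "core/providers/webnn/builders/model_builder.h"

void BuildExampleConstants(onnxruntime::webnn::ModelBuilder& model_builder,
                           int32_t input_type,  // assumed FLOAT or FLOAT16
                           const std::vector<uint32_t>& scale_dims) {
  // Scalar constant (the old CreateOrGetScalarConstant case): shape defaults to {}.
  emscripten::val two = model_builder.CreateOrGetConstant<float>(input_type, 2.0f);

  // Shaped zero constant (the old GetZeroConstant case): same shape as the scale tensor.
  emscripten::val zero_point = model_builder.CreateOrGetConstant<uint8_t>(
      ONNX_NAMESPACE::TensorProto_DataType_UINT8, 0, scale_dims);

  (void)two;
  (void)zero_point;
}
```
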
--- .../webnn/builders/impl/conv_op_builder.cc | 4 +- .../webnn/builders/impl/dropout_op_builder.cc | 14 +-- .../webnn/builders/impl/gemm_op_builder.cc | 4 +- .../webnn/builders/impl/lrn_op_builder.cc | 11 +- .../builders/impl/normalization_op_builder.cc | 18 +-- .../webnn/builders/impl/qdq_op_builder.cc | 5 +- .../providers/webnn/builders/model_builder.cc | 68 ----------- .../providers/webnn/builders/model_builder.h | 110 +++++++++++------- 8 files changed, 89 insertions(+), 145 deletions(-) diff --git a/onnxruntime/core/providers/webnn/builders/impl/conv_op_builder.cc b/onnxruntime/core/providers/webnn/builders/impl/conv_op_builder.cc index 329db75316e82..52fcc39ae5418 100644 --- a/onnxruntime/core/providers/webnn/builders/impl/conv_op_builder.cc +++ b/onnxruntime/core/providers/webnn/builders/impl/conv_op_builder.cc @@ -311,12 +311,12 @@ Status ConvOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N if (input_defs.size() >= 3) { x_zero_point = model_builder.GetOperand(node.InputDefs()[2]->Name()); } else { - x_zero_point = model_builder.GetZeroConstant(ONNX_NAMESPACE::TensorProto_DataType_UINT8); + x_zero_point = model_builder.CreateOrGetConstant(ONNX_NAMESPACE::TensorProto_DataType_UINT8, 0); } if (input_defs.size() >= 4) { w_zero_point = model_builder.GetOperand(node.InputDefs()[3]->Name()); } else { - w_zero_point = model_builder.GetZeroConstant(ONNX_NAMESPACE::TensorProto_DataType_UINT8); + w_zero_point = model_builder.CreateOrGetConstant(ONNX_NAMESPACE::TensorProto_DataType_UINT8, 0); } output = model_builder.GetBuilder().call("conv2dInteger", input, x_zero_point, filter, w_zero_point, options); diff --git a/onnxruntime/core/providers/webnn/builders/impl/dropout_op_builder.cc b/onnxruntime/core/providers/webnn/builders/impl/dropout_op_builder.cc index 5434194a214ac..9bb930c63b009 100644 --- a/onnxruntime/core/providers/webnn/builders/impl/dropout_op_builder.cc +++ b/onnxruntime/core/providers/webnn/builders/impl/dropout_op_builder.cc @@ -59,22 +59,14 @@ Status DropoutOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, std::vector mask_shape; ORT_RETURN_IF_NOT(GetShape(*output_defs[1], mask_shape, logger), "Cannot get mask output's shape"); std::vector dims = GetVecUint32FromVecInt64(mask_shape); - - emscripten::val desc = emscripten::val::object(); - desc.set("dataType", "uint8"); - desc.set("dimensions", emscripten::val::array(dims)); - desc.set("shape", emscripten::val::array(dims)); - const auto num_elements = narrow(Product(mask_shape)); - emscripten::val ones_buffer = emscripten::val::global("Uint8Array").new_(num_elements); - ones_buffer.call("fill", 1); - - emscripten::val mask_output = model_builder.GetBuilder().call("constant", desc, ones_buffer); + emscripten::val one_constant = model_builder.CreateOrGetConstant( + ONNX_NAMESPACE::TensorProto_DataType_BOOL, 1, dims); emscripten::val options = emscripten::val::object(); options.set("label", output_defs[1]->Name() + "_identity"); // Add additional identity op in case the mask is the output of a WebNN graph, // beacuse WebNN does not support a constant operand as output. 
- mask_output = model_builder.GetBuilder().call("identity", mask_output, options); + emscripten::val mask_output = model_builder.GetBuilder().call("identity", one_constant, options); model_builder.AddOperand(output_defs[1]->Name(), std::move(mask_output)); } return Status::OK(); diff --git a/onnxruntime/core/providers/webnn/builders/impl/gemm_op_builder.cc b/onnxruntime/core/providers/webnn/builders/impl/gemm_op_builder.cc index 1477530ce1894..252d49a2f4d4d 100644 --- a/onnxruntime/core/providers/webnn/builders/impl/gemm_op_builder.cc +++ b/onnxruntime/core/providers/webnn/builders/impl/gemm_op_builder.cc @@ -113,12 +113,12 @@ Status GemmOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N if (input_defs.size() >= 3) { a_zero_point = model_builder.GetOperand(node.InputDefs()[2]->Name()); } else { - a_zero_point = model_builder.GetZeroConstant(ONNX_NAMESPACE::TensorProto_DataType_UINT8); + a_zero_point = model_builder.CreateOrGetConstant(ONNX_NAMESPACE::TensorProto_DataType_UINT8, 0); } if (input_defs.size() >= 4) { b_zero_point = model_builder.GetOperand(node.InputDefs()[3]->Name()); } else { - b_zero_point = model_builder.GetZeroConstant(ONNX_NAMESPACE::TensorProto_DataType_UINT8); + b_zero_point = model_builder.CreateOrGetConstant(ONNX_NAMESPACE::TensorProto_DataType_UINT8, 0); } output = model_builder.GetBuilder().call("matmulInteger", a, diff --git a/onnxruntime/core/providers/webnn/builders/impl/lrn_op_builder.cc b/onnxruntime/core/providers/webnn/builders/impl/lrn_op_builder.cc index bdd1283c720f3..19f6d6aff8f97 100644 --- a/onnxruntime/core/providers/webnn/builders/impl/lrn_op_builder.cc +++ b/onnxruntime/core/providers/webnn/builders/impl/lrn_op_builder.cc @@ -29,7 +29,8 @@ Status LRNOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const Node& node, const logging::Logger& logger) const { const auto& input_defs = node.InputDefs(); - const auto input_data_type = input_defs[0]->TypeAsProto()->tensor_type().elem_type(); + int32_t input_data_type; + ORT_RETURN_IF_NOT(GetType(*input_defs[0], input_data_type, logger), "Cannot get input type"); emscripten::val input = model_builder.GetOperand(input_defs[0]->Name()); const auto node_name = node.Name(); emscripten::val wnn_builder = model_builder.GetBuilder(); @@ -42,10 +43,10 @@ Status LRNOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, // Prepare WebNN constants for alpha, beta, bias attributes. // Assume T is float, because input_data_type has been limited to float32 and float16 in 'hasSupportedInitsImpl'. - emscripten::val alpha_constant = model_builder.CreateOrGetScalarConstant(input_data_type, alpha); - emscripten::val beta_constant = model_builder.CreateOrGetScalarConstant(input_data_type, beta); - emscripten::val bias_constant = model_builder.CreateOrGetScalarConstant(input_data_type, bias); - emscripten::val pow1_constant = model_builder.CreateOrGetScalarConstant(input_data_type, 2); + emscripten::val alpha_constant = model_builder.CreateOrGetConstant(input_data_type, alpha); + emscripten::val beta_constant = model_builder.CreateOrGetConstant(input_data_type, beta); + emscripten::val bias_constant = model_builder.CreateOrGetConstant(input_data_type, bias); + emscripten::val pow1_constant = model_builder.CreateOrGetConstant(input_data_type, 2); /** WebNN doesn't support LRN. 
So decompose it into a series of ops: diff --git a/onnxruntime/core/providers/webnn/builders/impl/normalization_op_builder.cc b/onnxruntime/core/providers/webnn/builders/impl/normalization_op_builder.cc index fa82c2f85f0d8..79ed0393e3044 100644 --- a/onnxruntime/core/providers/webnn/builders/impl/normalization_op_builder.cc +++ b/onnxruntime/core/providers/webnn/builders/impl/normalization_op_builder.cc @@ -100,7 +100,7 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder X --> Pow --> ReduceMean --> Add --> Sqrt --> Div -> Mul ^ ^ ^ ^ ^ | | | | | - Y:2 axis B:epsilon A:X A:scale + Y:2 axis B:epsilon A:X A:scale */ int32_t input_type; @@ -108,13 +108,7 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder emscripten::val common_options = emscripten::val::object(); // Pow - emscripten::val pow_constant_desc = emscripten::val::object(); - ORT_RETURN_IF_NOT(SetWebnnDataType(pow_constant_desc, input_type), "Unsupported data type"); - pow_constant_desc.set("shape", emscripten::val::array()); - emscripten::val pow_buffer = emscripten::val::global("Float32Array").new_(1); - pow_buffer.set(0, 2); - emscripten::val pow_constant = - model_builder.GetBuilder().call("constant", pow_constant_desc, pow_buffer); + emscripten::val pow_constant = model_builder.CreateOrGetConstant(input_type, 2); common_options.set("label", node.Name() + "_pow"); emscripten::val pow = model_builder.GetBuilder().call("pow", input, pow_constant, common_options); @@ -127,13 +121,7 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder emscripten::val reduce_mean = model_builder.GetBuilder().call("reduceMean", pow, reduce_options); // Add - emscripten::val add_constant_desc = emscripten::val::object(); - ORT_RETURN_IF_NOT(SetWebnnDataType(add_constant_desc, input_type), "Unsupported data type"); - add_constant_desc.set("shape", emscripten::val::array()); - emscripten::val add_buffer = emscripten::val::global("Float32Array").new_(1); - add_buffer.set(0, epsilon); - emscripten::val add_constant = - model_builder.GetBuilder().call("constant", add_constant_desc, add_buffer); + emscripten::val add_constant = model_builder.CreateOrGetConstant(input_type, epsilon); common_options.set("label", node.Name() + "_add"); emscripten::val add = model_builder.GetBuilder().call("add", reduce_mean, add_constant, common_options); diff --git a/onnxruntime/core/providers/webnn/builders/impl/qdq_op_builder.cc b/onnxruntime/core/providers/webnn/builders/impl/qdq_op_builder.cc index 88fb79b146cd9..ca15e123d0999 100644 --- a/onnxruntime/core/providers/webnn/builders/impl/qdq_op_builder.cc +++ b/onnxruntime/core/providers/webnn/builders/impl/qdq_op_builder.cc @@ -100,7 +100,10 @@ Status QDQOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, // zero_point has the same shape as the scale tensor. zero_point_shape = GetVecUint32FromVecInt64(scale_shape); } - zero_point = model_builder.GetZeroConstant(zero_point_type, zero_point_shape); + // Create a zero constant with the same shape as the scale tensor. + // The zero value has been pre-processed in the CreateOrGetConstant function, + // so the type of T is not relevant here. 
+ zero_point = model_builder.CreateOrGetConstant(zero_point_type, 0, zero_point_shape); } emscripten::val options = emscripten::val::object(); diff --git a/onnxruntime/core/providers/webnn/builders/model_builder.cc b/onnxruntime/core/providers/webnn/builders/model_builder.cc index 8a82fce42189d..e8f116d390199 100644 --- a/onnxruntime/core/providers/webnn/builders/model_builder.cc +++ b/onnxruntime/core/providers/webnn/builders/model_builder.cc @@ -14,7 +14,6 @@ #include "core/providers/common.h" #include "core/providers/shared/utils/utils.h" -#include #include namespace onnxruntime { @@ -385,73 +384,6 @@ void ModelBuilder::AddOperand(const std::string& name, const emscripten::val& op wnn_operands_.insert(std::make_pair(name, operand)); } -// Get the zero constant with shape. -const emscripten::val& ModelBuilder::GetZeroConstant(const int32_t& data_type, - const std::vector& shape) { - std::string name = "webnn_zero_constant_" + std::to_string(data_type); - emscripten::val dims = emscripten::val::array(); - if (!shape.empty()) { - dims = emscripten::val::array(shape); - std::ostringstream name_stream; - name_stream << name; - for (const auto& dim : shape) { - name_stream << "_" << dim; - } - name = name_stream.str(); - } - // If the operand does not exist, create it. - if (wnn_operands_.find(name) == wnn_operands_.end()) { - emscripten::val desc = emscripten::val::object(); - desc.set("dimensions", dims); - desc.set("shape", dims); - emscripten::val zero_buffer = emscripten::val::undefined(); - if (!SetWebnnDataType(desc, data_type)) { - ORT_THROW("Unsupported data type: " + std::to_string(data_type)); - } - auto num_elements = Product(shape); - switch (data_type) { - case ONNX_NAMESPACE::TensorProto_DataType_INT4: - case ONNX_NAMESPACE::TensorProto_DataType_UINT4: - // For WebNN int4 and uint4 tensors are stored in Uint8Array, - // so we need to adjust the number of elements. 
- num_elements = (num_elements + 1) / 2; - zero_buffer = emscripten::val::global("Uint8Array").new_(num_elements); - break; - case ONNX_NAMESPACE::TensorProto_DataType_BOOL: - case ONNX_NAMESPACE::TensorProto_DataType_UINT8: - zero_buffer = emscripten::val::global("Uint8Array").new_(num_elements); - break; - case ONNX_NAMESPACE::TensorProto_DataType_INT8: - zero_buffer = emscripten::val::global("Int8Array").new_(num_elements); - break; - case ONNX_NAMESPACE::TensorProto_DataType_FLOAT16: - zero_buffer = emscripten::val::global("Uint16Array").new_(num_elements); - break; - case ONNX_NAMESPACE::TensorProto_DataType_FLOAT: - zero_buffer = emscripten::val::global("Float32Array").new_(num_elements); - break; - case ONNX_NAMESPACE::TensorProto_DataType_INT32: - zero_buffer = emscripten::val::global("Int32Array").new_(num_elements); - break; - case ONNX_NAMESPACE::TensorProto_DataType_INT64: - zero_buffer = emscripten::val::global("BigInt64Array").new_(num_elements); - break; - case ONNX_NAMESPACE::TensorProto_DataType_UINT32: - zero_buffer = emscripten::val::global("Uint32Array").new_(num_elements); - break; - case ONNX_NAMESPACE::TensorProto_DataType_UINT64: - zero_buffer = emscripten::val::global("BigUint64Array").new_(num_elements); - break; - default: - break; - } - - emscripten::val zero_constant = wnn_builder_.call("constant", desc, zero_buffer); - wnn_operands_.insert(std::make_pair(name, zero_constant)); - } - return wnn_operands_.at(name); -} - void ModelBuilder::AddInitializerToSkip(const std::string& tensor_name) { skipped_initializers_.insert(tensor_name); } diff --git a/onnxruntime/core/providers/webnn/builders/model_builder.h b/onnxruntime/core/providers/webnn/builders/model_builder.h index c482e9d05b301..0fc2fa20670c7 100644 --- a/onnxruntime/core/providers/webnn/builders/model_builder.h +++ b/onnxruntime/core/providers/webnn/builders/model_builder.h @@ -11,6 +11,7 @@ #include "core/framework/execution_provider.h" #include "core/providers/webnn/builders/helper.h" +#include #include #include @@ -38,11 +39,10 @@ class ModelBuilder { const emscripten::val& GetOpSupportLimits() const { return wnn_limits_; } void AddOperand(const std::string& name, const emscripten::val& operand); - const emscripten::val& GetZeroConstant( - const int32_t& data_type, const std::vector& shape = {}); template - const emscripten::val& CreateOrGetScalarConstant(const int32_t& data_type, T value); + const emscripten::val& CreateOrGetConstant(const int32_t& data_type, T value, + const std::vector& shape = {}); // Use the buffers to persist WebNN allocated data like transposed weight. // It ensures the validity during inference session. @@ -103,11 +103,12 @@ class ModelBuilder { static const IOpBuilder* GetOpBuilder(const Node& node); }; -// Create a scalar constant MLOperand of the specified value and data type. -// Workaround for builer.constant(type, value) method since it has not been implemented now. +// Create or retrieve one of the following: +// - A WebNN constant MLOperand filled with the specified value, data type, and shape. +// - A WebNN scalar constant MLOperand with the specified value and data type. +// For scalar constant, it is workaround for builer.constant(type, value) method since +// it has not been implemented now. // https://webmachinelearning.github.io/webnn/#api-mlgraphbuilder-constant-type-value -// BTW, the spec is discussing if the builder.constant(type, value) should be dropped at -// https://github.com/webmachinelearning/webnn/issues/475. Fix me according to the spec decision. 
// // This function enforces a mapping between the data_type and the value types: // - TensorProto_DataType_INT4 <-> int8_t @@ -122,69 +123,96 @@ class ModelBuilder { // - TensorProto_DataType_UINT32 <-> uint32_t // - TensorProto_DataType_UINT64 <-> uint64_t template -const emscripten::val& ModelBuilder::CreateOrGetScalarConstant(const int32_t& data_type, T value) { - std::string name = "webnn_scalar_constant_" + std::to_string(data_type) + "_" + std::to_string(value); - emscripten::val desc = emscripten::val::object(); - desc.set("shape", emscripten::val::array()); - emscripten::val scalar_buffer = emscripten::val::undefined(); - uint16_t value_uint16 = 0; - uint8_t value_uint8 = 0; - if (!SetWebnnDataType(desc, data_type)) { - ORT_THROW("Unsupported data type: " + std::to_string(data_type)); +const emscripten::val& ModelBuilder::CreateOrGetConstant(const int32_t& data_type, T value, + const std::vector& shape) { + std::string name = "webnn_constant_" + std::to_string(data_type) + "_" + std::to_string(value); + emscripten::val dims = emscripten::val::array(); + if (!shape.empty()) { + dims = emscripten::val::array(shape); + std::ostringstream name_stream; + name_stream << name; + for (const auto& dim : shape) { + name_stream << "_" << dim; + } + name = name_stream.str(); } // If the operand does not exist, create it. if (wnn_operands_.find(name) == wnn_operands_.end()) { + emscripten::val desc = emscripten::val::object(); + desc.set("shape", dims); + desc.set("dimensions", dims); + emscripten::val buffer = emscripten::val::undefined(); + if (!SetWebnnDataType(desc, data_type)) { + ORT_THROW("Unsupported data type: " + std::to_string(data_type)); + } + auto num_elements = Product(shape); switch (data_type) { case ONNX_NAMESPACE::TensorProto_DataType_INT4: case ONNX_NAMESPACE::TensorProto_DataType_UINT4: - scalar_buffer = emscripten::val::global("Uint8Array").new_(1); - value_uint8 = PackInt8ToUint8AsNibble(value, data_type); - scalar_buffer.call("fill", emscripten::val(value_uint8)); + // For WebNN int4 and uint4 tensors are stored in Uint8Array, + // so we need to adjust the number of elements. + num_elements = (num_elements + 1) / 2; + buffer = emscripten::val::global("Uint8Array").new_(num_elements); + if (value) { + buffer.call("fill", emscripten::val(PackInt8ToUint8AsNibble(value, data_type))); + } break; case ONNX_NAMESPACE::TensorProto_DataType_BOOL: - scalar_buffer = emscripten::val::global("Uint8Array").new_(1); - scalar_buffer.call("fill", emscripten::val(value ? 
1 : 0)); - break; case ONNX_NAMESPACE::TensorProto_DataType_UINT8: - scalar_buffer = emscripten::val::global("Uint8Array").new_(1); - scalar_buffer.call("fill", emscripten::val(value)); + buffer = emscripten::val::global("Uint8Array").new_(num_elements); + if (value) { + buffer.call("fill", emscripten::val(value)); + } break; case ONNX_NAMESPACE::TensorProto_DataType_INT8: - scalar_buffer = emscripten::val::global("Int8Array").new_(1); - scalar_buffer.call("fill", emscripten::val(value)); + buffer = emscripten::val::global("Int8Array").new_(num_elements); + if (value) { + buffer.call("fill", emscripten::val(value)); + } break; case ONNX_NAMESPACE::TensorProto_DataType_FLOAT16: - scalar_buffer = emscripten::val::global("Uint16Array").new_(1); - value_uint16 = PackFloat32ToUint16AsFloat16(value); - scalar_buffer.call("fill", emscripten::val(value_uint16)); + buffer = emscripten::val::global("Uint16Array").new_(num_elements); + if (value) { + buffer.call("fill", emscripten::val(PackFloat32ToUint16AsFloat16(value))); + } break; case ONNX_NAMESPACE::TensorProto_DataType_FLOAT: - scalar_buffer = emscripten::val::global("Float32Array").new_(1); - scalar_buffer.call("fill", emscripten::val(value)); + buffer = emscripten::val::global("Float32Array").new_(num_elements); + if (value) { + buffer.call("fill", emscripten::val(value)); + } break; case ONNX_NAMESPACE::TensorProto_DataType_INT32: - scalar_buffer = emscripten::val::global("Int32Array").new_(1); - scalar_buffer.call("fill", emscripten::val(value)); + buffer = emscripten::val::global("Int32Array").new_(num_elements); + if (value) { + buffer.call("fill", emscripten::val(value)); + } break; case ONNX_NAMESPACE::TensorProto_DataType_UINT32: - scalar_buffer = emscripten::val::global("Uint32Array").new_(1); - scalar_buffer.call("fill", emscripten::val(value)); + buffer = emscripten::val::global("Uint32Array").new_(num_elements); + if (value) { + buffer.call("fill", emscripten::val(value)); + } break; case ONNX_NAMESPACE::TensorProto_DataType_INT64: - scalar_buffer = emscripten::val::global("BigInt64Array").new_(1); - scalar_buffer.call("fill", emscripten::val::global("BigInt")(value)); + buffer = emscripten::val::global("BigInt64Array").new_(num_elements); + if (value) { + buffer.call("fill", emscripten::val::global("BigInt")(value)); + } break; case ONNX_NAMESPACE::TensorProto_DataType_UINT64: - scalar_buffer = emscripten::val::global("BigUint64Array").new_(1); - scalar_buffer.call("fill", emscripten::val::global("BigInt")(value)); + buffer = emscripten::val::global("BigUint64Array").new_(num_elements); + if (value) { + buffer.call("fill", emscripten::val::global("BigInt")(value)); + } break; default: break; } - const emscripten::val scalar_constant = wnn_builder_.call("constant", desc, scalar_buffer); - wnn_operands_.insert(std::make_pair(name, scalar_constant)); + const emscripten::val constant = wnn_builder_.call("constant", desc, buffer); + wnn_operands_.insert(std::make_pair(name, constant)); } return wnn_operands_.at(name); From 3975e793034c02127964cb1985e55651dd797648 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Dec 2024 23:52:24 +0000 Subject: [PATCH 18/22] Bump axios from 1.6.1 to 1.7.9 in /js/node (#23009) --- js/node/package-lock.json | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/js/node/package-lock.json b/js/node/package-lock.json index 1f95b0c206ffa..6d3c96e579a47 100644 --- a/js/node/package-lock.json +++ 
b/js/node/package-lock.json @@ -276,12 +276,12 @@ "dev": true }, "node_modules/axios": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/axios/-/axios-1.6.1.tgz", - "integrity": "sha512-vfBmhDpKafglh0EldBEbVuoe7DyAavGSLWhuSm5ZSEKQnHhBf0xAAwybbNH1IkrJNGnS/VG4I5yxig1pCEXE4g==", + "version": "1.7.9", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.9.tgz", + "integrity": "sha512-LhLcE7Hbiryz8oMDdDptSrWowmB4Bl6RCt6sIJKpRB4XtVf0iEgewX3au/pJqm+Py1kCASkb/FFKjxQaLtxJvw==", "dev": true, "dependencies": { - "follow-redirects": "^1.15.0", + "follow-redirects": "^1.15.6", "form-data": "^4.0.0", "proxy-from-env": "^1.1.0" } @@ -1581,12 +1581,12 @@ "dev": true }, "axios": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/axios/-/axios-1.6.1.tgz", - "integrity": "sha512-vfBmhDpKafglh0EldBEbVuoe7DyAavGSLWhuSm5ZSEKQnHhBf0xAAwybbNH1IkrJNGnS/VG4I5yxig1pCEXE4g==", + "version": "1.7.9", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.9.tgz", + "integrity": "sha512-LhLcE7Hbiryz8oMDdDptSrWowmB4Bl6RCt6sIJKpRB4XtVf0iEgewX3au/pJqm+Py1kCASkb/FFKjxQaLtxJvw==", "dev": true, "requires": { - "follow-redirects": "^1.15.0", + "follow-redirects": "^1.15.6", "form-data": "^4.0.0", "proxy-from-env": "^1.1.0" } From 3234487385054a369dc7061d29030a3ae233dc31 Mon Sep 17 00:00:00 2001 From: Yulong Wang <7679871+fs-eire@users.noreply.github.com> Date: Wed, 4 Dec 2024 16:44:09 -0800 Subject: [PATCH 19/22] [js] remove more unused training types (#22753) ### Description remove more unused training types --- js/.eslintrc.js | 13 -- js/common/lib/backend.ts | 36 ---- js/common/lib/env.ts | 2 - js/common/lib/index.ts | 1 - js/common/lib/training-session-impl.ts | 273 ------------------------- js/common/lib/training-session.ts | 206 ------------------- 6 files changed, 531 deletions(-) delete mode 100644 js/common/lib/training-session-impl.ts delete mode 100644 js/common/lib/training-session.ts diff --git a/js/.eslintrc.js b/js/.eslintrc.js index bd1e9061355f5..462e417df1d66 100644 --- a/js/.eslintrc.js +++ b/js/.eslintrc.js @@ -198,19 +198,6 @@ module.exports = { '_OrtReleaseTensor', '_OrtRun', '_OrtRunWithBinding', - '_OrtTrainingCopyParametersFromBuffer', - '_OrtTrainingCopyParametersToBuffer', - '_OrtTrainingCreateSession', - '_OrtTrainingEvalStep', - '_OrtTrainingGetModelInputOutputCount', - '_OrtTrainingGetModelInputOutputName', - '_OrtTrainingGetParametersSize', - '_OrtTrainingLazyResetGrad', - '_OrtTrainingLoadCheckpoint', - '_OrtTrainingOptimizerStep', - '_OrtTrainingReleaseCheckpoint', - '_OrtTrainingReleaseSession', - '_OrtTrainingRunTrainStep', ], }, ], diff --git a/js/common/lib/backend.ts b/js/common/lib/backend.ts index e27e67622aa82..e63f9c6c9147f 100644 --- a/js/common/lib/backend.ts +++ b/js/common/lib/backend.ts @@ -3,7 +3,6 @@ import { InferenceSession } from './inference-session.js'; import { OnnxValue } from './onnx-value.js'; -import { TrainingSession } from './training-session.js'; /** * @ignore @@ -42,33 +41,6 @@ export interface InferenceSessionHandler extends SessionHandler { ): Promise; } -/** - * Represent a handler instance of a training inference session. 
- * - * @ignore - */ -export interface TrainingSessionHandler extends SessionHandler { - readonly evalInputNames: readonly string[]; - readonly evalOutputNames: readonly string[]; - - lazyResetGrad(): Promise; - runTrainStep( - feeds: SessionHandler.FeedsType, - fetches: SessionHandler.FetchesType, - options: InferenceSession.RunOptions, - ): Promise; - runOptimizerStep(options: InferenceSession.RunOptions): Promise; - runEvalStep( - feeds: SessionHandler.FeedsType, - fetches: SessionHandler.FetchesType, - options: InferenceSession.RunOptions, - ): Promise; - - getParametersSize(trainableOnly: boolean): Promise; - loadParametersBuffer(buffer: Uint8Array, trainableOnly: boolean): Promise; - getContiguousParameters(trainableOnly: boolean): Promise; -} - /** * Represent a backend that provides implementation of model inferencing. * @@ -84,14 +56,6 @@ export interface Backend { uriOrBuffer: string | Uint8Array, options?: InferenceSession.SessionOptions, ): Promise; - - createTrainingSessionHandler?( - checkpointStateUriOrBuffer: TrainingSession.UriOrBuffer, - trainModelUriOrBuffer: TrainingSession.UriOrBuffer, - evalModelUriOrBuffer: TrainingSession.UriOrBuffer, - optimizerModelUriOrBuffer: TrainingSession.UriOrBuffer, - options: InferenceSession.SessionOptions, - ): Promise; } export { registerBackend } from './backend-impl.js'; diff --git a/js/common/lib/env.ts b/js/common/lib/env.ts index 642a897a90d26..adb6a440cf22a 100644 --- a/js/common/lib/env.ts +++ b/js/common/lib/env.ts @@ -14,7 +14,6 @@ export declare namespace Env { * If not modified, the filename of the .wasm file is: * - `ort-wasm-simd-threaded.wasm` for default build * - `ort-wasm-simd-threaded.jsep.wasm` for JSEP build (with WebGPU and WebNN) - * - `ort-training-wasm-simd-threaded.wasm` for training build */ wasm?: URL | string; /** @@ -25,7 +24,6 @@ export declare namespace Env { * If not modified, the filename of the .mjs file is: * - `ort-wasm-simd-threaded.mjs` for default build * - `ort-wasm-simd-threaded.jsep.mjs` for JSEP build (with WebGPU and WebNN) - * - `ort-training-wasm-simd-threaded.mjs` for training build */ mjs?: URL | string; } diff --git a/js/common/lib/index.ts b/js/common/lib/index.ts index 3ed56b3c2e812..d75e6a477258d 100644 --- a/js/common/lib/index.ts +++ b/js/common/lib/index.ts @@ -26,4 +26,3 @@ export * from './tensor-factory.js'; export * from './trace.js'; export * from './onnx-model.js'; export * from './onnx-value.js'; -export * from './training-session.js'; diff --git a/js/common/lib/training-session-impl.ts b/js/common/lib/training-session-impl.ts deleted file mode 100644 index 21dbe5fe51bb9..0000000000000 --- a/js/common/lib/training-session-impl.ts +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -import { resolveBackendAndExecutionProviders } from './backend-impl.js'; -import { SessionHandler, TrainingSessionHandler } from './backend.js'; -import { InferenceSession as InferenceSession } from './inference-session.js'; -import { OnnxValue } from './onnx-value.js'; -import { Tensor } from './tensor.js'; -import { TrainingSession as TrainingSessionInterface, TrainingSessionCreateOptions } from './training-session.js'; - -type SessionOptions = InferenceSession.SessionOptions; -type FeedsType = InferenceSession.FeedsType; -type FetchesType = InferenceSession.FetchesType; -type ReturnType = InferenceSession.ReturnType; -type RunOptions = InferenceSession.RunOptions; - -const noBackendErrMsg: string = - 'Training backend could not be resolved. ' + "Make sure you're using the correct configuration & WebAssembly files."; - -export class TrainingSession implements TrainingSessionInterface { - private constructor(handler: TrainingSessionHandler, hasOptimizerModel: boolean, hasEvalModel: boolean) { - this.handler = handler; - this.hasOptimizerModel = hasOptimizerModel; - this.hasEvalModel = hasEvalModel; - } - private handler: TrainingSessionHandler; - private hasOptimizerModel: boolean; - private hasEvalModel: boolean; - - get trainingInputNames(): readonly string[] { - return this.handler.inputNames; - } - get trainingOutputNames(): readonly string[] { - return this.handler.outputNames; - } - - get evalInputNames(): readonly string[] { - if (this.hasEvalModel) { - return this.handler.evalInputNames; - } else { - throw new Error('This training session has no evalModel loaded.'); - } - } - get evalOutputNames(): readonly string[] { - if (this.hasEvalModel) { - return this.handler.evalOutputNames; - } else { - throw new Error('This training session has no evalModel loaded.'); - } - } - - static async create( - trainingOptions: TrainingSessionCreateOptions, - sessionOptions?: SessionOptions, - ): Promise { - const evalModel: string | Uint8Array = trainingOptions.evalModel || ''; - const optimizerModel: string | Uint8Array = trainingOptions.optimizerModel || ''; - const options: SessionOptions = sessionOptions || {}; - - // resolve backend, update session options with validated EPs, and create session handler - const [backend, optionsWithValidatedEPs] = await resolveBackendAndExecutionProviders(options); - if (backend.createTrainingSessionHandler) { - const handler = await backend.createTrainingSessionHandler( - trainingOptions.checkpointState, - trainingOptions.trainModel, - evalModel, - optimizerModel, - optionsWithValidatedEPs, - ); - return new TrainingSession(handler, !!trainingOptions.optimizerModel, !!trainingOptions.evalModel); - } else { - throw new Error(noBackendErrMsg); - } - } - - /** - * Helper function for runTrainStep and future runStep methods that handles the type-narrowing conversion from - * the given parameters to SessionHandler.FetchesType and RunOptions. - * - * @param inputNames the feeds object is checked that they contain all input names in the provided list of input - * names. - * @param outputNames the fetches object is checked that their keys match up with valid names in the list of output - * names. - * @param feeds the required input - * @param arg1 narrowed & converted into the SessionHandler.FetchesType or RunOptions object - * @param arg2 optional RunOptions object. 
- * @returns - */ - typeNarrowingForRunStep( - inputNames: readonly string[], - outputNames: readonly string[], - feeds: FeedsType, - arg1?: FetchesType | RunOptions, - arg2?: RunOptions, - ): [SessionHandler.FetchesType, RunOptions] { - const fetches: { [name: string]: OnnxValue | null } = {}; - let options: RunOptions = {}; - // check inputs - if (typeof feeds !== 'object' || feeds === null || feeds instanceof Tensor || Array.isArray(feeds)) { - throw new TypeError( - "'feeds' must be an object that use input names as keys and OnnxValue as corresponding values.", - ); - } - - let isFetchesEmpty = true; - // determine which override is being used - if (typeof arg1 === 'object') { - if (arg1 === null) { - throw new TypeError('Unexpected argument[1]: cannot be null.'); - } - if (arg1 instanceof Tensor) { - throw new TypeError("'fetches' cannot be a Tensor"); - } - - if (Array.isArray(arg1)) { - if (arg1.length === 0) { - throw new TypeError("'fetches' cannot be an empty array."); - } - isFetchesEmpty = false; - // output names - for (const name of arg1) { - if (typeof name !== 'string') { - throw new TypeError("'fetches' must be a string array or an object."); - } - if (outputNames.indexOf(name) === -1) { - throw new RangeError(`'fetches' contains invalid output name: ${name}.`); - } - fetches[name] = null; - } - - if (typeof arg2 === 'object' && arg2 !== null) { - options = arg2; - } else if (typeof arg2 !== 'undefined') { - throw new TypeError("'options' must be an object."); - } - } else { - // decide whether arg1 is fetches or options - // if any output name is present and its value is valid OnnxValue, we consider it fetches - let isFetches = false; - const arg1Keys = Object.getOwnPropertyNames(arg1); - for (const name of outputNames) { - if (arg1Keys.indexOf(name) !== -1) { - const v = (arg1 as InferenceSession.NullableOnnxValueMapType)[name]; - if (v === null || v instanceof Tensor) { - isFetches = true; - isFetchesEmpty = false; - fetches[name] = v; - } - } - } - - if (isFetches) { - if (typeof arg2 === 'object' && arg2 !== null) { - options = arg2; - } else if (typeof arg2 !== 'undefined') { - throw new TypeError("'options' must be an object."); - } - } else { - options = arg1 as RunOptions; - } - } - } else if (typeof arg1 !== 'undefined') { - throw new TypeError("Unexpected argument[1]: must be 'fetches' or 'options'."); - } - - // check if all inputs are in feed - for (const name of inputNames) { - if (typeof feeds[name] === 'undefined') { - throw new Error(`input '${name}' is missing in 'feeds'.`); - } - } - - // if no fetches is specified, we use the full output names list - if (isFetchesEmpty) { - for (const name of outputNames) { - fetches[name] = null; - } - } - - return [fetches, options]; - } - - /** - * Helper method for runTrainStep and any other runStep methods. Takes the ReturnType result from the SessionHandler - * and changes it into a map of Tensors. 
- * - * @param results - * @returns - */ - convertHandlerReturnTypeToMapOfTensors(results: SessionHandler.ReturnType): ReturnType { - const returnValue: { [name: string]: OnnxValue } = {}; - for (const key in results) { - if (Object.hasOwnProperty.call(results, key)) { - const result = results[key]; - if (result instanceof Tensor) { - returnValue[key] = result; - } else { - returnValue[key] = new Tensor(result.type, result.data, result.dims); - } - } - } - return returnValue; - } - - async lazyResetGrad(): Promise { - await this.handler.lazyResetGrad(); - } - - runTrainStep(feeds: FeedsType, options?: RunOptions): Promise; - runTrainStep(feeds: FeedsType, fetches: FetchesType, options?: RunOptions): Promise; - async runTrainStep(feeds: FeedsType, arg1?: FetchesType | RunOptions, arg2?: RunOptions): Promise { - const [fetches, options] = this.typeNarrowingForRunStep( - this.trainingInputNames, - this.trainingOutputNames, - feeds, - arg1, - arg2, - ); - const results = await this.handler.runTrainStep(feeds, fetches, options); - return this.convertHandlerReturnTypeToMapOfTensors(results); - } - - async runOptimizerStep(options?: InferenceSession.RunOptions | undefined): Promise { - if (this.hasOptimizerModel) { - await this.handler.runOptimizerStep(options || {}); - } else { - throw new Error('This TrainingSession has no OptimizerModel loaded.'); - } - } - - runEvalStep(feeds: FeedsType, options?: RunOptions | undefined): Promise; - runEvalStep(feeds: FeedsType, fetches: FetchesType, options?: RunOptions | undefined): Promise; - async runEvalStep(feeds: FeedsType, arg1?: FetchesType | RunOptions, arg2?: RunOptions): Promise { - if (this.hasEvalModel) { - const [fetches, options] = this.typeNarrowingForRunStep( - this.evalInputNames, - this.evalOutputNames, - feeds, - arg1, - arg2, - ); - const results = await this.handler.runEvalStep(feeds, fetches, options); - return this.convertHandlerReturnTypeToMapOfTensors(results); - } else { - throw new Error('This TrainingSession has no EvalModel loaded.'); - } - } - - async getParametersSize(trainableOnly = true): Promise { - return this.handler.getParametersSize(trainableOnly); - } - - async loadParametersBuffer(array: Uint8Array, trainableOnly = true): Promise { - const paramsSize = await this.getParametersSize(trainableOnly); - // checking that the size of the Uint8Array is equivalent to the byte length of a Float32Array of the number - // of parameters - if (array.length !== 4 * paramsSize) { - throw new Error( - 'Size of the buffer passed into loadParametersBuffer must match the number of parameters in ' + - 'the model. Please use getParametersSize method to check.', - ); - } - return this.handler.loadParametersBuffer(array, trainableOnly); - } - - async getContiguousParameters(trainableOnly = true): Promise { - return this.handler.getContiguousParameters(trainableOnly); - } - - async release(): Promise { - return this.handler.dispose(); - } -} diff --git a/js/common/lib/training-session.ts b/js/common/lib/training-session.ts deleted file mode 100644 index 45dcafc46deb5..0000000000000 --- a/js/common/lib/training-session.ts +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -import { InferenceSession } from './inference-session.js'; -import { OnnxValue } from './onnx-value.js'; -import { TrainingSession as TrainingSessionImpl } from './training-session-impl.js'; - -/* eslint-disable @typescript-eslint/no-redeclare */ - -export declare namespace TrainingSession { - /** - * Either URI file path (string) or Uint8Array containing model or checkpoint information. - */ - type UriOrBuffer = string | Uint8Array; -} - -/** - * Represent a runtime instance of an ONNX training session, - * which contains a model that can be trained, and, optionally, - * an eval and optimizer model. - */ -export interface TrainingSession { - // #region run() - - /** - * Lazily resets the gradients of all trainable parameters to zero. Should happen after the invocation of - * runOptimizerStep. - */ - lazyResetGrad(): Promise; - - /** - * Run TrainStep asynchronously with the given feeds and options. - * - * @param feeds - Representation of the model input. See type description of `InferenceSession.InputType` for - detail. - * @param options - Optional. A set of options that controls the behavior of model training. - * @returns A promise that resolves to a map, which uses output names as keys and OnnxValue as corresponding values. - */ - runTrainStep( - feeds: InferenceSession.FeedsType, - options?: InferenceSession.RunOptions, - ): Promise; - - /** - * Run a single train step with the given inputs and options. - * - * @param feeds - Representation of the model input. - * @param fetches - Representation of the model output. - * detail. - * @param options - Optional. A set of options that controls the behavior of model training. - * @returns A promise that resolves to a map, which uses output names as keys and OnnxValue as corresponding - values. - */ - runTrainStep( - feeds: InferenceSession.FeedsType, - fetches: InferenceSession.FetchesType, - options?: InferenceSession.RunOptions, - ): Promise; - - /** - * Runs a single optimizer step, which performs weight updates for the trainable parameters using the optimizer model. - * - * @param options - Optional. A set of options that controls the behavior of model optimizing. - */ - runOptimizerStep(options?: InferenceSession.RunOptions): Promise; - - /** - * Run a single eval step with the given inputs and options using the eval model. - * - * @param feeds - Representation of the model input. - * @param options - Optional. A set of options that controls the behavior of model eval step. - * @returns A promise that resolves to a map, which uses output names as keys and OnnxValue as corresponding - values. - */ - runEvalStep( - feeds: InferenceSession.FeedsType, - options?: InferenceSession.RunOptions, - ): Promise; - - /** - * Run a single eval step with the given inputs and options using the eval model. - * - * @param feeds - Representation of the model input. - * @param fetches - Representation of the model output. - * detail. - * @param options - Optional. A set of options that controls the behavior of model eval step. - * @returns A promise that resolves to a map, which uses output names as keys and OnnxValue as corresponding - values. - */ - runEvalStep( - feeds: InferenceSession.FeedsType, - fetches: InferenceSession.FetchesType, - options?: InferenceSession.RunOptions, - ): Promise; - - // #endregion - - // #region copy parameters - - /** - * Retrieves the size of all parameters for the training state. Calculates the total number of primitive (datatype of - * the parameters) elements of all the parameters in the training state. 
- * - * @param trainableOnly - When set to true, the size is calculated for trainable params only. Default value is true. - */ - getParametersSize(trainableOnly: boolean): Promise; - - /** - * Copies parameter values from the given buffer to the training state. Currently, only supporting models with - * parameters of type Float32. - * - * @param buffer - A Uint8Array representation of Float32 parameters. - * @param trainableOnly - True if trainable parameters only to be modified, false otherwise. Default value is true. - */ - loadParametersBuffer(buffer: Uint8Array, trainableOnly: boolean): Promise; - - /** - * Copies the model parameters to a contiguous buffer. Usually used in the context of Federated Learning. - * Currently, only supporting models with parameters of type Float32. - * - * @param trainableOnly - When set to true, only trainable parameters are copied. Trainable parameters are parameters - * for which requires_grad is set to true. Default value is true. - * @returns A promise that resolves to a Float32 OnnxValue of the requested parameters. - */ - getContiguousParameters(trainableOnly: boolean): Promise; - // #endregion - - // #region release() - - /** - * Release the inference session and the underlying resources. - */ - release(): Promise; - // #endregion - - // #region metadata - - /** - * Get input names of the loaded training model. - */ - readonly trainingInputNames: readonly string[]; - - /** - * Get output names of the loaded training model. - */ - readonly trainingOutputNames: readonly string[]; - - /** - * Get input names of the loaded eval model. Is an empty array if no eval model is loaded. - */ - readonly evalInputNames: readonly string[]; - - /** - * Get output names of the loaded eval model. Is an empty array if no eval model is loaded. - */ - readonly evalOutputNames: readonly string[]; - - // #endregion -} - -/** - * Represents the optional parameters that can be passed into the TrainingSessionFactory. - */ -export interface TrainingSessionCreateOptions { - /** - * URI or buffer for a .ckpt file that contains the checkpoint for the training model. - */ - checkpointState: TrainingSession.UriOrBuffer; - /** - * URI or buffer for the .onnx training file. - */ - trainModel: TrainingSession.UriOrBuffer; - /** - * Optional. URI or buffer for the .onnx optimizer model file. - */ - optimizerModel?: TrainingSession.UriOrBuffer; - /** - * Optional. URI or buffer for the .onnx eval model file. - */ - evalModel?: TrainingSession.UriOrBuffer; -} - -/** - * Defines method overload possibilities for creating a TrainingSession. 
- */ -export interface TrainingSessionFactory { - // #region create() - - /** - * Creates a new TrainingSession and asynchronously loads any models passed in through trainingOptions - * - * @param trainingOptions specify models and checkpoints to load into the Training Session - * @param sessionOptions specify configuration for training session behavior - * - * @returns Promise that resolves to a TrainingSession object - */ - create( - trainingOptions: TrainingSessionCreateOptions, - sessionOptions?: InferenceSession.SessionOptions, - ): Promise; - - // #endregion -} - -// eslint-disable-next-line @typescript-eslint/naming-convention -export const TrainingSession: TrainingSessionFactory = TrainingSessionImpl; From f340b3cad3a3c64be13f77cdd9a20dcd9ff72942 Mon Sep 17 00:00:00 2001 From: Jian Chen Date: Wed, 4 Dec 2024 21:20:12 -0500 Subject: [PATCH 20/22] Adding DML to python cuda package (#22606) --- .../test/python/onnx_backend_test_series.py | 41 ++++++++++++------- .../onnx_backend_test_series_filters.jsonc | 7 ++++ .../jobs/steps/py_packaging_test_step.yml | 21 ++++++++++ .../stages/py-gpu-packaging-stage.yml | 2 +- .../stages/py-win-gpu-stage.yml | 27 ++++++------ 5 files changed, 70 insertions(+), 28 deletions(-) create mode 100644 tools/ci_build/github/azure-pipelines/stages/jobs/steps/py_packaging_test_step.yml diff --git a/onnxruntime/test/python/onnx_backend_test_series.py b/onnxruntime/test/python/onnx_backend_test_series.py index 8fc76da3495a8..a274b90dc042f 100644 --- a/onnxruntime/test/python/onnx_backend_test_series.py +++ b/onnxruntime/test/python/onnx_backend_test_series.py @@ -105,7 +105,7 @@ def load_jsonc(basename: str): return json.loads("\n".join(lines)) -def create_backend_test(test_name=None): +def create_backend_test(devices: list[str], test_name=None): """Creates an OrtBackendTest and adds its TestCase's to global scope so unittest will find them.""" overrides = load_jsonc("onnx_backend_test_series_overrides.jsonc") @@ -126,30 +126,29 @@ def create_backend_test(test_name=None): else: filters = load_jsonc("onnx_backend_test_series_filters.jsonc") current_failing_tests = apply_filters(filters, "current_failing_tests") - if platform.architecture()[0] == "32bit": current_failing_tests += apply_filters(filters, "current_failing_tests_x86") - if backend.supports_device("DNNL"): + if backend.supports_device("DNNL") or "DNNL" in devices: current_failing_tests += apply_filters(filters, "current_failing_tests_DNNL") - if backend.supports_device("NNAPI"): + if backend.supports_device("NNAPI") or "NNAPI" in devices: current_failing_tests += apply_filters(filters, "current_failing_tests_NNAPI") - if backend.supports_device("OPENVINO_GPU"): + if backend.supports_device("OPENVINO_GPU") or "OPENVINO_GPU" in devices: current_failing_tests += apply_filters(filters, "current_failing_tests_OPENVINO_GPU") - if backend.supports_device("OPENVINO_CPU"): + if backend.supports_device("OPENVINO_CPU") or "OPENVINO_CPU" in devices: current_failing_tests += apply_filters(filters, "current_failing_tests_OPENVINO_CPU_FP32") current_failing_tests += apply_filters(filters, "current_failing_tests_OPENVINO_CPU_FP16") - if backend.supports_device("OPENVINO_NPU"): + if backend.supports_device("OPENVINO_NPU") or "OPENVINO_NPU" in devices: current_failing_tests += apply_filters(filters, "current_failing_tests_OPENVINO_NPU") - if backend.supports_device("OPENVINO"): + if backend.supports_device("OPENVINO") or "OPENVINO" in devices: current_failing_tests += apply_filters(filters, 
"current_failing_tests_OPENVINO_opset18") - if backend.supports_device("MIGRAPHX"): + if backend.supports_device("MIGRAPHX") or "MIGRAPHX" in devices: current_failing_tests += apply_filters(filters, "current_failing_tests_MIGRAPHX") if backend.supports_device("WEBGPU"): @@ -158,8 +157,16 @@ def create_backend_test(test_name=None): # Skip these tests for a "pure" DML onnxruntime python wheel. We keep these tests enabled for instances where both DML and CUDA # EPs are available (Windows GPU CI pipeline has this config) - these test will pass because CUDA has higher precedence than DML # and the nodes are assigned to only the CUDA EP (which supports these tests) - if backend.supports_device("DML") and not backend.supports_device("GPU"): + if (backend.supports_device("DML") and not backend.supports_device("GPU")) or "DML" in devices: current_failing_tests += apply_filters(filters, "current_failing_tests_pure_DML") + # exclude CUDA EP when DML test is running. + os.environ["ORT_ONNX_BACKEND_EXCLUDE_PROVIDERS"] = "TensorrtExecutionProvider,CUDAExecutionProvider" + elif backend.supports_device("DML") and "DML" not in devices: + # exclude DML EP when CUDA test is running. + os.environ["ORT_ONNX_BACKEND_EXCLUDE_PROVIDERS"] = "TensorrtExecutionProvider,DmlExecutionProvider" + else: + # exclude TRT EP temporarily and only test CUDA EP to retain previous behavior + os.environ["ORT_ONNX_BACKEND_EXCLUDE_PROVIDERS"] = "TensorrtExecutionProvider" filters = ( current_failing_tests @@ -172,9 +179,6 @@ def create_backend_test(test_name=None): backend_test.exclude("(" + "|".join(filters) + ")") print("excluded tests:", filters) - # exclude TRT EP temporarily and only test CUDA EP to retain previous behavior - os.environ["ORT_ONNX_BACKEND_EXCLUDE_PROVIDERS"] = "TensorrtExecutionProvider" - # import all test cases at global scope to make # them visible to python.unittest. globals().update(backend_test.enable_report().test_cases) @@ -199,6 +203,15 @@ def parse_args(): help="Only run tests that match this value. Matching is regex based, and '.*' is automatically appended", ) + parser.add_argument( + "--devices", + type=str, + choices=["CPU", "CUDA", "MIGRAPHX", "DNNL", "DML", "OPENVINO_GPU", "OPENVINO_CPU", "OPENVINO_NPU", "OPENVINO"], + nargs="+", # allows multiple values + default=["CPU"], # default to ["CPU"] if no input is given + help="Select one or more devices CPU, CUDA, MIGRAPHX, DNNL, DML, OPENVINO_GPU, OPENVINO_CPU, OPENVINO_NPU, OPENVINO", + ) + # parse just our args. 
python unittest has its own args and arg parsing, and that runs inside unittest.main() parsed, unknown = parser.parse_known_args() sys.argv = sys.argv[:1] + unknown @@ -209,5 +222,5 @@ def parse_args(): if __name__ == "__main__": args = parse_args() - create_backend_test(args.test_name) + create_backend_test(args.devices, args.test_name) unittest.main() diff --git a/onnxruntime/test/testdata/onnx_backend_test_series_filters.jsonc b/onnxruntime/test/testdata/onnx_backend_test_series_filters.jsonc index f083ab14ad133..7ecaab6fedb02 100644 --- a/onnxruntime/test/testdata/onnx_backend_test_series_filters.jsonc +++ b/onnxruntime/test/testdata/onnx_backend_test_series_filters.jsonc @@ -750,6 +750,13 @@ "^test_reduce_log_sum_empty_set_cpu", "^test_reduce_log_sum_exp_empty_set_cpu", "^test_reduce_prod_empty_set_cpu", + // Bug: DML EP some how executes these CUDA tests and failed + // TODO: Remove these tests when DML EP is fixed + "^test_convtranspose_autopad_same_cuda", + "^test_asin_example_cuda", + "^test_dynamicquantizelinear_cuda", + "^test_dynamicquantizelinear_expanded_cuda", + "^test_reduce_min_empty_set_cuda", //Bug: DML EP does not execute operators with an empty input tensor //TODO: Resolve as a graph implementation that returns a constant inf tensor with appropriate strides "^test_reduce_min_empty_set_cpu" diff --git a/tools/ci_build/github/azure-pipelines/stages/jobs/steps/py_packaging_test_step.yml b/tools/ci_build/github/azure-pipelines/stages/jobs/steps/py_packaging_test_step.yml new file mode 100644 index 0000000000000..9a721c65de332 --- /dev/null +++ b/tools/ci_build/github/azure-pipelines/stages/jobs/steps/py_packaging_test_step.yml @@ -0,0 +1,21 @@ +parameters: +- name: EP_NAME + type: string + default: CPU + +- name: PYTHON_VERSION + type: string + +steps: +- powershell: | + python -m pip uninstall -y onnxruntime onnxruntime-gpu -qq + Get-ChildItem -Path $(Build.ArtifactStagingDirectory)/*cp${{ replace(parameters.PYTHON_VERSION,'.','') }}*.whl | foreach {pip --disable-pip-version-check install --upgrade $_.fullname tabulate} + mkdir -p $(Agent.TempDirectory)\ort_test_data + Copy-Item -Path $(Build.sourcesDirectory)/onnxruntime/test/python/onnx_backend_test_series.py -Destination $(Agent.TempDirectory)\ort_test_data + Copy-Item -Recurse -Path $(Build.sourcesDirectory)/onnxruntime/test/testdata -Destination $(Agent.TempDirectory)\ort_test_data + cd $(Agent.TempDirectory)\ort_test_data + python onnx_backend_test_series.py --devices ${{ parameters.EP_NAME }} -v + cd $(Agent.TempDirectory) + Remove-Item -Path $(Agent.TempDirectory)\ort_test_data -Recurse -Force + workingDirectory: '$(Build.sourcesDirectory)' + displayName: 'Run Python Tests with ${{ parameters.EP_NAME }} EP' \ No newline at end of file diff --git a/tools/ci_build/github/azure-pipelines/stages/py-gpu-packaging-stage.yml b/tools/ci_build/github/azure-pipelines/stages/py-gpu-packaging-stage.yml index 947e4f99b984f..f7235e3ad2076 100644 --- a/tools/ci_build/github/azure-pipelines/stages/py-gpu-packaging-stage.yml +++ b/tools/ci_build/github/azure-pipelines/stages/py-gpu-packaging-stage.yml @@ -56,7 +56,7 @@ stages: PYTHON_VERSION: ${{ python_version }} EP_NAME: gpu CudaVersion: ${{ parameters.cuda_version }} - EP_BUILD_FLAGS: --enable_lto --cuda_home=$(Agent.TempDirectory)\v${{ parameters.cuda_version }} --cmake_extra_defines "CMAKE_CUDA_ARCHITECTURES=52;60;61;70;75;80" + EP_BUILD_FLAGS: --use_dml --enable_lto --cuda_home=$(Agent.TempDirectory)\v${{ parameters.cuda_version }} --cmake_extra_defines 
"CMAKE_CUDA_ARCHITECTURES=52;60;61;70;75;80" use_tensorrt: True - ${{ if eq(parameters.enable_linux_cuda, true) }}: diff --git a/tools/ci_build/github/azure-pipelines/stages/py-win-gpu-stage.yml b/tools/ci_build/github/azure-pipelines/stages/py-win-gpu-stage.yml index aa7f2845fc0fa..dd0539f751c89 100644 --- a/tools/ci_build/github/azure-pipelines/stages/py-win-gpu-stage.yml +++ b/tools/ci_build/github/azure-pipelines/stages/py-win-gpu-stage.yml @@ -33,7 +33,7 @@ parameters: - Release - RelWithDebInfo - MinSizeRel - + - name: use_tensorrt type: boolean default: false @@ -134,7 +134,7 @@ stages: --cmake_generator "$(VSGenerator)" --enable_pybind --enable_onnx_tests - --parallel --use_binskim_compliant_compile_flags --update --build + --parallel 4 --use_binskim_compliant_compile_flags --update --build $(TelemetryOption) ${{ parameters.BUILD_PY_PARAMETERS }} ${{ parameters.EP_BUILD_FLAGS }} ${{ variables.trt_build_flag }} workingDirectory: '$(Build.BinariesDirectory)' @@ -206,19 +206,20 @@ stages: DownloadTRT: ${{ parameters.use_tensorrt }} - task: PowerShell@2 - displayName: 'Install ONNX' + displayName: 'Install Third Party Dependencies' inputs: filePath: '$(Build.SourcesDirectory)/tools/ci_build/github/windows/install_third_party_deps.ps1' workingDirectory: '$(Build.BinariesDirectory)' arguments: -cpu_arch x64 -install_prefix $(Build.BinariesDirectory)\${{ parameters.cmake_build_type }}\installed -build_config ${{ parameters.cmake_build_type }} - - powershell: | - python -m pip uninstall -y onnxruntime onnxruntime-gpu -qq - Get-ChildItem -Path $(Build.ArtifactStagingDirectory)/*cp${{ replace(parameters.PYTHON_VERSION,'.','') }}*.whl | foreach {pip --disable-pip-version-check install --upgrade $_.fullname tabulate} - mkdir -p $(Agent.TempDirectory)\ort_test_data - Copy-Item -Path $(Build.sourcesDirectory)/onnxruntime/test/python/onnx_backend_test_series.py -Destination $(Agent.TempDirectory)\ort_test_data - Copy-Item -Recurse -Path $(Build.sourcesDirectory)/onnxruntime/test/testdata -Destination $(Agent.TempDirectory)\ort_test_data - cd $(Agent.TempDirectory)\ort_test_data - python onnx_backend_test_series.py - workingDirectory: '$(Build.sourcesDirectory)' - displayName: 'Run Python Tests' + - template: jobs/steps/py_packaging_test_step.yml + parameters: + EP_NAME: DML + PYTHON_VERSION: ${{ parameters.PYTHON_VERSION }} + + - template: jobs/steps/py_packaging_test_step.yml + parameters: + EP_NAME: CUDA + PYTHON_VERSION: ${{ parameters.PYTHON_VERSION }} + + From 1c79a4c9dd6dbc6fe50b1f3a1f771dc6b619baf6 Mon Sep 17 00:00:00 2001 From: Yulong Wang <7679871+fs-eire@users.noreply.github.com> Date: Wed, 4 Dec 2024 19:01:26 -0800 Subject: [PATCH 21/22] [js/common] use TS type inference to eliminate `unknown` (#23012) ### Description This change uses a TypeScript trick to infer global types in onnxruntime-common. Thanks to the strong type system of TypeScript, we are able to refer to types that may not be available in the context. This helps to keep onnxruntime-common not to include dependencies like "@webgpu/types", and still being able to use the types in the declaration. See comments of `TryGetGlobalType` in `type-helper.ts`. 
--- js/common/lib/env.ts | 11 +++-------- js/common/lib/inference-session.ts | 7 ++++--- js/common/lib/tensor.ts | 14 +++++--------- js/common/lib/type-helper.ts | 31 ++++++++++++++++++++++++++++++ js/common/typedoc.json | 1 + js/web/lib/wasm/wasm-core-impl.ts | 2 +- 6 files changed, 45 insertions(+), 21 deletions(-) create mode 100644 js/common/lib/type-helper.ts diff --git a/js/common/lib/env.ts b/js/common/lib/env.ts index adb6a440cf22a..e70f608ad7030 100644 --- a/js/common/lib/env.ts +++ b/js/common/lib/env.ts @@ -2,6 +2,7 @@ // Licensed under the MIT License. import { env as envImpl } from './env-impl.js'; +import { TryGetGlobalType } from './type-helper.js'; export declare namespace Env { export type WasmPathPrefix = string; @@ -198,22 +199,16 @@ export declare namespace Env { * value will be the GPU adapter that created by the underlying WebGPU backend. * * When use with TypeScript, the type of this property is `GPUAdapter` defined in "@webgpu/types". - * Use `const adapter = env.webgpu.adapter as GPUAdapter;` in TypeScript to access this property with correct type. - * - * see comments on {@link Tensor.GpuBufferType} */ - adapter: unknown; + adapter: TryGetGlobalType<'GPUAdapter'>; /** * Get the device for WebGPU. * * This property is only available after the first WebGPU inference session is created. * * When use with TypeScript, the type of this property is `GPUDevice` defined in "@webgpu/types". - * Use `const device = env.webgpu.device as GPUDevice;` in TypeScript to access this property with correct type. - * - * see comments on {@link Tensor.GpuBufferType} for more details about why not use types defined in "@webgpu/types". */ - readonly device: unknown; + readonly device: TryGetGlobalType<'GPUDevice'>; /** * Set or get whether validate input content. 
* diff --git a/js/common/lib/inference-session.ts b/js/common/lib/inference-session.ts index 547db029471a2..e62c6579e8333 100644 --- a/js/common/lib/inference-session.ts +++ b/js/common/lib/inference-session.ts @@ -4,6 +4,7 @@ import { InferenceSession as InferenceSessionImpl } from './inference-session-impl.js'; import { OnnxModelOptions } from './onnx-model.js'; import { OnnxValue, OnnxValueDataLocation } from './onnx-value.js'; +import { TryGetGlobalType } from './type-helper.js'; /* eslint-disable @typescript-eslint/no-redeclare */ @@ -282,7 +283,7 @@ export declare namespace InferenceSession { extends WebNNExecutionProviderName, Omit, Required> { - context: unknown /* MLContext */; + context: TryGetGlobalType<'MLContext'>; } /** @@ -291,8 +292,8 @@ export declare namespace InferenceSession { * @see https://www.w3.org/TR/webnn/#dom-ml-createcontext-gpudevice */ export interface WebNNOptionsWebGpu extends WebNNExecutionProviderName { - context: unknown /* MLContext */; - gpuDevice: unknown /* GPUDevice */; + context: TryGetGlobalType<'MLContext'>; + gpuDevice: TryGetGlobalType<'GPUDevice'>; } /** diff --git a/js/common/lib/tensor.ts b/js/common/lib/tensor.ts index af918705b97e3..05553bd96662b 100644 --- a/js/common/lib/tensor.ts +++ b/js/common/lib/tensor.ts @@ -4,6 +4,7 @@ import { TensorFactory } from './tensor-factory.js'; import { Tensor as TensorImpl } from './tensor-impl.js'; import { TypedTensorUtils } from './tensor-utils.js'; +import { TryGetGlobalType } from './type-helper.js'; /* eslint-disable @typescript-eslint/no-redeclare */ @@ -131,24 +132,19 @@ export declare namespace Tensor { */ export type TextureDataTypes = 'float32'; + type GpuBufferTypeFallback = { size: number; mapState: 'unmapped' | 'pending' | 'mapped' }; /** * type alias for WebGPU buffer - * - * The reason why we don't use type "GPUBuffer" defined in webgpu.d.ts from @webgpu/types is because "@webgpu/types" - * requires "@types/dom-webcodecs" as peer dependency when using TypeScript < v5.1 and its version need to be chosen - * carefully according to the TypeScript version being used. This means so far there is not a way to keep every - * TypeScript version happy. It turns out that we will easily broke users on some TypeScript version. - * - * for more info see https://github.com/gpuweb/types/issues/127 */ - export type GpuBufferType = { size: number; mapState: 'unmapped' | 'pending' | 'mapped' }; + export type GpuBufferType = TryGetGlobalType<'GPUBuffer', GpuBufferTypeFallback>; + type MLTensorTypeFallback = { destroy(): void }; /** * type alias for WebNN MLTensor * * The specification for WebNN's MLTensor is currently in flux. */ - export type MLTensorType = unknown; + export type MLTensorType = TryGetGlobalType<'MLTensor', MLTensorTypeFallback>; /** * supported data types for constructing a tensor from a WebGPU buffer diff --git a/js/common/lib/type-helper.ts b/js/common/lib/type-helper.ts new file mode 100644 index 0000000000000..845ba3018d443 --- /dev/null +++ b/js/common/lib/type-helper.ts @@ -0,0 +1,31 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +/** + * A helper type to get certain types if they are declared in global scope. + * + * For example, if you installed "@webgpu/types" as a dev dependency, then `TryGetTypeIfDeclared<'GPUDevice'>` will + * be type `GPUDevice`, otherwise it will be type `unknown`. 
+ * + * + * We don't want to introduce "@webgpu/types" as a dependency of this package because: + * + * (1) For JavaScript users, it's not needed. For TypeScript users, they can install it as dev dependency themselves. + * + * (2) because "@webgpu/types" requires "@types/dom-webcodecs" as peer dependency when using TypeScript < v5.1 and its + * version need to be chosen carefully according to the TypeScript version being used. This means so far there is not a + * way to keep every TypeScript version happy. It turns out that we will easily broke users on some TypeScript version. + * + * for more info see https://github.com/gpuweb/types/issues/127 + * + * Update (2024-08-07): The reason (2) may be no longer valid. Most people should be using TypeScript >= 5.1 by now. + * However, we are still not sure whether introducing "@webgpu/types" as direct dependency is a good idea. We find this + * type helper is useful for TypeScript users. + * + * @ignore + */ +export type TryGetGlobalType = typeof globalThis extends { + [k in Name]: { prototype: infer T }; +} + ? T + : Fallback; diff --git a/js/common/typedoc.json b/js/common/typedoc.json index 088c7ba4053e6..f9c7e7b19db41 100644 --- a/js/common/typedoc.json +++ b/js/common/typedoc.json @@ -1,6 +1,7 @@ { "entryPoints": ["lib/index.ts"], "excludeInternal": true, + "intentionallyNotExported": ["TryGetGlobalType"], "name": "ONNX Runtime JavaScript API", "readme": "none", "cleanOutputDir": true diff --git a/js/web/lib/wasm/wasm-core-impl.ts b/js/web/lib/wasm/wasm-core-impl.ts index 81d1b73efc9d4..da8939cd0263a 100644 --- a/js/web/lib/wasm/wasm-core-impl.ts +++ b/js/web/lib/wasm/wasm-core-impl.ts @@ -487,7 +487,7 @@ export const prepareInputOutputTensor = ( } if (location === 'gpu-buffer') { - const gpuBuffer = tensor[2].gpuBuffer as GPUBuffer; + const gpuBuffer = tensor[2].gpuBuffer; dataByteLength = calculateTensorSizeInBytes(tensorDataTypeStringToEnum(dataType), dims)!; const registerBuffer = wasm.jsepRegisterBuffer; From 6ed77cc374c04956bc1197d0dc0fe5a2ed9b15b9 Mon Sep 17 00:00:00 2001 From: Yi Zhang Date: Thu, 5 Dec 2024 14:07:21 +0800 Subject: [PATCH 22/22] Deprecate macos-12 (#23017) ### Description ### Motivation and Context ESRP code-sign task has supported .net 8, so we can remove macos-12 --- .../azure-pipelines/templates/mac-cpu-packaging-pipeline.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/tools/ci_build/github/azure-pipelines/templates/mac-cpu-packaging-pipeline.yml b/tools/ci_build/github/azure-pipelines/templates/mac-cpu-packaging-pipeline.yml index 080079388a76c..ab31e592d7d71 100644 --- a/tools/ci_build/github/azure-pipelines/templates/mac-cpu-packaging-pipeline.yml +++ b/tools/ci_build/github/azure-pipelines/templates/mac-cpu-packaging-pipeline.yml @@ -68,9 +68,6 @@ stages: jobs: - job: MacOS_C_API_Package_Publish pool: - ${{ if eq(parameters.DoESRP, true)}}: - vmImage: 'macOS-12' - ${{ else }}: vmImage: 'macOS-13' steps: - checkout: none