Running C tests after build #93

Merged on Mar 4, 2024 (80 commits).
Changes shown are from 22 of the 80 commits.

Commits
1366372
Rename tests based on test suite name
jchen351 Feb 17, 2024
1b14bb7
Update ci pipeline
jchen351 Feb 17, 2024
3617f73
Update unit_tests model path
jchen351 Feb 17, 2024
1742305
Update build binary path
jchen351 Feb 17, 2024
ed0c934
Update cmake build
jchen351 Feb 17, 2024
5eccb0f
Copy ort dll to test folder
jchen351 Feb 18, 2024
54ff4e9
Merge branch 'main' into Cjian/cgt
jchen351 Feb 18, 2024
3915d6d
Prepend CUDA to path
jchen351 Feb 18, 2024
97ded88
Prepend CUDA to path
jchen351 Feb 18, 2024
f6bde5d
update $env:GITHUB_PATH
jchen351 Feb 18, 2024
d23ad04
Merge remote-tracking branch 'origin/Cjian/cgt' into Cjian/cgt
jchen351 Feb 18, 2024
c83f17c
update $env:GITHUB_PATH
jchen351 Feb 18, 2024
da64e6b
update GITHUB_PATH
jchen351 Feb 18, 2024
4394ea8
update GITHUB_PATH
jchen351 Feb 18, 2024
b345cb9
Update env:PATH
jchen351 Feb 18, 2024
fa0f351
Update env:PATH to bin
jchen351 Feb 18, 2024
03849ba
Update onnxruntime_libs
jchen351 Feb 18, 2024
67a1242
Update onnxruntime_libs
jchen351 Feb 19, 2024
642e243
add #ifndef MODEL_PATH
jchen351 Feb 19, 2024
858bd3b
Adding path
jchen351 Feb 19, 2024
a5cddbc
change phi2 to phi-2
jchen351 Feb 19, 2024
a7d896f
remove llama
jchen351 Feb 19, 2024
e7930d4
update cuda version to 11.8
jchen351 Feb 20, 2024
3ed6718
Update cuda to use c++17 standard
jchen351 Feb 20, 2024
9f70b10
Update cuda to use c++17 standard
jchen351 Feb 20, 2024
4b4daee
update cuda home
jchen351 Feb 20, 2024
f8b05f1
using ort cuda12
jchen351 Feb 20, 2024
1abb778
Update cuda model to cuda 12
jchen351 Feb 20, 2024
8643a7b
Update cuda model to cuda 12
jchen351 Feb 20, 2024
6e827bb
Update cuda model to cuda 12
jchen351 Feb 20, 2024
760cca5
check CUDA version
jchen351 Feb 20, 2024
65ad89c
TEST_PHI2=False
jchen351 Feb 20, 2024
b1a46e0
TEST_PHI2=False
jchen351 Feb 20, 2024
b255996
update to use cuda 11.8
jchen351 Feb 20, 2024
6819fd1
Install nvidia container toolkit
jchen351 Feb 21, 2024
6957791
Install nvidia container toolkit
jchen351 Feb 21, 2024
7834435
remove nvidia container toolkit
jchen351 Feb 21, 2024
51ce454
setting CUDA_BIN_PATH
jchen351 Feb 21, 2024
9790fb7
setting CUDA_BIN_PATH
jchen351 Feb 21, 2024
a4fb5bc
Merge branch 'main' into Cjian/cgt
jchen351 Feb 21, 2024
d449ee9
update python build wheel
jchen351 Feb 22, 2024
4b159aa
Merge branch 'Cjian/fix_wheel' into Cjian/cgt
jchen351 Feb 22, 2024
89b4f49
print nvidia-smi
jchen351 Feb 22, 2024
378c05c
Merge branch 'main' into Cjian/cgt
jchen351 Feb 22, 2024
b4e8af5
Merge branch 'main' into Cjian/cgt
jchen351 Feb 26, 2024
78425e8
Merge branch 'main' into Cjian/cgt
jchen351 Feb 27, 2024
07d80a5
disable TEST_PHI2
jchen351 Feb 27, 2024
513c753
/opt/rh/gcc-toolset-12
jchen351 Feb 27, 2024
7d567c2
Remove requirment for gcc 9 or greater
jchen351 Feb 27, 2024
d02273a
CMAKE_C_COMPILER_VERSION VERSION_LESS 8
jchen351 Feb 27, 2024
0d5a021
CMAKE_C_COMPILER_VERSION VERSION_LESS 8
jchen351 Feb 27, 2024
cf975f1
Merge branch 'main' into Cjian/cgt
jchen351 Feb 27, 2024
acd7a4e
/p:Configuration=Release
jchen351 Feb 27, 2024
8aab9f9
- name: Run tests
jchen351 Feb 27, 2024
f21b715
message("USE C++17 Because of CUDA Version is less than 12")
jchen351 Feb 29, 2024
a9cacf7
Managed
jchen351 Mar 1, 2024
969b5b2
Managed
jchen351 Mar 1, 2024
3076925
ENABLE_TESTS=OFF
jchen351 Mar 1, 2024
f02e014
#ifdef TODO_FIX_IT
jchen351 Mar 1, 2024
7fbe439
#ifdef TODO_FIX_IT
jchen351 Mar 1, 2024
3199845
-DENABLE_TESTS=OFF
jchen351 Mar 1, 2024
6a0755b
};
jchen351 Mar 1, 2024
7a913df
reformact
jchen351 Mar 1, 2024
10c7bfe
Remove linux gpu 64
jchen351 Mar 1, 2024
8ee3459
Remove unused comment
jchen351 Mar 2, 2024
8def696
Merge branch 'refs/heads/main' into Cjian/cgt
jchen351 Mar 3, 2024
ad82013
python3 -m pip install build/${{ matrix.compiler }}_cpu/rel…
jchen351 Mar 3, 2024
a0f7bea
== 'gcc'
jchen351 Mar 3, 2024
faaeb3e
/onnxruntime_src/
jchen351 Mar 3, 2024
bc7cfdd
reformat dock run
jchen351 Mar 3, 2024
2fc988b
Dockerfile
jchen351 Mar 3, 2024
da96623
Dockerfile
jchen351 Mar 3, 2024
36f688e
Dockerfile
jchen351 Mar 3, 2024
2ed4d9b
Dockerfile
jchen351 Mar 3, 2024
3e717ba
az login
jchen351 Mar 3, 2024
6768149
az login
jchen351 Mar 3, 2024
3c9f66e
az login
jchen351 Mar 3, 2024
4e7d515
az login
jchen351 Mar 4, 2024
ef1a538
#TODO: Re-enable Test by removing -DENABLE_TESTS=OFF when the L…
jchen351 Mar 4, 2024
9a1df25
diaable-- Install the onnxruntime-genai Python wheel and run Python t…
jchen351 Mar 4, 2024
39 changes: 29 additions & 10 deletions .github/workflows/linux-cpu-arm64-build.yml
@@ -8,13 +8,13 @@ env:
ort_zip: "onnxruntime-linux-aarch64-1.17.0.tgz"
ort_url: "https://github.com/microsoft/onnxruntime/releases/download/v1.17.0/onnxruntime-linux-aarch64-1.17.0.tgz"
jobs:
job:
linux-cpu-arm64-build:
runs-on: [ "self-hosted", "1ES.Pool=onnxruntime-genai-Ubuntu2004-ARM-CPU" ]
steps:
- name: Checkout OnnxRuntime GenAI repo
uses: actions/checkout@v2


uses: actions/checkout@v4
with:
submodules: 'true'

- name: Download OnnxRuntime
run: |
@@ -29,19 +29,38 @@ jobs:
run: |
mv ${{ env.ort_dir }} ort

- name: Git Submodule Update
run: |
git submodule update --init --recursive

- name: Build with CMake and GCC
- name: Download Docker Image
run: |
set -e -x
python3 tools/ci_build/get_docker_image.py --dockerfile tools/ci_build/github/linux/docker/inference/aarch64/default/cpu/Dockerfile \
--context tools/ci_build/github/linux/docker/inference/aarch64/default/cpu \
--docker-build-args "--build-arg BUILD_UID=$( id -u )" \
--container-registry onnxruntimebuildcache \
--repository onnxruntimecpubuild
docker run --rm --volume $GITHUB_WORKSPACE:/onnxruntime_src -w /onnxruntime_src onnxruntimecpubuild bash -c "echo $PATH && /usr/bin/cmake --preset linux_gcc_cpu_release && /usr/bin/cmake --build --preset linux_gcc_cpu_release"

- name: Docker -- Configure with CMake and GCC
run: |
docker run --rm \
--volume $GITHUB_WORKSPACE:/onnxruntime_src \
-w /onnxruntime_src onnxruntimecpubuild bash -c "/usr/bin/cmake --preset linux_gcc_cpu_release"

- name: Docker -- Build with CMake and GCC
run: |
docker run --rm \
--volume $GITHUB_WORKSPACE:/onnxruntime_src \
-w /onnxruntime_src onnxruntimecpubuild bash -c "/usr/bin/cmake --build --preset linux_gcc_cpu_release"

- name: Docker -- check test directory
run: |
docker run --rm \
--volume $GITHUB_WORKSPACE:/onnxruntime_src \
-w /onnxruntime_src onnxruntimecpubuild bash -c "ls -l /onnxruntime_src/build/gcc_cpu/release/test/"

- name: Docker -- Run tests
run: |
docker run --rm \
--volume $GITHUB_WORKSPACE:/onnxruntime_src \
-w /onnxruntime_src onnxruntimecpubuild bash -c "/onnxruntime_src/build/gcc_cpu/release/test/unit_tests"
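Note: the four Docker steps above can be reproduced locally as a single script. This is a minimal sketch, not part of the workflow; it assumes the onnxruntimecpubuild image has already been produced by the "Download Docker Image" step (tools/ci_build/get_docker_image.py) and that the repository root is the current directory.

set -e -x
# Assumption: the onnxruntimecpubuild image already exists locally,
# built by tools/ci_build/get_docker_image.py as in the step above.
SRC="$(pwd)"   # repository root, mounted as /onnxruntime_src

# Configure, then build, with the gcc CPU release preset.
docker run --rm --volume "$SRC":/onnxruntime_src -w /onnxruntime_src onnxruntimecpubuild \
  bash -c "/usr/bin/cmake --preset linux_gcc_cpu_release"
docker run --rm --volume "$SRC":/onnxruntime_src -w /onnxruntime_src onnxruntimecpubuild \
  bash -c "/usr/bin/cmake --build --preset linux_gcc_cpu_release"

# Run the unit tests produced in the preset's output tree.
docker run --rm --volume "$SRC":/onnxruntime_src -w /onnxruntime_src onnxruntimecpubuild \
  bash -c "/onnxruntime_src/build/gcc_cpu/release/test/unit_tests"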



40 changes: 20 additions & 20 deletions .github/workflows/linux-cpu-x64-build.yml
@@ -8,13 +8,16 @@ env:
ort_zip: "onnxruntime-linux-x64-1.17.0.tgz"
ort_url: "https://github.com/microsoft/onnxruntime/releases/download/v1.17.0/onnxruntime-linux-x64-1.17.0.tgz"
jobs:
job:
linux_cpu_x64:
strategy:
matrix:
compiler: [ gcc, clang ]
runs-on: [ "self-hosted", "1ES.Pool=onnxruntime-genai-Ubuntu2204-AMD-CPU" ]
steps:
- name: Checkout OnnxRuntime GenAI repo
uses: actions/checkout@v2


uses: actions/checkout@v4
with:
submodules: true

- name: Download OnnxRuntime
run: |
@@ -29,31 +32,28 @@ jobs:
run: |
mv ${{ env.ort_dir }} ort

- name: Git Submodule Update
run: |
git submodule update --init --recursive

- name: Build with CMake and GCC
run: |
set -e -x
rm -rf build
cmake --preset linux_gcc_cpu_release
cmake --build --preset linux_gcc_cpu_release

- name: Build with CMake and clang
run: |
set -e -x
rm -rf build
cmake --preset linux_clang_cpu_release
cmake --build --preset linux_clang_cpu_release
cmake --preset linux_${{ matrix.compiler }}_cpu_release
cmake --build --preset linux_${{ matrix.compiler }}_cpu_release

- name: Verify Build Artifacts
if: always()
continue-on-error: true
run: |
ls -l ${{ github.workspace }}/build
ls -l ${{ github.workspace }}/build/${{ matrix.compiler }}_cpu/release

- if: ${{ matrix.compiler }} == 'gcc'
name: Run tests
run: |
set -e -x
./build/${{ matrix.compiler }}_cpu/release/test/unit_tests

- name: Upload Build Artifacts
- if: ${{ matrix.compiler }} == 'gcc'
name: Upload Build Artifacts
uses: actions/upload-artifact@v3
with:
name: onnxruntime-genai-linux-cpu-x64
path: ${{ github.workspace }}/build/**/*.a
path: ${{ github.workspace }}/build/**/*.a
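Note: a rough local equivalent of the compiler matrix above, as a sketch only. It assumes the linux_gcc_cpu_release and linux_clang_cpu_release presets exist and that each preset writes its build tree to build/<compiler>_cpu, with the test step gated on gcc exactly as in the workflow.

set -e -x
for compiler in gcc clang; do
  rm -rf "build/${compiler}_cpu"            # start from a clean build tree for this preset
  cmake --preset "linux_${compiler}_cpu_release"
  cmake --build --preset "linux_${compiler}_cpu_release"
  if [ "$compiler" = "gcc" ]; then          # the workflow only runs tests for the gcc build
    "./build/${compiler}_cpu/release/test/unit_tests"
  fi
done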
42 changes: 34 additions & 8 deletions .github/workflows/linux-gpu-x64-build.yml
@@ -10,11 +10,13 @@ env:
ort_zip: "onnxruntime-linux-x64-gpu-1.17.0.tgz"
ort_url: "https://github.com/microsoft/onnxruntime/releases/download/v1.17.0/onnxruntime-linux-x64-gpu-1.17.0.tgz"
jobs:
job:
linux-gpu-x64-build:
runs-on: [ "self-hosted", "1ES.Pool=onnxruntime-genai-Ubuntu2004-T4" ]
steps:
- name: Checkout OnnxRuntime GenAI repo
uses: actions/checkout@v2
uses: actions/checkout@v4
with:
submodules: true

- name: Download OnnxRuntime
run: |
@@ -29,10 +31,6 @@ jobs:
run: |
mv ${{ env.ort_dir }} ort

- name: Git Submodule Update
run: |
git submodule update --init --recursive

- name: Download Docker Image
run: |
set -e -x
@@ -47,12 +45,40 @@
echo "Printing docker image environment variables"
docker run --rm onnxruntimegpubuild env

- name: Build with Cmake in Docker
- name: Docker -- Configure with CMake and GCC
run: |
echo "Running docker image onnxruntimegpubuild"
docker run \
--gpus all \
--rm \
--volume $GITHUB_WORKSPACE:/onnxruntime_src \
-w /onnxruntime_src onnxruntimegpubuild bash -c "/usr/bin/cmake -DCMAKE_CUDA_ARCHITECTURES=86 --preset linux_gcc_cuda_release "

- name: Docker -- Build with CMake and GCC
run: |
echo "Running docker image onnxruntimegpubuild"
docker run \
--gpus all \
--rm \
--volume $GITHUB_WORKSPACE:/onnxruntime_src \
-w /onnxruntime_src onnxruntimegpubuild bash -c "/usr/bin/cmake --build --preset linux_gcc_cuda_release"

- name: Docker -- check test directory
run: |
echo "Running docker image onnxruntimegpubuild"
docker run \
--gpus all \
--rm \
--volume $GITHUB_WORKSPACE:/onnxruntime_src \
-w /onnxruntime_src onnxruntimegpubuild bash -c "ls -l /onnxruntime_src/build/gcc_cuda/release/test/"

- name: Docker -- Run tests
run: |
echo "Running docker image onnxruntimegpubuild"
docker run \
--gpus all \
--rm \
--volume $GITHUB_WORKSPACE:/onnxruntime_src \
-w /onnxruntime_src onnxruntimegpubuild bash -c "echo $PATH && /usr/bin/cmake -DCMAKE_CUDA_ARCHITECTURES=86 --preset linux_gcc_cuda_release && /usr/bin/cmake --build --preset linux_gcc_cuda_release"
-w /onnxruntime_src onnxruntimegpubuild bash -c "/onnxruntime_src/build/gcc_cuda/release/test/unit_tests"


5 changes: 3 additions & 2 deletions .github/workflows/mac-cpu-arm64-build.yml
@@ -8,7 +8,7 @@ env:
ort_zip: "onnxruntime-osx-arm64-1.17.0.tgz"
ort_url: "https://github.com/microsoft/onnxruntime/releases/download/v1.17.0/onnxruntime-osx-arm64-1.17.0.tgz"
jobs:
job:
mac-cpu-arm64-build:
runs-on: macos-latest
steps:
- name: Checkout OnnxRuntime GenAI repo
@@ -37,12 +37,13 @@ jobs:
run: |
- name: Build with CMake
run: |
cmake -G "Ninja" -B build . -DCMAKE_BUILD_TYPE=Release -DUSE_CUDA=OFF
cmake -G "Ninja" -B build -S . -DCMAKE_BUILD_TYPE=Release -DUSE_CUDA=OFF
cmake --build build --config Release --parallel
continue-on-error: true

- name: Verify Build Artifacts
if: always()
continue-on-error: true
run: |
ls -l ${{ github.workspace }}/build

20 changes: 15 additions & 5 deletions .github/workflows/win-cpu-arm64-build.yml
@@ -16,10 +16,14 @@ env:
ort_url: "https://github.com/microsoft/onnxruntime/releases/download/v1.17.0/$(ort_zip)"

jobs:
job:
runs-on: ["self-hosted", "1ES.Pool=onnxruntime-genai-Win11-ARM-CPU"]
windows-cpu-arm64-build:
runs-on: [ "self-hosted", "1ES.Pool=onnxruntime-genai-Win11-ARM-CPU" ]
steps:
- uses: actions/checkout@v2
- name: Checkout OnnxRuntime GenAI repo
uses: actions/checkout@v4
with:
submodules: true

- name: Setup Visual Studio 2022
uses: microsoft/[email protected]
with:
@@ -42,13 +46,19 @@

- name: Build with CMake
run: |
cmake -G "Visual Studio 17 2022" -A arm64 . -DCMAKE_BUILD_TYPE=Release -DUSE_CUDA=OFF
cmake --build . --config Release --parallel
cmake -G "Visual Studio 17 2022" -A arm64 -S . -B build -DCMAKE_BUILD_TYPE=Release -DUSE_CUDA=OFF
cmake --build build --config Release --parallel

- name: Verify Build Artifacts
if: always()
continue-on-error: true
run: |
Get-ChildItem -Path $env:GITHUB_WORKSPACE\Release -Recurse
Get-ChildItem -Path $env:GITHUB_WORKSPACE\test\Release -Recurse

- name: Run tests
run: |
.\build\test\Release\unit_tests.exe

- name: Upload Build Artifacts
uses: actions/upload-artifact@v3
25 changes: 15 additions & 10 deletions .github/workflows/win-cpu-x64-build.yml
@@ -16,13 +16,16 @@ env:
ort_url: "https://github.com/microsoft/onnxruntime/releases/download/v1.17.0/$(ort_zip)"

jobs:
job:
runs-on: ["self-hosted", "1ES.Pool=onnxruntime-genai-Win2022-CPU"]
windows-cpu-x64-build:
runs-on: [ "self-hosted", "1ES.Pool=onnxruntime-genai-Win2022-CPU" ]
permissions:
security-events: write
actions: read
steps:
- uses: actions/checkout@v2
- name: Checkout OnnxRuntime GenAI repo
uses: actions/checkout@v4
with:
submodules: true

- name: Setup Visual Studio 2022
uses: microsoft/[email protected]
@@ -43,24 +46,26 @@
run: |
Rename-Item -Path $env:ort_dir -NewName ort

- name: Git Submodule Update
run: |
git submodule update --init --recursive

- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: 'cpp'

- name: Build with CMake
run: |
cmake -G "Visual Studio 17 2022" -A x64 . -DCMAKE_BUILD_TYPE=Release -DUSE_CUDA=OFF
cmake --build . --config Release --parallel
cmake -G "Visual Studio 17 2022" -A x64 -S . -B build -DCMAKE_BUILD_TYPE=Release -DUSE_CUDA=OFF
cmake --build build --config Release --parallel

- name: Verify Build Artifacts
if: always()
continue-on-error: true
run: |
Get-ChildItem -Path $env:GITHUB_WORKSPACE\build\Release -Recurse
Get-ChildItem -Path $env:GITHUB_WORKSPACE\build\test\Release -Recurse

- name: Run tests
run: |
Get-ChildItem -Path $env:GITHUB_WORKSPACE\Release -Recurse
.\build\test\Release\unit_tests.exe

- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
27 changes: 15 additions & 12 deletions .github/workflows/win-gpu-x64-build.yml
@@ -15,12 +15,14 @@ env:
cuda_version: "12.2"

jobs:
Windows-CUDA-12-Build:
windows-gpu-x64-build:
runs-on: [ "self-hosted", "1ES.Pool=onnxruntime-genai-Win2022-GPU-A10" ]
steps:
- uses: actions/checkout@v4
- name: Checkout OnnxRuntime GenAI repo
uses: actions/checkout@v4
with:
submodules: false
submodules: true

- uses: actions/setup-python@v5
with:
python-version: '3.11.x'
@@ -42,21 +44,22 @@
run: |
Rename-Item -Path $env:ort_dir -NewName ort

- name: Git Submodule Update
run: |
git submodule update --init --recursive

- name: Build with CMake
run: |
mkdir build
cd build
cmake -G "Visual Studio 17 2022" -A x64 -T cuda=${{ env.cuda_dir }}\\v${{ env.cuda_version }} .. -DCMAKE_BUILD_TYPE=Release -DUSE_CUDA=TRUE
cmake --build . --config Release --parallel
cmake -G "Visual Studio 17 2022" -S . -B build -A x64 -T cuda=${{ env.cuda_dir }}\\v${{ env.cuda_version }} -DCMAKE_BUILD_TYPE=Release -DUSE_CUDA=TRUE
cmake --build build --config Release --parallel

- name: Verify Build Artifacts
if: always()
continue-on-error: true
run: |
Get-ChildItem -Path $env:GITHUB_WORKSPACE\build\test\Release -Recurse

- name: Prepend CUDA to PATH and Run tests
run: |
Get-ChildItem -Path $env:GITHUB_WORKSPACE\build\Release -Recurse
$env:PATH = "${{ env.cuda_dir }}\\v${{ env.cuda_version }}\\bin;" + $env:PATH
echo "Current PATH variable is: $env:PATH"
.\build\test\Release\unit_tests.exe

- name: Upload Build Artifacts
uses: actions/upload-artifact@v3
17 changes: 15 additions & 2 deletions test/CMakeLists.txt
@@ -6,7 +6,13 @@ file(GLOB test_srcs CONFIGURE_DEPENDS
)

# google unit test
add_executable(unit_tests main.cpp c_api.cpp tests.cpp sampling_benchmark.cpp sampling_tests.cpp)
add_executable(unit_tests
main.cpp
c_api_tests.cpp
model_tests.cpp
sampling_tests.cpp
sampling_benchmark.cpp
)
target_include_directories(unit_tests PRIVATE
${ORT_HEADER_DIR}
${CMAKE_SOURCE_DIR}/src
@@ -34,8 +40,15 @@ else()
target_include_directories(unit_tests PRIVATE ${TOKENIZER_ROOT})
target_link_libraries(unit_tests PRIVATE tokenizer)
endif()
file(GLOB onnxruntime_libs "${ORT_LIB_DIR}/${ONNXRUNTIME_FILES}")
set(TEST_MODEL_SRC_DIR "${CMAKE_CURRENT_SOURCE_DIR}/test_models/" "${CMAKE_SOURCE_DIR}/examples/")
set(TEST_MODEL_DES_DIR "$<TARGET_FILE_DIR:unit_tests>/test_models/")
add_custom_command(TARGET unit_tests POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy_directory ${TEST_MODEL_SRC_DIR} ${TEST_MODEL_DES_DIR}
COMMAND ${CMAKE_COMMAND} -E copy ${onnxruntime_libs} $<TARGET_FILE_DIR:unit_tests>
)

add_compile_definitions(MODEL_PATH="${TEST_MODEL_DES_DIR}")
source_group("Sources" FILES ${test_srcs})
set_property(DIRECTORY ${CMAKE_SOURCE_DIR} PROPERTY VS_STARTUP_PROJECT unit_tests)
#set_property(TARGET unit_tests PROPERTY TARGET_RUNTIME_LIBRARY_DIRS ${ORT_LIB_DIR})
include(GoogleTest)
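Note: the POST_BUILD commands above copy the test models and the ONNX Runtime libraries next to the test binary, and MODEL_PATH is compiled in as that test_models/ directory, so unit_tests can be launched straight from the build tree. A sketch of the resulting layout for the Linux gcc CPU preset follows; the file names shown are assumptions, not an exhaustive listing.

# After building with the linux_gcc_cpu_release preset:
ls build/gcc_cpu/release/test/
#   unit_tests           the gtest binary defined above
#   libonnxruntime.so*   copied from ${ORT_LIB_DIR} by the POST_BUILD step
#   test_models/         copied from test/test_models/ and examples/

# MODEL_PATH points at the copied test_models/ directory, so the binary
# does not depend on the current working directory:
./build/gcc_cpu/release/test/unit_tests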