-
Notifications
You must be signed in to change notification settings - Fork 3
61 lines (44 loc) · 1.18 KB
/
llamacpp.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
---
name: llama.cpp

# Interactive login shell (-i) so the CANN container's profile scripts
# (toolkit environment setup) are sourced before each run step.
defaults:
  run:
    shell: bash -ieo pipefail {0}

on:
  workflow_dispatch:
  pull_request:
    paths:
      - '.github/workflows/llamacpp.yaml'
      - 'requirements/**'
  push:
    paths:
      - '.github/workflows/llamacpp.yaml'

# Cancel superseded in-flight runs for the same workflow + ref.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

permissions:
  contents: read
  issues: write

jobs:
  unit-tests:
    runs-on: ubuntu-latest
    container:
      image: ascendai/cann:8.0.rc2.beta1-910b-openeuler22.03-py3.9
    steps:
      - uses: actions/checkout@v4
      - name: Install llamacpp
        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
          retry_on: error
          command: |
            # -y keeps yum non-interactive; without it yum assumes "no"
            # in a non-TTY container and the step aborts.
            yum update -y
            yum install git cmake -y
            pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple
            # Remove any partial checkout left by a failed attempt so the
            # retry action's re-run does not fail on an existing directory.
            rm -rf llama.cpp
            git clone https://github.com/ggerganov/llama.cpp.git
      - name: Build
        run: |
          cd llama.cpp
          mkdir build
          cd build
          # NOTE(review): upstream llama.cpp gates Ascend support behind
          # GGML_CANN (formerly LLAMA_CANN); confirm WITH_CANN is the
          # option honored by the revision being cloned.
          cmake .. -DCMAKE_BUILD_TYPE=release -DWITH_CANN=on
          cmake --build . -j $(nproc)