Skip to content

Commit

Permalink
Merge branch 'main' into main
Browse files Browse the repository at this point in the history
  • Loading branch information
chensuyue authored Aug 17, 2024
2 parents 701afbd + 77e0e7b commit 5d4b635
Show file tree
Hide file tree
Showing 127 changed files with 2,733 additions and 1,314 deletions.
38 changes: 38 additions & 0 deletions .github/workflows/manual-freeze-requirements.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# Workflow: regenerate frozen (pinned) requirements files and commit them back.
# Manual-only trigger (workflow_dispatch); pushes to the ref it was dispatched
# on using the NeuralChatBot service account.
name: Freeze-requirements

on:
workflow_dispatch:

jobs:
freeze-requirements:
runs-on: ubuntu-latest

steps:
# fetch-depth: 0 — full clone so the bot can commit and push back to this ref.
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.ref }}

- uses: actions/setup-python@v5
with:
python-version: "3.10"

# Configure the bot identity and embed the push token into the origin URL.
# NOTE(review): the token becomes part of the remote URL and may leak into
# verbose git output — confirm this is acceptable for this repo.
- name: Set up Git
run: |
git config --global user.name "NeuralChatBot"
git config --global user.email "[email protected]"
git remote set-url origin https://NeuralChatBot:"${{ secrets.ACTION_TOKEN }}"@github.com/opea-project/GenAIComps.git
- name: Run script
run: |
bash .github/workflows/scripts/freeze_requirements.sh
# NOTE(review): `git commit` exits non-zero when the script produced no
# changes, which fails this job — confirm that is the intended behavior.
- name: Commit changes
run: |
git add .
git commit -s -m "Freeze requirements"
git push
19 changes: 9 additions & 10 deletions .github/workflows/pr-examples-test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -41,26 +41,25 @@ jobs:
env:
HUGGINGFACEHUB_API_TOKEN: ${{ secrets.HUGGINGFACEHUB_API_TOKEN }}
run: |
cd ../ && sudo rm -rf GenAIExamples
git clone https://github.com/opea-project/GenAIExamples.git
cd ${{ github.workspace }}/GenAIExamples/ChatQnA/docker/gaudi
sed -i "s#:latest#:comps#g" compose.yaml
cat compose.yaml
cd GenAIExamples/ChatQnA/docker
cp -r ${{ github.workspace }}/../GenAIComps .
cd ${{ github.workspace }}/GenAIExamples/ChatQnA/tests
GenAIComps_dir=${{github.workspace}}
cd ../tests
sed -i '/GenAIComps.git/d' test_chatqna_on_gaudi.sh
sed -i "s#cd GenAIComps#cd ${GenAIComps_dir}#g" test_chatqna_on_gaudi.sh
sed -i "s#docker build -t#docker build --no-cache -q -t#g" test_chatqna_on_gaudi.sh
sed -i "s#:latest#:comps#g" test_chatqna_on_gaudi.sh
cat test_chatqna_on_gaudi.sh
echo "Run test..."
export IMAGE_TAG="comps"
timeout 50m bash test_chatqna_on_gaudi.sh
echo "LOG_PATH=$(pwd)/*.log" >> $GITHUB_ENV
- name: Clean up container
if: cancelled() || failure()
run: |
cd ${{ github.workspace }}/GenAIExamples/ChatQnA/docker/gaudi
cd ${{ github.workspace }}/../GenAIExamples/ChatQnA/docker/gaudi
docker compose stop && docker compose rm -f
docker system prune -f
Expand All @@ -69,4 +68,4 @@ jobs:
uses: actions/upload-artifact@v4
with:
name: "Examples-Test-Logs"
path: ${{ github.workspace }}/GenAIExamples/ChatQnA/tests/*.log
path: ${{ env.LOG_PATH }}
1 change: 1 addition & 0 deletions .github/workflows/pr-microservice-test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ jobs:
run: |
sudo rm -rf ${{github.workspace}}/*
docker system prune -f
docker rmi $(docker images --filter reference="*/*:comps" -q) || true
- name: Checkout out Repo
uses: actions/checkout@v4
Expand Down
63 changes: 63 additions & 0 deletions .github/workflows/scripts/freeze_requirements.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
#!/bin/bash

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

#######################################
# Pin the packages of one requirements file with pip-compile, then rewrite the
# file so it keeps ONLY its original (direct) dependencies — now with pinned
# versions — rather than the full transitive freeze.
# Arguments: $1 - path to a requirements.txt file
# Outputs:   rewrites $1 in place; emits ::group:: log markers for Actions UI
#######################################
function freeze() {
local file=$1
local folder=$(dirname "$file")
# "true": keep only the file's original packages; any other value would
# replace the file with the full pip-compile output (see else branch).
local keep_origin_packages="true"
echo "::group::Check $file ..."
# Resolve and pin; freeze.txt is only created when resolution succeeds.
pip-compile \
--no-upgrade \
--no-annotate \
--no-header \
--output-file "$folder/freeze.txt" \
"$file"
echo "::endgroup::"

if [[ -e "$folder/freeze.txt" ]]; then
if [[ "$keep_origin_packages" == "true" ]]; then
# fix corner cases
# Strip comments, blank lines and spaces from both files; in freeze.txt
# also collapse extras pip-compile expanded (huggingface-hub[inference],
# uvicorn[standard]) back to the bare package names.
sed -i '/^\s*#/d; s/#.*//; /^\s*$/d; s/ //g' "$file"
sed -i '/^\s*#/d; s/#.*//; /^\s*$/d; s/ //g; s/huggingface-hub\[inference\]/huggingface-hub/g; s/uvicorn\[standard\]/uvicorn/g' "$folder/freeze.txt"
# If the source asked for plain `transformers`, drop the
# [sentencepiece] extra that the resolver may have added.
if grep -q '^transformers$' $file && ! grep -q '^transformers\[sentencepiece\]$' $file; then
sed -i "s/transformers\[sentencepiece\]/transformers/" "$folder/freeze.txt"
fi
# Normalize names (lowercase, -/_ unified) and intersect: packages1 =
# direct deps from $file, packages2 = pinned set from freeze.txt.
packages1=$(tr '><' '=' <"$file" | cut -d'=' -f1 | tr '[:upper:]' '[:lower:]' | sed 's/[-_]/-/g')
packages2=$(cut -d'=' -f1 "$folder/freeze.txt" | tr '[:upper:]' '[:lower:]' | sed 's/[-_]/-/g')
common_packages=$(comm -12 <(echo "$packages2" | sort) <(echo "$packages1" | sort))
# Start the rewritten file with any git+ VCS requirements, which
# pip-compile's output would not preserve verbatim.
grep '^git\+' "$file" >temp_file || touch temp_file
rm -rf "$file" && mv temp_file "$file"
# Copy each pinned line whose (normalized, regex-escaped) name is one
# of the original direct dependencies.
while IFS= read -r line; do
package=$(echo "$line" | cut -d'=' -f1)
package_transformed=$(echo "$package" | tr '[:upper:]' '[:lower:]' | sed 's/[_-]/-/g')
pattern=$(echo "$package_transformed" | sed 's/\[/\\\[/g; s/\]/\\\]/g')
if echo "$common_packages" | grep -q "^$pattern$"; then
echo "$line" >>"$file"
fi
done <"$folder/freeze.txt"
rm "$folder/freeze.txt"
else
# Full-freeze mode: replace the requirements file with the pinned output.
mv "$folder/freeze.txt" "$file"
fi
fi
}

#######################################
# Guard against running on the protected "main" branch.
# Globals:   GITHUB_REF_NAME (read)
# Outputs:   status message to stdout
# Exits:     0 (whole script) when the branch is "main"
#######################################
check_branch_name() {
  case "$GITHUB_REF_NAME" in
    main)
      echo "$GITHUB_REF_NAME is protected branch"
      exit 0
      ;;
    *)
      echo "branch name is $GITHUB_REF_NAME"
      ;;
  esac
}

#######################################
# Entry point: bail out on the protected branch, install pip-tools, then
# freeze every requirements.txt in the repository.
# Globals:   GITHUB_REF_NAME (read, via check_branch_name)
#######################################
function main() {
  check_branch_name
  echo "::group::pip install pip-tools" && pip install pip-tools --upgrade && echo "::endgroup::"
  # Export so `freeze` is visible inside the bash child spawned by xargs.
  export -f freeze
  # NUL-delimited find/xargs: plain `find | xargs` mangles paths containing
  # spaces, quotes, backslashes or newlines; -print0/-0 handles them all.
  find . -name "requirements.txt" -print0 | xargs -0 -n 1 -I {} bash -c 'freeze "$@"' _ {}
}

main
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1 +1,2 @@
__pycache__
*.egg-info/
2 changes: 2 additions & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@ repos:
- id: check-json
- id: check-yaml
- id: debug-statements
- id: mixed-line-ending
args: [--fix=lf]
- id: requirements-txt-fixer
- id: trailing-whitespace
files: (.*\.(py|rst|cmake|yaml|yml|json|ts|js|html|svelte|sh))$
Expand Down
16 changes: 8 additions & 8 deletions comps/agent/langchain/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,32 +4,32 @@ The langchain agent model refers to a framework that integrates the reasoning ca

![Architecture Overview](agent_arch.jpg)

# 🚀1. Start Microservice with Python(Option 1)
## 🚀1. Start Microservice with Python (Option 1)

## 1.1 Install Requirements
### 1.1 Install Requirements

```bash
cd comps/agent/langchain/
pip install -r requirements.txt
```

## 1.2 Start Microservice with Python Script
### 1.2 Start Microservice with Python Script

```bash
cd comps/agent/langchain/
python agent.py
```

# 🚀2. Start Microservice with Docker (Option 2)
## 🚀2. Start Microservice with Docker (Option 2)

## Build Microservices
### Build Microservices

```bash
cd GenAIComps/ # back to GenAIComps/ folder
docker build -t opea/comps-agent-langchain:latest -f comps/agent/langchain/docker/Dockerfile .
```

## start microservices
### Start Microservices

```bash
export ip_address=$(hostname -I | awk '{print $1}')
Expand All @@ -56,7 +56,7 @@ docker logs comps-langchain-agent-endpoint
> docker run --rm --runtime=runc --name="comps-langchain-agent-endpoint" -v ./comps/agent/langchain/:/home/user/comps/agent/langchain/ -p 9090:9090 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} --env-file ${agent_env} opea/comps-agent-langchain:latest
> ```
# 🚀3. Validate Microservice
## 🚀3. Validate Microservice
Once microservice starts, user can use below script to invoke.
Expand All @@ -73,7 +73,7 @@ data: [DONE]
```
# 🚀4. Provide your own tools
## 🚀4. Provide your own tools

- Define tools

Expand Down
24 changes: 12 additions & 12 deletions comps/asr/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,17 +2,17 @@

ASR (Audio-Speech-Recognition) microservice helps users convert speech to text. When building a talking bot with LLM, users will need to convert their audio inputs (What they talk, or Input audio from other sources) to text, so the LLM is able to tokenize the text and generate an answer. This microservice is built for that conversion stage.

# 🚀1. Start Microservice with Python (Option 1)
## 🚀1. Start Microservice with Python (Option 1)

To start the ASR microservice with Python, you need to first install python packages.

## 1.1 Install Requirements
### 1.1 Install Requirements

```bash
pip install -r requirements.txt
```

## 1.2 Start Whisper Service/Test
### 1.2 Start Whisper Service/Test

- Xeon CPU

Expand Down Expand Up @@ -40,7 +40,7 @@ nohup python whisper_server.py --device=hpu &
python check_whisper_server.py
```

## 1.3 Start ASR Service/Test
### 1.3 Start ASR Service/Test

```bash
cd ../
Expand All @@ -54,13 +54,13 @@ While the Whisper service is running, you can start the ASR service. If the ASR
{'id': '0e686efd33175ce0ebcf7e0ed7431673', 'text': 'who is pat gelsinger'}
```

# 🚀2. Start Microservice with Docker (Option 2)
## 🚀2. Start Microservice with Docker (Option 2)

Alternatively, you can also start the ASR microservice with Docker.

## 2.1 Build Images
### 2.1 Build Images

### 2.1.1 Whisper Server Image
#### 2.1.1 Whisper Server Image

- Xeon CPU

Expand All @@ -76,15 +76,15 @@ cd ../..
docker build -t opea/whisper-gaudi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/asr/whisper/Dockerfile_hpu .
```

### 2.1.2 ASR Service Image
#### 2.1.2 ASR Service Image

```bash
docker build -t opea/asr:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/asr/Dockerfile .
```

## 2.2 Start Whisper and ASR Service
### 2.2 Start Whisper and ASR Service

### 2.2.1 Start Whisper Server
#### 2.2.1 Start Whisper Server

- Xeon

Expand All @@ -98,15 +98,15 @@ docker run -p 7066:7066 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$htt
docker run -p 7066:7066 --runtime=habana -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none --cap-add=sys_nice --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy opea/whisper-gaudi:latest
```

### 2.2.2 Start ASR service
#### 2.2.2 Start ASR service

```bash
ip_address=$(hostname -I | awk '{print $1}')

docker run -d -p 9099:9099 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e ASR_ENDPOINT=http://$ip_address:7066 opea/asr:latest
```

### 2.2.3 Test
#### 2.2.3 Test

```bash
# Use curl or python
Expand Down
3 changes: 1 addition & 2 deletions comps/asr/whisper/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -27,10 +27,9 @@ RUN pip install --no-cache-dir --upgrade pip && \
else \
pip install --no-cache-dir -r /home/user/comps/asr/requirements.txt ; \
fi
pip list

ENV PYTHONPATH=$PYTHONPATH:/home/user

WORKDIR /home/user/comps/asr/whisper

ENTRYPOINT ["python", "whisper_server.py", "--device", "cpu"]
ENTRYPOINT ["python", "whisper_server.py", "--device", "cpu"]
Loading

0 comments on commit 5d4b635

Please sign in to comment.