Skip to content

Commit

Permalink
Merge remote-tracking branch 'upstream/main' into e2e
Browse files Browse the repository at this point in the history
  • Loading branch information
KfreeZ committed Aug 21, 2024
2 parents 476fe28 + 076e81e commit 1e88d26
Show file tree
Hide file tree
Showing 9 changed files with 80 additions and 17 deletions.
59 changes: 59 additions & 0 deletions .github/workflows/manual-freeze-tag.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# Manually-triggered workflow that "freezes" Helm chart versions for a release:
# it rewrites appVersion/version fields across all Chart.yaml files, regenerates
# the derived manifests, and pushes the result back to the branch it ran on.
name: Freeze helm release tag in helm charts on manual event

on:
  workflow_dispatch:
    inputs:
      oldappversion:
        default: "v0.8"
        description: "Old appVersion to be replaced"
        required: true
        type: string
      newappversion:
        default: "v0.9"
        description: "New appVersion to replace"
        required: true
        type: string
      oldversion:
        default: "0.8.0"
        description: "Old version to be replaced"
        required: true
        type: string
      newversion:
        default: "0.9.0"
        description: "New version to replace"
        required: true
        type: string

jobs:
  freeze-tag:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          # Full history is needed so the later push from a detached workflow
          # checkout targets the right branch state.
          fetch-depth: 0
          ref: ${{ github.ref }}

      - name: Set up Git
        # Authenticate pushes as the bot account via a PAT embedded in the
        # remote URL (never echoed; GitHub masks the secret in logs).
        # NOTE(review): the remote points at GenAIExamples.git, but the paths
        # edited below (helm-charts/) suggest this repo may be GenAIInfra —
        # confirm the target repository is intended.
        run: |
          git config --global user.name "NeuralChatBot"
          git config --global user.email "[email protected]"
          git remote set-url origin https://NeuralChatBot:"${{ secrets.ACTION_TOKEN }}"@github.com/opea-project/GenAIExamples.git

      - name: Run script
        env:
          NEWTAG: ${{ inputs.newappversion }}
        # sed rewrites every Chart.yaml in place:
        #  - appVersion is quoted in the charts, so the pattern includes \"
        #  - chart version is unquoted, so it is matched bare
        # '#' is used as the sed delimiter because versions may contain '/'
        # via user input. update_manifests.sh then re-renders the manifests
        # that embed these versions.
        run: |
          find helm-charts/ -name 'Chart.yaml' -type f -exec sed -i "s#appVersion: \"${{ inputs.oldappversion }}\"#appVersion: \"${{ inputs.newappversion }}\"#g" {} \;
          find helm-charts/ -name 'Chart.yaml' -type f -exec sed -i "s#version: ${{ inputs.oldversion }}#version: ${{ inputs.newversion }}#g" {} \;
          ./helm-charts/update_manifests.sh

      - name: Commit changes
        # Guard the commit: if the sed patterns matched nothing (e.g. the old
        # version input was wrong), 'git commit' would exit non-zero on an
        # empty tree and fail the job. Skip the push instead and surface a
        # clear message in the log.
        run: |
          git add .
          if git diff --cached --quiet; then
            echo "No version changes detected; nothing to commit."
          else
            git commit -s -m "Freeze Helm charts versions"
            git push
          fi
21 changes: 11 additions & 10 deletions .github/workflows/push-image-build.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,16 +4,17 @@
name: Upgrade GMC system on push event

on:
push:
branches: ["main"]
paths:
- microservices-connector/**
- "!microservices-connector/helm/**"
- "!**.md"
- "!**.txt"
- "!**.png"
- "!.**"
- .github/workflows/gmc-on-push.yaml
# push:
# branches: ["main"]
# paths:
# - microservices-connector/**
# - "!microservices-connector/helm/**"
# - "!**.md"
# - "!**.txt"
# - "!**.png"
# - "!.**"
# - .github/workflows/gmc-on-push.yaml
workflow_dispatch:

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-on-push
Expand Down
8 changes: 4 additions & 4 deletions .github/workflows/scripts/e2e/gmc_xeon_test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -167,7 +167,7 @@ function validate_chatqna() {
kubectl create ns $CHATQNA_NAMESPACE
sed -i "s|namespace: chatqa|namespace: $CHATQNA_NAMESPACE|g" $(pwd)/config/samples/chatQnA_xeon.yaml
# workaround for issue #268
yq -i '(.spec.nodes.root.steps[] | select ( .name == "Tgi")).internalService.config.MODEL_ID = "bigscience/bloom-560m"' $(pwd)/config/samples/chatQnA_xeon.yaml
#yq -i '(.spec.nodes.root.steps[] | select ( .name == "Tgi")).internalService.config.MODEL_ID = "bigscience/bloom-560m"' $(pwd)/config/samples/chatQnA_xeon.yaml
kubectl apply -f $(pwd)/config/samples/chatQnA_xeon.yaml

# Wait until the router service is ready
Expand Down Expand Up @@ -237,7 +237,7 @@ function validate_chatqna_with_dataprep() {
kubectl create ns $CHATQNA_DATAPREP_NAMESPACE
sed -i "s|namespace: chatqa|namespace: $CHATQNA_DATAPREP_NAMESPACE|g" $(pwd)/config/samples/chatQnA_dataprep_xeon.yaml
# workaround for issue #268
yq -i '(.spec.nodes.root.steps[] | select ( .name == "Tgi")).internalService.config.MODEL_ID = "bigscience/bloom-560m"' $(pwd)/config/samples/chatQnA_dataprep_xeon.yaml
#yq -i '(.spec.nodes.root.steps[] | select ( .name == "Tgi")).internalService.config.MODEL_ID = "bigscience/bloom-560m"' $(pwd)/config/samples/chatQnA_dataprep_xeon.yaml
kubectl apply -f $(pwd)/config/samples/chatQnA_dataprep_xeon.yaml

# Wait until the router service is ready
Expand Down Expand Up @@ -330,7 +330,7 @@ function validate_chatqna_in_switch() {
kubectl create ns $CHATQNA_SWITCH_NAMESPACE
sed -i "s|namespace: switch|namespace: $CHATQNA_SWITCH_NAMESPACE|g" $(pwd)/config/samples/chatQnA_switch_xeon.yaml
# workaround for issue #268
yq -i '(.spec.nodes.root.steps[] | select ( .name == "Tgi")).internalService.config.MODEL_ID = "bigscience/bloom-560m"' $(pwd)/config/samples/chatQnA_switch_xeon.yaml
#yq -i '(.spec.nodes.root.steps[] | select ( .name == "Tgi")).internalService.config.MODEL_ID = "bigscience/bloom-560m"' $(pwd)/config/samples/chatQnA_switch_xeon.yaml
kubectl apply -f $(pwd)/config/samples/chatQnA_switch_xeon.yaml

# Wait until the router service is ready
Expand Down Expand Up @@ -450,7 +450,7 @@ function validate_modify_config() {
fi

#change the model id of the step named "Tgi" in the codegen_xeon_mod.yaml
yq -i '(.spec.nodes.root.steps[] | select ( .name == "Tgi")).internalService.config.MODEL_ID = "bigscience/bloom-560m"' $(pwd)/config/samples/codegen_xeon_mod.yaml
yq -i '(.spec.nodes.root.steps[] | select ( .name == "Tgi")).internalService.config.MODEL_ID = "HuggingFaceH4/mistral-7b-grok"' $(pwd)/config/samples/codegen_xeon_mod.yaml
kubectl apply -f $(pwd)/config/samples/codegen_xeon_mod.yaml
#you are supposed to see an error, it's a known issue, but it does not affect the tests
#https://github.com/opea-project/GenAIInfra/issues/314
Expand Down
1 change: 1 addition & 0 deletions helm-charts/common/retriever-usvc/templates/configmap.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -32,3 +32,4 @@ data:
LANGCHAIN_API_KEY: {{ .Values.global.LANGCHAIN_API_KEY | quote }}
LANGCHAIN_PROJECT: "opea-retriever-service"
HF_HOME: "/tmp/.cache/huggingface"
HUGGINGFACEHUB_API_TOKEN: {{ .Values.global.HUGGINGFACEHUB_API_TOKEN | quote}}
1 change: 1 addition & 0 deletions helm-charts/common/retriever-usvc/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -92,3 +92,4 @@ global:
no_proxy: ""
LANGCHAIN_TRACING_V2: false
LANGCHAIN_API_KEY: "insert-your-langchain-key-here"
HUGGINGFACEHUB_API_TOKEN: "insert-your-huggingface-token-here"
2 changes: 1 addition & 1 deletion helm-charts/common/tgi/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ image:
repository: ghcr.io/huggingface/text-generation-inference
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "latest-intel-cpu"
tag: "2.2.0"

imagePullSecrets: []
nameOverride: ""
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ data:
LANGCHAIN_API_KEY: "insert-your-langchain-key-here"
LANGCHAIN_PROJECT: "opea-retriever-service"
HF_HOME: "/tmp/.cache/huggingface"
HUGGINGFACEHUB_API_TOKEN: "insert-your-huggingface-token-here"
---
# Source: retriever-usvc/templates/service.yaml
# Copyright (C) 2024 Intel Corporation
Expand Down
2 changes: 1 addition & 1 deletion microservices-connector/config/manifests/tgi.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ spec:
optional: true
securityContext:
{}
image: "ghcr.io/huggingface/text-generation-inference:latest-intel-cpu"
image: "ghcr.io/huggingface/text-generation-inference:2.2.0"
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /data
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -120,5 +120,5 @@ spec:
serviceName: tgi-service-llama
config:
endpoint: /generate
MODEL_ID: bigscience/bloom-560m
MODEL_ID: HuggingFaceH4/mistral-7b-grok
isDownstreamService: true

0 comments on commit 1e88d26

Please sign in to comment.