From df75e78a0834e5c03083037b68c2b11c12136183 Mon Sep 17 00:00:00 2001
From: artem-astafev
Date: Wed, 13 Nov 2024 11:43:30 +0700
Subject: [PATCH 01/13] Add compose example for DocSum amd rocm deployment

Signed-off-by: artem-astafev
---
 DocSum/docker_compose/amd/gpu/rocm/README.md  | 112 +++++++++++
 .../docker_compose/amd/gpu/rocm/compose.yaml  |  87 ++++++++
 DocSum/docker_compose/amd/gpu/rocm/set_env.sh |  15 ++
 DocSum/tests/test_compose_on_rocm.sh          | 189 ++++++++++++++++++
 4 files changed, 403 insertions(+)
 create mode 100644 DocSum/docker_compose/amd/gpu/rocm/README.md
 create mode 100644 DocSum/docker_compose/amd/gpu/rocm/compose.yaml
 create mode 100644 DocSum/docker_compose/amd/gpu/rocm/set_env.sh
 create mode 100644 DocSum/tests/test_compose_on_rocm.sh

diff --git a/DocSum/docker_compose/amd/gpu/rocm/README.md b/DocSum/docker_compose/amd/gpu/rocm/README.md
new file mode 100644
index 000000000..53a48461d
--- /dev/null
+++ b/DocSum/docker_compose/amd/gpu/rocm/README.md
@@ -0,0 +1,112 @@
+## 🚀 Start Microservices and MegaService
+
+### Required Models
+
+The default model is "Intel/neural-chat-7b-v3-3". Change "DOCSUM_LLM_MODEL_ID" in the environment variables below if you want to use another model.
+For gated models, you also need to provide a [HuggingFace token](https://huggingface.co/docs/hub/security-tokens) in the "DOCSUM_HUGGINGFACEHUB_API_TOKEN" environment variable.
+
+### Setup Environment Variables
+
+Since the `compose.yaml` will consume some environment variables, you need to set them up in advance as below.
+
+```bash
+export DOCSUM_TGI_IMAGE="ghcr.io/huggingface/text-generation-inference:2.3.1-rocm"
+export DOCSUM_LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
+export HOST_IP=${host_ip}
+export DOCSUM_TGI_SERVICE_PORT="8008"
+export DOCSUM_TGI_LLM_ENDPOINT="http://${HOST_IP}:${DOCSUM_TGI_SERVICE_PORT}"
+export DOCSUM_HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
+export DOCSUM_LLM_SERVER_PORT="9000"
+export DOCSUM_BACKEND_SERVER_PORT="8888"
+export DOCSUM_FRONTEND_PORT="5173"
+export DOCSUM_CARD_ID="card0"
+export DOCSUM_RENDER_ID="renderD128"
+```
+
+Note: Please replace `host_ip` with your external IP address; do not use localhost.
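+
+For example, one way to set it (a sketch, assuming a Linux host whose first address from `hostname -I` is the externally reachable one, which is also what the test script below assumes):
+
+```bash
+# Assumption: the first address reported by hostname -I is the external one
+export host_ip=$(hostname -I | awk '{print $1}')
+```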
+
+Note: In order to limit access to a subset of GPUs, please pass each device individually using one or more `--device /dev/dri/renderD<node>` arguments, where `<node>` is the card index, starting from 128. (https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html#docker-restrict-gpus)
+
+Example of setting isolation for 1 GPU
+
+```
+      - /dev/dri/card0:/dev/dri/card0
+      - /dev/dri/renderD128:/dev/dri/renderD128
+```
+
+Example of setting isolation for 2 GPUs
+
+```
+      - /dev/dri/card0:/dev/dri/card0
+      - /dev/dri/renderD128:/dev/dri/renderD128
+      - /dev/dri/card1:/dev/dri/card1
+      - /dev/dri/renderD129:/dev/dri/renderD129
+```
+
+More information about accessing and restricting AMD GPUs can be found at https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html#docker-restrict-gpus
+
+### Start Microservice Docker Containers
+
+```bash
+cd GenAIExamples/DocSum/docker_compose/amd/gpu/rocm
+docker compose up -d
+```
+
+### Validate Microservices
+
+1. TGI Service
+
+   ```bash
+   curl http://${host_ip}:8008/generate \
+     -X POST \
+     -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":64, "do_sample": true}}' \
+     -H 'Content-Type: application/json'
+   ```
+
+2. LLM Microservice
+
+   ```bash
+   curl http://${host_ip}:9000/v1/chat/docsum \
+     -X POST \
+     -d '{"query":"Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}' \
+     -H 'Content-Type: application/json'
+   ```
+
+3. MegaService
+
+   ```bash
+   curl http://${host_ip}:8888/v1/docsum -H "Content-Type: application/json" -d '{
+     "messages": "Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5.","max_tokens":32, "language":"en", "stream":false
+   }'
+   ```
+
+## 🚀 Launch the Svelte UI
+
+Open this URL `http://{host_ip}:5173` in your browser to access the frontend.
+
+![project-screenshot](https://github.com/intel-ai-tce/GenAIExamples/assets/21761437/93b1ed4b-4b76-4875-927e-cc7818b4825b)
+
+Here is an example of summarizing an article.
+
+![image](https://github.com/intel-ai-tce/GenAIExamples/assets/21761437/67ecb2ec-408d-4e81-b124-6ded6b833f55)
+
+## 🚀 Launch the React UI (Optional)
+
+To access the React-based frontend, modify the UI service in the `compose.yaml` file. Replace the `docsum-ui-server` service with the `docsum-react-ui-server` service as per the config below:
+
+```yaml
+docsum-react-ui-server:
+  image: ${REGISTRY:-opea}/docsum-react-ui:${TAG:-latest}
+  container_name: docsum-react-ui-server
+  depends_on:
+    - docsum-backend-server
+  ports:
+    - "5174:80"
+  environment:
+    - no_proxy=${no_proxy}
+    - https_proxy=${https_proxy}
+    - http_proxy=${http_proxy}
+    - DOC_BASE_URL=${BACKEND_SERVICE_ENDPOINT}
+```
+
+Open this URL `http://{host_ip}:5174` in your browser to access the frontend.
+
+![project-screenshot](../../../../assets/img/docsum-ui-react.png)

diff --git a/DocSum/docker_compose/amd/gpu/rocm/compose.yaml b/DocSum/docker_compose/amd/gpu/rocm/compose.yaml
new file mode 100644
index 000000000..23c540ed8
--- /dev/null
+++ b/DocSum/docker_compose/amd/gpu/rocm/compose.yaml
@@ -0,0 +1,87 @@
+# Copyright (C) 2024 Advanced Micro Devices, Inc.
+# SPDX-License-Identifier: Apache-2.0
+
+services:
+  docsum-tgi-service:
+    image: ghcr.io/huggingface/text-generation-inference:2.3.1-rocm
+    container_name: docsum-tgi-service
+    ports:
+      - "${DOCSUM_TGI_SERVICE_PORT}:80"
+    environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+      TGI_LLM_ENDPOINT: "http://${HOST_IP}:${DOCSUM_TGI_SERVICE_PORT}"
+      HUGGINGFACEHUB_API_TOKEN: ${DOCSUM_HUGGINGFACEHUB_API_TOKEN}
+    volumes:
+      - "/var/opea/docsum-service/data:/data"
+    shm_size: 1g
+    devices:
+      - /dev/kfd:/dev/kfd
+    cap_add:
+      - SYS_PTRACE
+    group_add:
+      - video
+    security_opt:
+      - seccomp:unconfined
+    ipc: host
+    command: --model-id ${DOCSUM_LLM_MODEL_ID}
+  docsum-llm-server:
+    image: ${REGISTRY:-opea}/llm-docsum-tgi:${TAG:-latest}
+    container_name: docsum-llm-server
+    depends_on:
+      - docsum-tgi-service
+    ports:
+      - "${DOCSUM_LLM_SERVER_PORT}:9000"
+    ipc: host
+    group_add:
+      - video
+    security_opt:
+      - seccomp:unconfined
+    cap_add:
+      - SYS_PTRACE
+    devices:
+      - /dev/kfd:/dev/kfd
+      - /dev/dri/${DOCSUM_CARD_ID}:/dev/dri/${DOCSUM_CARD_ID}
+      - /dev/dri/${DOCSUM_RENDER_ID}:/dev/dri/${DOCSUM_RENDER_ID}
+    environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+      TGI_LLM_ENDPOINT: "http://${HOST_IP}:${DOCSUM_TGI_SERVICE_PORT}"
+      HUGGINGFACEHUB_API_TOKEN: ${DOCSUM_HUGGINGFACEHUB_API_TOKEN}
+    restart: unless-stopped
+  docsum-backend-server:
+    image: ${REGISTRY:-opea}/docsum:${TAG:-latest}
+    container_name: docsum-backend-server
+    depends_on:
+      - docsum-tgi-service
+      - docsum-llm-server
+    ports:
+      - "${DOCSUM_BACKEND_SERVER_PORT}:8888"
+    environment:
+      - no_proxy=${no_proxy}
+      - https_proxy=${https_proxy}
+      - http_proxy=${http_proxy}
+      - MEGA_SERVICE_HOST_IP=${HOST_IP}
+      - LLM_SERVICE_HOST_IP=${HOST_IP}
+    ipc: host
+    restart: always
+  docsum-ui-server:
+    image: ${REGISTRY:-opea}/docsum-ui:${TAG:-latest}
+    container_name: docsum-ui-server
+    depends_on:
+      - docsum-backend-server
+    ports:
+      - "${DOCSUM_FRONTEND_PORT}:5173"
+    environment:
+      - no_proxy=${no_proxy}
+      - https_proxy=${https_proxy}
+      - http_proxy=${http_proxy}
+      - DOC_BASE_URL="http://${HOST_IP}:${DOCSUM_BACKEND_PORT}/v1/docsum"
+    ipc: host
+    restart: always
+
+networks:
+  default:
+    driver: bridge

diff --git a/DocSum/docker_compose/amd/gpu/rocm/set_env.sh b/DocSum/docker_compose/amd/gpu/rocm/set_env.sh
new file mode 100644
index 000000000..16e9e4750
--- /dev/null
+++ b/DocSum/docker_compose/amd/gpu/rocm/set_env.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2024 Advanced Micro Devices, Inc.
+# SPDX-License-Identifier: Apache-2.0
+
+export DOCSUM_TGI_IMAGE="ghcr.io/huggingface/text-generation-inference:2.3.1-rocm"
+export DOCSUM_LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
+export HOST_IP=${host_ip}
+export DOCSUM_TGI_SERVICE_PORT="8008"
+export DOCSUM_TGI_LLM_ENDPOINT="http://${HOST_IP}:${DOCSUM_TGI_SERVICE_PORT}"
+export DOCSUM_HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
+export DOCSUM_LLM_SERVER_PORT="9000"
+export DOCSUM_BACKEND_SERVER_PORT="8888"
+export DOCSUM_FRONTEND_PORT="5173"
+export BACKEND_SERVICE_ENDPOINT="http://${HOST_IP}:${DOCSUM_BACKEND_SERVER_PORT}/v1/docsum"

diff --git a/DocSum/tests/test_compose_on_rocm.sh b/DocSum/tests/test_compose_on_rocm.sh
new file mode 100644
index 000000000..a9190007f
--- /dev/null
+++ b/DocSum/tests/test_compose_on_rocm.sh
@@ -0,0 +1,189 @@
+#!/bin/bash
+# Copyright (C) 2024 Advanced Micro Devices, Inc.
+# SPDX-License-Identifier: Apache-2.0
+
+set -xe
+IMAGE_REPO=${IMAGE_REPO:-"opea"}
+IMAGE_TAG=${IMAGE_TAG:-"latest"}
+echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
+echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
+export REGISTRY=${IMAGE_REPO}
+export TAG=${IMAGE_TAG}
+
+WORKPATH=$(dirname "$PWD")
+LOG_PATH="$WORKPATH/tests"
+ip_address=$(hostname -I | awk '{print $1}')
+
+function build_docker_images() {
+    cd "$WORKPATH"/docker_image_build
+    git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
+
+    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
+    service_list="docsum docsum-ui llm-docsum-tgi"
+    docker compose -f build.yaml build "${service_list}" --no-cache > "${LOG_PATH}"/docker_image_build.log
+
+    docker pull ghcr.io/huggingface/text-generation-inference:2.3.1-rocm
+    docker images && sleep 1s
+}
+
+function start_services() {
+    cd "$WORKPATH"/docker_compose/amd/gpu/rocm
+
+    export DOCSUM_TGI_IMAGE="ghcr.io/huggingface/text-generation-inference:2.3.1-rocm"
+    export DOCSUM_LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
+    export HOST_IP=${ip_address}
+    export DOCSUM_TGI_SERVICE_PORT="8008"
+    export DOCSUM_TGI_LLM_ENDPOINT="http://${HOST_IP}:8008"
+    export DOCSUM_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
+    export DOCSUM_LLM_SERVER_PORT="9000"
+    export DOCSUM_BACKEND_SERVER_PORT="8888"
+    export DOCSUM_FRONTEND_PORT="5552"
+    export MEGA_SERVICE_HOST_IP=${ip_address}
+    export LLM_SERVICE_HOST_IP=${ip_address}
+    export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:8888/v1/docsum"
+
+    sed -i "s/backend_address/$ip_address/g" "$WORKPATH"/ui/svelte/.env
+
+    # Start Docker Containers
+    docker compose up -d > "${LOG_PATH}"/start_services_with_compose.log
+
+    n=0
+    until [[ "$n" -ge 100 ]]; do
+        docker logs docsum-tgi-service > "${LOG_PATH}"/tgi_service_start.log
+        if grep -q Connected "${LOG_PATH}"/tgi_service_start.log; then
+            break
+        fi
+        sleep 5s
+        n=$((n+1))
+    done
+}
+
+function validate_services() {
+    local URL="$1"
+    local EXPECTED_RESULT="$2"
+    local SERVICE_NAME="$3"
+    local DOCKER_NAME="$4"
+    local INPUT_DATA="$5"
+
+    HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL")
+    local HTTP_STATUS
+    if [ "$HTTP_STATUS" -eq 200 ]; then
+        echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..."
+
+        CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee "${LOG_PATH}"/"${SERVICE_NAME}".log)
+        local CONTENT
+
+        if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then
+            echo "[ $SERVICE_NAME ] Content is as expected."
+        else
+            echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT"
+            docker logs "${DOCKER_NAME}" >> "${LOG_PATH}"/"${SERVICE_NAME}".log
+            exit 1
+        fi
+    else
+        echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS"
+        docker logs "${DOCKER_NAME}" >> "${LOG_PATH}"/"${SERVICE_NAME}".log
+        exit 1
+    fi
+    sleep 1s
+}
+
+function validate_microservices() {
+    # Check if the microservices are running correctly.
+
+    # tgi for llm service
+    validate_services \
+        "${ip_address}:8008/generate" \
+        "generated_text" \
+        "tgi-llm" \
+        "docsum-tgi-service" \
+        '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}'
+
+    # llm microservice
+    validate_services \
+        "${ip_address}:9000/v1/chat/docsum" \
+        "data: " \
+        "llm" \
+        "llm-docsum-server" \
+        '{"query":"Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}'
+}
+
+function validate_megaservice() {
+    local SERVICE_NAME="mega-docsum"
+    local DOCKER_NAME="docsum-backend-server"
+    local EXPECTED_RESULT="embedding"
+    local INPUT_DATA="messages=Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."
+    local URL="${ip_address}:8888/v1/docsum"
+
+    HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -F "$INPUT_DATA" -H 'Content-Type: multipart/form-data' "$URL")
+    local HTTP_STATUS
+    if [ "$HTTP_STATUS" -eq 200 ]; then
+        echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..."
+
+        CONTENT=$(curl -s -X POST -F "$INPUT_DATA" -H 'Content-Type: multipart/form-data' "$URL" | tee "${LOG_PATH}"/"${SERVICE_NAME}".log)
+        local CONTENT
+
+        if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then
+            echo "[ $SERVICE_NAME ] Content is as expected."
+        else
+            echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT"
+            docker logs "${DOCKER_NAME}" >> "${LOG_PATH}"/"${SERVICE_NAME}".log
+            exit 1
+        fi
+    else
+        echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS"
+        docker logs "${DOCKER_NAME}" >> "${LOG_PATH}"/"${SERVICE_NAME}".log
+        exit 1
+    fi
+    sleep 1s
+}
+
+#function validate_frontend() {
+#    cd "$WORKPATH"/ui/svelte
+#    local conda_env_name="OPEA_e2e"
+#    export PATH=${HOME}/miniforge3/bin/:$PATH
+#    if conda info --envs | grep -q "$conda_env_name"; then
+#        echo "$conda_env_name exist!"
+#    else
+#        conda create -n ${conda_env_name} python=3.12 -y
+#    fi
+#    source activate ${conda_env_name}
+#
+#    sed -i "s/localhost/$ip_address/g" playwright.config.ts
+#
+#    conda install -c conda-forge nodejs -y
+#    npm install && npm ci && npx playwright install --with-deps
+#    node -v && npm -v && pip list
+#
+#    exit_status=0
+#    npx playwright test || exit_status=$?
+#
+#    if [ $exit_status -ne 0 ]; then
+#        echo "[TEST INFO]: ---------frontend test failed---------"
+#        exit $exit_status
+#    else
+#        echo "[TEST INFO]: ---------frontend test passed---------"
+#    fi
+#}
+
+function stop_docker() {
+    cd "$WORKPATH"/docker_compose/amd/gpu/rocm
+    docker compose stop && docker compose rm -f
+}
+
+function main() {
+
+    stop_docker
+
+    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
+    start_services
+
+    validate_microservices
+    validate_megaservice
+    #validate_frontend
+
+    stop_docker
+    echo y | docker system prune
+
+}
+
+main

From f008f854eaaa89be710ed364d901afaabc42d5ed Mon Sep 17 00:00:00 2001
From: artem-astafev
Date: Wed, 13 Nov 2024 16:18:56 +0700
Subject: [PATCH 02/13] fix tests

Signed-off-by: artem-astafev
---
 .../docker_compose/amd/gpu/rocm/compose.yaml |  8 ++---
 DocSum/tests/test_compose_on_rocm.sh         | 33 ++++++++++---------
 2 files changed, 22 insertions(+), 19 deletions(-)

diff --git a/DocSum/docker_compose/amd/gpu/rocm/compose.yaml b/DocSum/docker_compose/amd/gpu/rocm/compose.yaml
index 23c540ed8..203724f7d 100644
--- a/DocSum/docker_compose/amd/gpu/rocm/compose.yaml
+++ b/DocSum/docker_compose/amd/gpu/rocm/compose.yaml
@@ -27,7 +27,7 @@ services:
     ipc: host
     command: --model-id ${DOCSUM_LLM_MODEL_ID}
   docsum-llm-server:
-    image: ${REGISTRY:-opea}/llm-docsum-tgi:${TAG:-latest}
+    image: ${REGISTRY:-opea}/llm-docsum-tgi:latest
     container_name: docsum-llm-server
     depends_on:
       - docsum-tgi-service
@@ -52,7 +52,7 @@ services:
       HUGGINGFACEHUB_API_TOKEN: ${DOCSUM_HUGGINGFACEHUB_API_TOKEN}
     restart: unless-stopped
   docsum-backend-server:
-    image: ${REGISTRY:-opea}/docsum:${TAG:-latest}
+    image: ${REGISTRY:-opea}/docsum:latest
     container_name: docsum-backend-server
     depends_on:
       - docsum-tgi-service
@@ -68,7 +68,7 @@ services:
     ipc: host
     restart: always
   docsum-ui-server:
-    image: ${REGISTRY:-opea}/docsum-ui:${TAG:-latest}
+    image: ${REGISTRY:-opea}/docsum-ui:latest
     container_name: docsum-ui-server
     depends_on:
       - docsum-backend-server
@@ -78,7 +78,7 @@ services:
       - no_proxy=${no_proxy}
       - https_proxy=${https_proxy}
       - http_proxy=${http_proxy}
-      - DOC_BASE_URL="http://${HOST_IP}:${DOCSUM_BACKEND_PORT}/v1/docsum"
+      - DOC_BASE_URL="http://${HOST_IP}:${DOCSUM_BACKEND_SERVER_PORT}/v1/docsum"
     ipc: host
     restart: always

diff --git a/DocSum/tests/test_compose_on_rocm.sh b/DocSum/tests/test_compose_on_rocm.sh
index a9190007f..dd139ff55 100644
--- a/DocSum/tests/test_compose_on_rocm.sh
+++ b/DocSum/tests/test_compose_on_rocm.sh
@@ -9,6 +9,21 @@ echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
 echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
 export REGISTRY=${IMAGE_REPO}
 export TAG=${IMAGE_TAG}
+export DOCSUM_TGI_IMAGE="ghcr.io/huggingface/text-generation-inference:2.3.1-rocm"
+export DOCSUM_LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
+export HOST_IP=${ip_address}
+export DOCSUM_TGI_SERVICE_PORT="8008"
+export DOCSUM_TGI_LLM_ENDPOINT="http://${HOST_IP}:8008"
+export DOCSUM_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
+export DOCSUM_LLM_SERVER_PORT="9000"
+export DOCSUM_BACKEND_SERVER_PORT="8888"
+export DOCSUM_FRONTEND_PORT="5552"
+export MEGA_SERVICE_HOST_IP=${ip_address}
+export LLM_SERVICE_HOST_IP=${ip_address}
+export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:8888/v1/docsum"
+export DOCSUM_CARD_ID="card1"
+export DOCSUM_RENDER_ID="renderD136"
+
 WORKPATH=$(dirname "$PWD")
 LOG_PATH="$WORKPATH/tests"
 ip_address=$(hostname -I | awk '{print $1}')
@@ -19,8 +34,7 @@ function build_docker_images() {
     git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
 
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
-    service_list="docsum docsum-ui llm-docsum-tgi"
-    docker compose -f build.yaml build "${service_list}" --no-cache > "${LOG_PATH}"/docker_image_build.log
+    docker compose -f build.yaml build --no-cache > "${LOG_PATH}"/docker_image_build.log
 
     docker pull ghcr.io/huggingface/text-generation-inference:2.3.1-rocm
     docker images && sleep 1s
@@ -29,18 +43,7 @@ function start_services() {
     cd "$WORKPATH"/docker_compose/amd/gpu/rocm
 
-    export DOCSUM_TGI_IMAGE="ghcr.io/huggingface/text-generation-inference:2.3.1-rocm"
-    export DOCSUM_LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
-    export HOST_IP=${ip_address}
-    export DOCSUM_TGI_SERVICE_PORT="8008"
-    export DOCSUM_TGI_LLM_ENDPOINT="http://${HOST_IP}:8008"
-    export DOCSUM_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
-    export DOCSUM_LLM_SERVER_PORT="9000"
-    export DOCSUM_BACKEND_SERVER_PORT="8888"
-    export DOCSUM_FRONTEND_PORT="5552"
-    export MEGA_SERVICE_HOST_IP=${ip_address}
-    export LLM_SERVICE_HOST_IP=${ip_address}
-    export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:8888/v1/docsum"
+
 
     sed -i "s/backend_address/$ip_address/g" "$WORKPATH"/ui/svelte/.env
@@ -174,7 +177,7 @@ function main() {
 
     stop_docker
 
-    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
+#    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
     start_services
 
     validate_microservices

From ac937f8bd5131e9b08e722cc08764270a4baf234 Mon Sep 17 00:00:00 2001
From: Artem Astafev
Date: Wed, 13 Nov 2024 16:26:24 +0700
Subject: [PATCH 03/13] Update compose.yaml

Signed-off-by: Artem Astafev
---
 DocSum/docker_compose/amd/gpu/rocm/compose.yaml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/DocSum/docker_compose/amd/gpu/rocm/compose.yaml b/DocSum/docker_compose/amd/gpu/rocm/compose.yaml
index 203724f7d..3e4aa7363 100644
--- a/DocSum/docker_compose/amd/gpu/rocm/compose.yaml
+++ b/DocSum/docker_compose/amd/gpu/rocm/compose.yaml
@@ -18,6 +18,8 @@ services:
     shm_size: 1g
     devices:
       - /dev/kfd:/dev/kfd
+      - /dev/dri/${DOCSUM_CARD_ID}:/dev/dri/${DOCSUM_CARD_ID}
+      - /dev/dri/${DOCSUM_RENDER_ID}:/dev/dri/${DOCSUM_RENDER_ID}
     cap_add:
       - SYS_PTRACE
     group_add:
       - video

From a1962598ed38a006e81748eac6871f2b68921cd4 Mon Sep 17 00:00:00 2001
From: Artem Astafev
Date: Wed, 13 Nov 2024 16:31:53 +0700
Subject: [PATCH 04/13] Update test_compose_on_rocm.sh

Signed-off-by: Artem Astafev
---
 DocSum/tests/test_compose_on_rocm.sh | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/DocSum/tests/test_compose_on_rocm.sh b/DocSum/tests/test_compose_on_rocm.sh
index dd139ff55..caf608012 100644
--- a/DocSum/tests/test_compose_on_rocm.sh
+++ b/DocSum/tests/test_compose_on_rocm.sh
@@ -67,13 +67,11 @@ function validate_services() {
     local DOCKER_NAME="$4"
     local INPUT_DATA="$5"
 
-    HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL")
-    local HTTP_STATUS
+    local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL")
     if [ "$HTTP_STATUS" -eq 200 ]; then
         echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..."
 
-        CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee "${LOG_PATH}"/"${SERVICE_NAME}".log)
-        local CONTENT
+        local CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee "${LOG_PATH}"/"${SERVICE_NAME}".log)
 
         if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then
             echo "[ $SERVICE_NAME ] Content is as expected."
@@ -117,13 +115,11 @@ function validate_megaservice() {
     local INPUT_DATA="messages=Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."
     local URL="${ip_address}:8888/v1/docsum"
 
-    HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -F "$INPUT_DATA" -H 'Content-Type: multipart/form-data' "$URL")
-    local HTTP_STATUS
+    local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -F "$INPUT_DATA" -H 'Content-Type: multipart/form-data' "$URL")
     if [ "$HTTP_STATUS" -eq 200 ]; then
         echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..."
 
-        CONTENT=$(curl -s -X POST -F "$INPUT_DATA" -H 'Content-Type: multipart/form-data' "$URL" | tee "${LOG_PATH}"/"${SERVICE_NAME}".log)
-        local CONTENT
+        local CONTENT=$(curl -s -X POST -F "$INPUT_DATA" -H 'Content-Type: multipart/form-data' "$URL" | tee "${LOG_PATH}"/"${SERVICE_NAME}".log)
 
         if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then
             echo "[ $SERVICE_NAME ] Content is as expected."

From 77d3746d31e32eb7f7eac4e59515e4902a6d5e1a Mon Sep 17 00:00:00 2001
From: Artem Astafev
Date: Wed, 13 Nov 2024 16:56:11 +0700
Subject: [PATCH 05/13] Update test_compose_on_rocm.sh

Signed-off-by: Artem Astafev
---
 DocSum/tests/test_compose_on_rocm.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/DocSum/tests/test_compose_on_rocm.sh b/DocSum/tests/test_compose_on_rocm.sh
index caf608012..3bd4b39ac 100644
--- a/DocSum/tests/test_compose_on_rocm.sh
+++ b/DocSum/tests/test_compose_on_rocm.sh
@@ -104,7 +104,7 @@ function validate_microservices() {
         "${ip_address}:9000/v1/chat/docsum" \
         "data: " \
         "llm" \
-        "llm-docsum-server" \
+        "docsum-llm-server" \
         '{"query":"Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}'
 }

From 82f418bf3439672ce570d05cbe2a1ca257b3d6b8 Mon Sep 17 00:00:00 2001
From: Artem Astafev
Date: Wed, 13 Nov 2024 16:59:27 +0700
Subject: [PATCH 06/13] Update test_compose_on_rocm.sh

Signed-off-by: Artem Astafev
---
 DocSum/tests/test_compose_on_rocm.sh | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/DocSum/tests/test_compose_on_rocm.sh b/DocSum/tests/test_compose_on_rocm.sh
index 3bd4b39ac..12bb734dd 100644
--- a/DocSum/tests/test_compose_on_rocm.sh
+++ b/DocSum/tests/test_compose_on_rocm.sh
@@ -7,6 +7,13 @@ IMAGE_REPO=${IMAGE_REPO:-"opea"}
 IMAGE_TAG=${IMAGE_TAG:-"latest"}
 echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
 echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
+
+
+
+WORKPATH=$(dirname "$PWD")
+LOG_PATH="$WORKPATH/tests"
+ip_address=$(hostname -I | awk '{print $1}')
+
 export REGISTRY=${IMAGE_REPO}
 export TAG=${IMAGE_TAG}
@@ -24,11 +31,6 @@ export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:8888/v1/docsum"
 export DOCSUM_CARD_ID="card1"
 export DOCSUM_RENDER_ID="renderD136"
 
-
-WORKPATH=$(dirname "$PWD")
-LOG_PATH="$WORKPATH/tests"
-ip_address=$(hostname -I | awk '{print $1}')
-
 function build_docker_images() {
     cd "$WORKPATH"/docker_image_build
     git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../

From 428d28bc9deac1ac8e36c574b1e927e6ef02e77a Mon Sep 17 00:00:00 2001
From: Artem Astafev
Date: Thu, 14 Nov 2024 12:07:25 +0700
Subject: [PATCH 07/13] Fix README.md

Signed-off-by: Artem Astafev
---
 DocSum/docker_compose/amd/gpu/rocm/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/DocSum/docker_compose/amd/gpu/rocm/README.md b/DocSum/docker_compose/amd/gpu/rocm/README.md
index 53a48461d..2b5036bcd 100644
--- a/DocSum/docker_compose/amd/gpu/rocm/README.md
+++ b/DocSum/docker_compose/amd/gpu/rocm/README.md
@@ -1,4 +1,4 @@
-## 🚀 Start Microservices and MegaService
+# 🚀 Start Microservices and MegaService
 
 ### Required Models
 

From 71419b00e3ab6906e4954486c38340d83b273719 Mon Sep 17 00:00:00 2001
From: Artem Astafev
Date: Thu, 14 Nov 2024 12:16:46 +0700
Subject: [PATCH 08/13] Update compose.yaml

Signed-off-by: Artem Astafev
---
 DocSum/docker_compose/amd/gpu/rocm/compose.yaml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/DocSum/docker_compose/amd/gpu/rocm/compose.yaml b/DocSum/docker_compose/amd/gpu/rocm/compose.yaml
index 3e4aa7363..d2d41bab4 100644
--- a/DocSum/docker_compose/amd/gpu/rocm/compose.yaml
+++ b/DocSum/docker_compose/amd/gpu/rocm/compose.yaml
@@ -29,7 +29,7 @@ services:
     ipc: host
     command: --model-id ${DOCSUM_LLM_MODEL_ID}
   docsum-llm-server:
-    image: ${REGISTRY:-opea}/llm-docsum-tgi:latest
+    image: ${REGISTRY:-opea}/llm-docsum-tgi:${TAG:-latest}
     container_name: docsum-llm-server
     depends_on:
       - docsum-tgi-service
@@ -54,7 +54,7 @@ services:
       HUGGINGFACEHUB_API_TOKEN: ${DOCSUM_HUGGINGFACEHUB_API_TOKEN}
     restart: unless-stopped
   docsum-backend-server:
-    image: ${REGISTRY:-opea}/docsum:latest
+    image: ${REGISTRY:-opea}/docsum:${TAG:-latest}
     container_name: docsum-backend-server
     depends_on:
       - docsum-tgi-service
@@ -70,7 +70,7 @@ services:
     ipc: host
     restart: always
   docsum-ui-server:
-    image: ${REGISTRY:-opea}/docsum-ui:latest
+    image: ${REGISTRY:-opea}/docsum-ui:${TAG:-latest}
     container_name: docsum-ui-server
     depends_on:
       - docsum-backend-server

From 783b1fa26d06a13a6bd0bfafba4faa8a8c24293d Mon Sep 17 00:00:00 2001
From: Artem Astafev
Date: Thu, 14 Nov 2024 12:55:00 +0700
Subject: [PATCH 09/13] Update test_compose_on_rocm.sh

Signed-off-by: Artem Astafev
---
 DocSum/tests/test_compose_on_rocm.sh | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/DocSum/tests/test_compose_on_rocm.sh b/DocSum/tests/test_compose_on_rocm.sh
index 12bb734dd..a3e008689 100644
--- a/DocSum/tests/test_compose_on_rocm.sh
+++ b/DocSum/tests/test_compose_on_rocm.sh
@@ -36,7 +36,8 @@ function build_docker_images() {
     git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
 
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
-    docker compose -f build.yaml build --no-cache > "${LOG_PATH}"/docker_image_build.log
+    service_list="docsum docsum-ui llm-docsum-tgi"
+    docker compose -f build.yaml build ${service_list} --no-cache > "${LOG_PATH}"/docker_image_build.log
 
     docker pull ghcr.io/huggingface/text-generation-inference:2.3.1-rocm
     docker images && sleep 1s

From 4b60760fbe5470e2a96f9bfc8a0ff507b98c7d9c Mon Sep 17 00:00:00 2001
From: Artem Astafev
Date: Thu, 14 Nov 2024 13:55:00 +0700
Subject: [PATCH 10/13] Update test_compose_on_rocm.sh

Signed-off-by: Artem Astafev
---
 DocSum/tests/test_compose_on_rocm.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/DocSum/tests/test_compose_on_rocm.sh b/DocSum/tests/test_compose_on_rocm.sh
index a3e008689..00d509d1c 100644
--- a/DocSum/tests/test_compose_on_rocm.sh
+++ b/DocSum/tests/test_compose_on_rocm.sh
@@ -176,7 +176,7 @@ function main() {
 
     stop_docker
 
-#    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
+    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
     start_services
 
     validate_microservices

From f07806fec3339d20d6ce14b4b9448a2cc8396869 Mon Sep 17 00:00:00 2001
From: Artem Astafev
Date: Thu, 14 Nov 2024 14:18:33 +0700
Subject: [PATCH 11/13] Update README.md

Signed-off-by: Artem Astafev
---
 DocSum/docker_compose/amd/gpu/rocm/README.md | 65 +++++++++++++++++++-
 1 file changed, 64 insertions(+), 1 deletion(-)

diff --git a/DocSum/docker_compose/amd/gpu/rocm/README.md b/DocSum/docker_compose/amd/gpu/rocm/README.md
index 2b5036bcd..f1bb14910 100644
--- a/DocSum/docker_compose/amd/gpu/rocm/README.md
+++ b/DocSum/docker_compose/amd/gpu/rocm/README.md
@@ -1,4 +1,67 @@
-# 🚀 Start Microservices and MegaService
+###### Copyright (C) 2024 Advanced Micro Devices, Inc.
+
+# Build and deploy DocSum Application on AMD GPU (ROCm)
+
+## Build images
+## 🚀 Build Docker Images
+
+First of all, you need to build the Docker images locally.
+
+### 1. Build LLM Image
+
+```bash
+git clone https://github.com/opea-project/GenAIComps.git
+cd GenAIComps
+docker build -t opea/llm-docsum-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/summarization/tgi/langchain/Dockerfile .
+```
+
+### 2. Build MegaService Docker Image
+
+To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `docsum.py` Python script. Build the MegaService Docker image via the command below:
+
+```bash
+git clone https://github.com/opea-project/GenAIExamples
+cd GenAIExamples/DocSum/
+docker build -t opea/docsum:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+```
+
+### 3. Build UI Docker Image
+
+Build the frontend Docker image via the command below:
+
+```bash
+cd GenAIExamples/DocSum/ui
+docker build -t opea/docsum-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f docker/Dockerfile .
+```
+
+Then run the command `docker images`; you will have the following Docker images:
+
+1. `opea/llm-docsum-tgi:latest`
+2. `opea/docsum:latest`
+3. `opea/docsum-ui:latest`
+
+### 4. Build React UI Docker Image
+
+Build the React frontend Docker image via the command below:
+
+```bash
+cd GenAIExamples/DocSum/ui
+export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:8888/v1/docsum"
+docker build -t opea/docsum-react-ui:latest --build-arg BACKEND_SERVICE_ENDPOINT=$BACKEND_SERVICE_ENDPOINT --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile.react .
+```
+
+Then run the command `docker images`; you will have the following Docker images:
+
+1. `opea/llm-docsum-tgi:latest`
+2. `opea/docsum:latest`
+3. `opea/docsum-ui:latest`
+4. `opea/docsum-react-ui:latest`
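+
+To double-check the builds, you can list the freshly built images (a quick sanity check, as shown below; the image names are the ones listed above):
+
+```bash
+# List only the DocSum-related images built in the steps above
+docker images | grep -E 'docsum|llm-docsum-tgi'
+```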
+
+## 🚀 Start Microservices and MegaService
 
 ### Required Models

From ca7b3842684fd07fa12089c250165b2c85e6b9bd Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Thu, 14 Nov 2024 07:18:56 +0000
Subject: [PATCH 12/13] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 DocSum/docker_compose/amd/gpu/rocm/README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/DocSum/docker_compose/amd/gpu/rocm/README.md b/DocSum/docker_compose/amd/gpu/rocm/README.md
index f1bb14910..304beaa1a 100644
--- a/DocSum/docker_compose/amd/gpu/rocm/README.md
+++ b/DocSum/docker_compose/amd/gpu/rocm/README.md
@@ -3,6 +3,7 @@
 # Build and deploy DocSum Application on AMD GPU (ROCm)
 
 ## Build images
+
 ## 🚀 Build Docker Images
 
 First of all, you need to build the Docker images locally.

From 6e241f6c42eb296f0ea27b8ed5450da451dd68bf Mon Sep 17 00:00:00 2001
From: Artem Astafev
Date: Fri, 15 Nov 2024 10:48:24 +0700
Subject: [PATCH 13/13] Update README.md

Signed-off-by: Artem Astafev
---
 DocSum/docker_compose/amd/gpu/rocm/README.md | 2 --
 1 file changed, 2 deletions(-)

diff --git a/DocSum/docker_compose/amd/gpu/rocm/README.md b/DocSum/docker_compose/amd/gpu/rocm/README.md
index 304beaa1a..0a40d17f3 100644
--- a/DocSum/docker_compose/amd/gpu/rocm/README.md
+++ b/DocSum/docker_compose/amd/gpu/rocm/README.md
@@ -1,5 +1,3 @@
-###### Copyright (C) 2024 Advanced Micro Devices, Inc.
-
 # Build and deploy DocSum Application on AMD GPU (ROCm)
 
 ## Build images
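
To try the whole series end to end, one possible flow is sketched below (the mbox file name is hypothetical, and it assumes a GenAIExamples checkout):

```bash
# Apply the series (hypothetical file name for the exported patches)
git am docsum-rocm-compose.mbox
# Set up the environment and start the example
cd DocSum/docker_compose/amd/gpu/rocm
export host_ip=<your-external-ip>  # do not use localhost
source ./set_env.sh
docker compose up -d
```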