17 changes: 17 additions & 0 deletions DocSum/Dockerfile.openEuler
@@ -0,0 +1,17 @@
# Copyright (C) 2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: Apache-2.0

ARG IMAGE_REPO=opea
ARG BASE_TAG=latest
FROM $IMAGE_REPO/comps-base:$BASE_TAG-openeuler

USER root
# FFmpeg needed for media processing
RUN yum update -y && \
yum install -y ffmpeg && \
yum clean all && rm -rf /var/cache/yum
USER user

COPY ./docsum.py $HOME/docsum.py

ENTRYPOINT ["python", "docsum.py"]
100 changes: 100 additions & 0 deletions DocSum/docker_compose/intel/cpu/xeon/compose_openeuler.yaml
@@ -0,0 +1,100 @@
# Copyright (C) 2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: Apache-2.0

services:
vllm-service:
image: openeuler/vllm-cpu:0.10.1-oe2403lts
container_name: docsum-xeon-vllm-service
ports:
- ${LLM_ENDPOINT_PORT:-8008}:80
volumes:
- "${MODEL_CACHE:-./data}:/root/.cache/huggingface/hub"
shm_size: 1g
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
HF_TOKEN: ${HF_TOKEN}
LLM_MODEL_ID: ${LLM_MODEL_ID}
VLLM_TORCH_PROFILER_DIR: "/mnt"
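# vLLM interprets VLLM_CPU_KVCACHE_SPACE as GiB reserved for the KV cache; 40 assumes a large-memory host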
VLLM_CPU_KVCACHE_SPACE: 40
healthcheck:
test: ["CMD-SHELL", "curl -f http://localhost:80/health || exit 1"]
interval: 10s
timeout: 10s
retries: 100
command: --model ${LLM_MODEL_ID} --host 0.0.0.0 --port 80

llm-docsum-vllm:
image: ${REGISTRY:-opea}/llm-docsum:${TAG:-latest}-openeuler
container_name: docsum-xeon-llm-server
depends_on:
vllm-service:
condition: service_healthy
ports:
- ${LLM_PORT:-9000}:9000
ipc: host
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
LLM_ENDPOINT: ${LLM_ENDPOINT}
LLM_MODEL_ID: ${LLM_MODEL_ID}
HF_TOKEN: ${HF_TOKEN}
MAX_INPUT_TOKENS: ${MAX_INPUT_TOKENS}
MAX_TOTAL_TOKENS: ${MAX_TOTAL_TOKENS}
DocSum_COMPONENT_NAME: ${DocSum_COMPONENT_NAME}
LOGFLAG: ${LOGFLAG:-False}
restart: unless-stopped

whisper:
image: ${REGISTRY:-opea}/whisper:${TAG:-latest}-openeuler
container_name: docsum-xeon-whisper-server
ports:
- ${ASR_SERVICE_PORT:-7066}:7066
ipc: host
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
restart: unless-stopped

docsum-xeon-backend-server:
image: ${REGISTRY:-opea}/docsum:${TAG:-latest}-openeuler
container_name: docsum-xeon-backend-server
depends_on:
- vllm-service
- llm-docsum-vllm
ports:
- "${BACKEND_SERVICE_PORT:-8888}:8888"
environment:
- no_proxy=${no_proxy}
- https_proxy=${https_proxy}
- http_proxy=${http_proxy}
- MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
- LLM_SERVICE_HOST_IP=${LLM_SERVICE_HOST_IP}
- LLM_SERVICE_PORT=${LLM_PORT}
- ASR_SERVICE_HOST_IP=${ASR_SERVICE_HOST_IP}
- ASR_SERVICE_PORT=${ASR_SERVICE_PORT}
ipc: host
restart: always

docsum-gradio-ui:
image: ${REGISTRY:-opea}/docsum-gradio-ui:${TAG:-latest}-openeuler
container_name: docsum-xeon-ui-server
depends_on:
- docsum-xeon-backend-server
ports:
- "${FRONTEND_SERVICE_PORT:-5173}:5173"
environment:
- no_proxy=${no_proxy}
- https_proxy=${https_proxy}
- http_proxy=${http_proxy}
- BACKEND_SERVICE_ENDPOINT=${BACKEND_SERVICE_ENDPOINT}
- DOC_BASE_URL=${BACKEND_SERVICE_ENDPOINT}
ipc: host
restart: always

networks:
default:
driver: bridge
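A hedged launch sketch for this vLLM variant; every value below is an illustrative placeholder (the model, token limits, and component name should be verified against your GenAIComps version):

export host_ip=$(hostname -I | awk '{print $1}')    # assumption: the first host IP is reachable
export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3       # example model only
export HF_TOKEN=<your_huggingface_token>
export MAX_INPUT_TOKENS=1024
export MAX_TOTAL_TOKENS=2048
export LLM_ENDPOINT=http://${host_ip}:8008
export DocSum_COMPONENT_NAME=OpeaDocSumvLLM         # name used by the upstream doc-summarization component; verify
export MEGA_SERVICE_HOST_IP=${host_ip}
export LLM_SERVICE_HOST_IP=${host_ip}
export ASR_SERVICE_HOST_IP=${host_ip}
export BACKEND_SERVICE_ENDPOINT=http://${host_ip}:8888/v1/docsum
docker compose -f compose_openeuler.yaml up -d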
99 changes: 99 additions & 0 deletions DocSum/docker_compose/intel/cpu/xeon/compose_tgi_openeuler.yaml
@@ -0,0 +1,99 @@
# Copyright (C) 2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: Apache-2.0

services:
tgi-server:
image: openeuler/text-generation-inference-cpu:2.4.0-oe2403lts
container_name: docsum-xeon-tgi-server
ports:
- ${LLM_ENDPOINT_PORT:-8008}:80
volumes:
- "${MODEL_CACHE:-./data}:/data"
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
HF_TOKEN: ${HF_TOKEN}
host_ip: ${host_ip}
healthcheck:
test: ["CMD-SHELL", "curl -f http://localhost:80/health || exit 1"]
interval: 10s
timeout: 10s
retries: 100
shm_size: 1g
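# --cuda-graphs 0 disables CUDA graph capture; it has no effect on this CPU-only image but matches the upstream Xeon command line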
command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0 --max-input-length ${MAX_INPUT_TOKENS} --max-total-tokens ${MAX_TOTAL_TOKENS}

llm-docsum-tgi:
image: ${REGISTRY:-opea}/llm-docsum:${TAG:-latest}-openeuler
container_name: docsum-xeon-llm-server
depends_on:
tgi-server:
condition: service_healthy
ports:
- ${LLM_PORT:-9000}:9000
ipc: host
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
LLM_ENDPOINT: ${LLM_ENDPOINT}
LLM_MODEL_ID: ${LLM_MODEL_ID}
HF_TOKEN: ${HF_TOKEN}
MAX_INPUT_TOKENS: ${MAX_INPUT_TOKENS}
MAX_TOTAL_TOKENS: ${MAX_TOTAL_TOKENS}
DocSum_COMPONENT_NAME: ${DocSum_COMPONENT_NAME}
LOGFLAG: ${LOGFLAG:-False}
restart: unless-stopped

whisper:
image: ${REGISTRY:-opea}/whisper:${TAG:-latest}-openeuler
container_name: docsum-xeon-whisper-server
ports:
- ${ASR_SERVICE_PORT:-7066}:7066
ipc: host
environment:
no_proxy: ${no_proxy}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
restart: unless-stopped

docsum-xeon-backend-server:
image: ${REGISTRY:-opea}/docsum:${TAG:-latest}-openeuler
container_name: docsum-xeon-backend-server
depends_on:
- tgi-server
- llm-docsum-tgi
ports:
- "${BACKEND_SERVICE_PORT:-8888}:8888"
environment:
- no_proxy=${no_proxy}
- https_proxy=${https_proxy}
- http_proxy=${http_proxy}
- MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
- LLM_SERVICE_HOST_IP=${LLM_SERVICE_HOST_IP}
- LLM_SERVICE_PORT=${LLM_PORT}
- ASR_SERVICE_HOST_IP=${ASR_SERVICE_HOST_IP}
- ASR_SERVICE_PORT=${ASR_SERVICE_PORT}
ipc: host
restart: always

docsum-gradio-ui:
image: ${REGISTRY:-opea}/docsum-gradio-ui:${TAG:-latest}-openeuler
container_name: docsum-xeon-ui-server
depends_on:
- docsum-xeon-backend-server
ports:
- "${FRONTEND_SERVICE_PORT:-5173}:5173"
environment:
- no_proxy=${no_proxy}
- https_proxy=${https_proxy}
- http_proxy=${http_proxy}
- BACKEND_SERVICE_ENDPOINT=${BACKEND_SERVICE_ENDPOINT}
- DOC_BASE_URL=${BACKEND_SERVICE_ENDPOINT}
ipc: host
restart: always

networks:
default:
driver: bridge
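Once the stack reports healthy, a smoke test against the gateway (a sketch assuming the megaservice exposes the upstream /v1/docsum route on the default BACKEND_SERVICE_PORT of 8888; the request shape follows the upstream DocSum examples):

curl -X POST http://${host_ip}:8888/v1/docsum \
  -H "Content-Type: application/json" \
  -d '{"type": "text", "messages": "OPEA DocSum condenses long documents, audio, and video transcripts into short summaries."}'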
47 changes: 47 additions & 0 deletions DocSum/docker_image_build/build.yaml
@@ -13,6 +13,17 @@ services:
context: ../
dockerfile: ./Dockerfile
image: ${REGISTRY:-opea}/docsum:${TAG:-latest}
docsum-openeuler:
build:
args:
IMAGE_REPO: ${REGISTRY}
BASE_TAG: ${TAG}
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
no_proxy: ${no_proxy}
context: ../
dockerfile: ./Dockerfile.openEuler
image: ${REGISTRY:-opea}/docsum:${TAG:-latest}-openeuler
docsum-gradio-ui:
build:
args:
@@ -22,18 +33,39 @@ services:
dockerfile: ./docker/Dockerfile.gradio
extends: docsum
image: ${REGISTRY:-opea}/docsum-gradio-ui:${TAG:-latest}
docsum-gradio-ui-openeuler:
build:
args:
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
context: ../ui
dockerfile: ./docker/Dockerfile.gradio.openEuler
extends: docsum
image: ${REGISTRY:-opea}/docsum-gradio-ui:${TAG:-latest}-openeuler
docsum-ui:
build:
context: ../ui
dockerfile: ./docker/Dockerfile
extends: docsum
image: ${REGISTRY:-opea}/docsum-ui:${TAG:-latest}
docsum-ui-openeuler:
build:
context: ../ui
dockerfile: ./docker/Dockerfile.openEuler
extends: docsum
image: ${REGISTRY:-opea}/docsum-ui:${TAG:-latest}-openeuler
docsum-react-ui:
build:
context: ../ui
dockerfile: ./docker/Dockerfile.react
extends: docsum
image: ${REGISTRY:-opea}/docsum-react-ui:${TAG:-latest}
docsum-react-ui-openeuler:
build:
context: ../ui
dockerfile: ./docker/Dockerfile.react.openEuler
extends: docsum
image: ${REGISTRY:-opea}/docsum-react-ui:${TAG:-latest}-openeuler
whisper:
build:
args:
@@ -43,12 +75,27 @@ services:
dockerfile: comps/third_parties/whisper/src/Dockerfile
extends: docsum
image: ${REGISTRY:-opea}/whisper:${TAG:-latest}
whisper-openeuler:
build:
args:
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
context: GenAIComps
dockerfile: comps/third_parties/whisper/src/Dockerfile.openEuler
extends: docsum
image: ${REGISTRY:-opea}/whisper:${TAG:-latest}-openeuler
llm-docsum:
build:
context: GenAIComps
dockerfile: comps/llms/src/doc-summarization/Dockerfile
extends: docsum
image: ${REGISTRY:-opea}/llm-docsum:${TAG:-latest}
llm-docsum-openeuler:
build:
context: GenAIComps
dockerfile: comps/llms/src/doc-summarization/Dockerfile.openEuler
extends: docsum
image: ${REGISTRY:-opea}/llm-docsum:${TAG:-latest}-openeuler
vllm-rocm:
build:
context: GenAIComps
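With these targets defined, the openEuler image set can be built through the same compose build flow as the x86 images (a sketch assuming GenAIComps is cloned next to build.yaml, as the upstream build instructions expect):

cd DocSum/docker_image_build
git clone https://github.com/opea-project/GenAIComps.git   # skip if already present
docker compose -f build.yaml build docsum-openeuler docsum-gradio-ui-openeuler docsum-ui-openeuler docsum-react-ui-openeuler whisper-openeuler llm-docsum-openeuler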