# Copyright (C) 2025 Zensar Technologies Private Ltd.
# SPDX-License-Identifier: Apache-2.0

import os

from comps import MicroService, ServiceOrchestrator, ServiceRoleType, ServiceType
from comps.cores.mega.utils import handle_message
from comps.cores.proto.api_protocol import (
    ArbPostHearingAssistantChatCompletionRequest,
    ChatCompletionResponse,
    ChatCompletionResponseChoice,
    ChatMessage,
    UsageInfo,
)
from fastapi import Request
from fastapi.responses import StreamingResponse

MEGA_SERVICE_PORT = int(os.getenv("MEGA_SERVICE_PORT", 8888))

LLM_SERVICE_HOST_IP = os.getenv("LLM_SERVICE_HOST_IP", "0.0.0.0")
LLM_SERVICE_PORT = int(os.getenv("LLM_SERVICE_PORT", 9000))


def align_inputs(self, inputs, cur_node, runtime_graph, llm_parameters_dict, **kwargs):
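    """ServiceOrchestrator hook: reshape the payload sent to the downstream ARB post-hearing
    assistant node (rename text/asr_result to messages, merge in the request parameters, and
    drop fields the downstream service does not accept)."""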
    if self.services[cur_node].service_type == ServiceType.ARB_POST_HEARING_ASSISTANT:
        for key_to_replace in ["text", "asr_result"]:
            if key_to_replace in inputs:
                inputs["messages"] = inputs[key_to_replace]
                del inputs[key_to_replace]

        arbPostHearingAssistant_parameters = kwargs.get("arbPostHearingAssistant_parameters", None)
        if arbPostHearingAssistant_parameters:
            arbPostHearingAssistant_parameters = arbPostHearingAssistant_parameters.model_dump()
            del arbPostHearingAssistant_parameters["messages"]
            inputs.update(arbPostHearingAssistant_parameters)
            if "id" in inputs:
                del inputs["id"]
            if "max_new_tokens" in inputs:
                del inputs["max_new_tokens"]
            if "input" in inputs:
                del inputs["input"]
    return inputs


def align_outputs(self, data, *args, **kwargs):
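    """ServiceOrchestrator hook: pass the downstream response through unchanged."""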
    return data


class OpeaArbPostHearingAssistantService:
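    """Megaservice gateway that fronts the remote ARB post-hearing assistant microservice."""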
    def __init__(self, host="0.0.0.0", port=8000):
        self.host = host
        self.port = port
        ServiceOrchestrator.align_inputs = align_inputs
        ServiceOrchestrator.align_outputs = align_outputs
        self.megaservice = ServiceOrchestrator()
        self.endpoint = "/v1/arb-post-hearing"

    def add_remote_service(self):
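        """Register the remote ARB post-hearing assistant microservice with the orchestrator."""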
        arb_post_hearing_assistant = MicroService(
            name="opea_service@arb_post_hearing_assistant",
            host=LLM_SERVICE_HOST_IP,
            port=LLM_SERVICE_PORT,
            endpoint="/v1/arb-post-hearing",
            use_remote_service=True,
            service_type=ServiceType.ARB_POST_HEARING_ASSISTANT,
        )
        self.megaservice.add(arb_post_hearing_assistant)

    async def handle_request(self, request: Request):
        """Accept a JSON request containing plain text and schedule it on the megaservice."""
        if "application/json" in request.headers.get("content-type", ""):
            data = await request.json()
            chunk_size = data.get("chunk_size", -1)
            chunk_overlap = data.get("chunk_overlap", -1)
            chat_request = ArbPostHearingAssistantChatCompletionRequest.model_validate(data)
            prompt = handle_message(chat_request.messages)
            print(f"messages: {chat_request.messages}")
            print(f"prompt: {prompt}")
            # The request's "type" field selects the initial-input key ("text" or "asr_result"),
            # which align_inputs later rewrites to "messages".
            initial_inputs_data = {data["type"]: prompt}
        else:
            raise ValueError(f"Unknown request type: {request.headers.get('content-type')}")

        arbPostHearingAssistant_parameters = ArbPostHearingAssistantChatCompletionRequest(
            messages=chat_request.messages,
            max_tokens=chat_request.max_tokens if chat_request.max_tokens else 1024,
            top_k=chat_request.top_k if chat_request.top_k else 10,
            top_p=chat_request.top_p if chat_request.top_p else 0.95,
            temperature=chat_request.temperature if chat_request.temperature else 0.01,
            frequency_penalty=chat_request.frequency_penalty if chat_request.frequency_penalty else 0.0,
            presence_penalty=chat_request.presence_penalty if chat_request.presence_penalty else 0.0,
            repetition_penalty=chat_request.repetition_penalty if chat_request.repetition_penalty else 1.03,
            model=chat_request.model if chat_request.model else None,
            language=chat_request.language if chat_request.language else "en",
            chunk_overlap=chunk_overlap,
            chunk_size=chunk_size,
        )
        result_dict, runtime_graph = await self.megaservice.schedule(
            initial_inputs=initial_inputs_data, arbPostHearingAssistant_parameters=arbPostHearingAssistant_parameters
        )

        for node, response in result_dict.items():
            # Assumes the last microservice in the megaservice is the LLM; if it streamed, return its response directly.
            if (
                isinstance(response, StreamingResponse)
                and node == list(self.megaservice.services.keys())[-1]
                and self.megaservice.services[node].service_type == ServiceType.ARB_POST_HEARING_ASSISTANT
            ):
                return response

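        # Non-streaming path: wrap the final leaf node's text in an OpenAI-style chat completion response.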
        last_node = runtime_graph.all_leaves()[-1]
        response = result_dict[last_node]["text"]
        choices = []
        usage = UsageInfo()
        choices.append(
            ChatCompletionResponseChoice(
                index=0,
                message=ChatMessage(role="assistant", content=response),
                finish_reason="stop",
            )
        )
        return ChatCompletionResponse(model="arbPostHearingAssistant", choices=choices, usage=usage)

    def start(self):
        self.service = MicroService(
            self.__class__.__name__,
            service_role=ServiceRoleType.MEGASERVICE,
            host=self.host,
            port=self.port,
            endpoint=self.endpoint,
            input_datatype=ArbPostHearingAssistantChatCompletionRequest,
            output_datatype=ChatCompletionResponse,
        )
        self.service.add_route(self.endpoint, self.handle_request, methods=["POST"])
        self.service.start()


if __name__ == "__main__":
    arbPostHearingAssistant = OpeaArbPostHearingAssistantService(port=MEGA_SERVICE_PORT)
    arbPostHearingAssistant.add_remote_service()
    arbPostHearingAssistant.start()
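# Illustrative request (a sketch, not emitted by this file): the exact schema is owned by
# ArbPostHearingAssistantChatCompletionRequest, but based on handle_request above a call is
# assumed to look roughly like:
#
#   curl -X POST http://localhost:8888/v1/arb-post-hearing \
#     -H "Content-Type: application/json" \
#     -d '{"type": "text", "messages": "Summarize the hearing transcript ...", "max_tokens": 1024}'
#
# "type" selects the initial-input key ("text" or "asr_result"); chunk_size, chunk_overlap and the
# sampling parameters are optional and fall back to the defaults set in handle_request.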