From 212866d7dfac0932dff24f1bb599c37204654327 Mon Sep 17 00:00:00 2001
From: Dhruv Diddi
Date: Sun, 17 Nov 2024 20:38:45 -0800
Subject: [PATCH 1/6] feat: add locust and grafana for benchmarking

---
 solo_server/base.py                                | 138 +++++++++---------
 .../benchmark/docker-compose-benchmark.yml         |  63 ++++++++
 solo_server/benchmark/grafana_setup.sh             |  20 +++
 solo_server/benchmark/locustfile.py                |  13 ++
 solo_server/docker-compose.yml                     |   2 +
 5 files changed, 170 insertions(+), 66 deletions(-)
 create mode 100644 solo_server/benchmark/docker-compose-benchmark.yml
 create mode 100644 solo_server/benchmark/grafana_setup.sh
 create mode 100644 solo_server/benchmark/locustfile.py

diff --git a/solo_server/base.py b/solo_server/base.py
index 4af1217..9900252 100644
--- a/solo_server/base.py
+++ b/solo_server/base.py
@@ -1,117 +1,123 @@
 import typer
-from subprocess import run, CalledProcessError
+from subprocess import run, CalledProcessError, DEVNULL
 import os
+import sys
 
 app = typer.Typer(help="šŸ› ļø Solo Server CLI for managing edge AI model inference using Docker-style commands.")
 
 def execute_command(command: list):
+    """Utility function to execute shell commands."""
     try:
         run(command, check=True)
     except CalledProcessError as e:
         typer.echo(f"āŒ Error: {e}")
         raise typer.Exit(code=1)
 
-# Recurring prompt to ask for the next command
-@app.command()
-def prompt():
-    """
-    šŸ”„ Recurring prompt for managing the Solo Server.
-    """
-    while True:
-        typer.echo("\nWhat would you like to do?")
-        typer.echo("1. šŸš€ Start the Solo Server")
-        typer.echo("2. ā¹ Stop the Solo Server")
-        typer.echo("3. šŸ“ˆ Check the Solo Server status")
-        typer.echo("4. šŸ–Œļø Generate a code base template")
-        typer.echo("5. āŒ Exit")
-        choice = typer.prompt("Enter the number of your choice")
+def check_docker_installation():
+    """Ensure Docker and Docker Compose are installed and user has necessary permissions."""
+    typer.echo("šŸ” Checking Docker and Docker Compose installation...")
 
-        if choice == "1":
-            tag = typer.prompt("Enter the tag name to start the server with")
-            start(tag)
-        elif choice == "2":
-            stop()
-        elif choice == "3":
-            status()
-        elif choice == "4":
-            tag = typer.prompt("Enter the tag name for the code base template")
-            gen(tag)
-        elif choice == "5":
-            typer.echo("āŒ Exiting the Solo Server CLI. Goodbye!")
-            break
-        else:
-            typer.echo("āš ļø Invalid choice. Please try again.")
+    # Check Docker
+    try:
+        run(["docker", "--version"], stdout=DEVNULL, stderr=DEVNULL, check=True)
+    except FileNotFoundError:
+        typer.echo("āŒ Docker is not installed. Installing Docker...")
+        # A "|" in an argv list is passed to curl literally, so run the
+        # install pipeline through a shell instead.
+        execute_command([
+            "sh", "-c", "curl -fsSL https://get.docker.com | sh"
+        ])
+    except CalledProcessError:
+        typer.echo("āŒ Docker is installed but not accessible. Please ensure you have the correct permissions.")
+        typer.echo("šŸ”‘ Run the following to add your user to the Docker group:")
+        typer.echo("   sudo usermod -aG docker $USER && newgrp docker")
+        sys.exit(1)
 
-# Command to start the Solo Server, expects a tag name
+    # Check Docker Compose
+    try:
+        run(["docker-compose", "--version"], stdout=DEVNULL, stderr=DEVNULL, check=True)
+    except FileNotFoundError:
+        typer.echo("āŒ Docker Compose is not installed. 
Installing Docker Compose...") + execute_command([ + "curl", "-L", "https://github.com/docker/compose/releases/latest/download/docker-compose-$(uname -s)-$(uname -m)", + "-o", "/usr/local/bin/docker-compose" + ]) + execute_command(["chmod", "+x", "/usr/local/bin/docker-compose"]) + except CalledProcessError: + typer.echo("āŒ Docker Compose is installed but not accessible.") + sys.exit(1) + + typer.echo("āœ… Docker and Docker Compose are installed and accessible.") @app.command() -def start( - tag: str, - model_url: str = typer.Option( - None, - "--model-url", "-u", - help="URL for the LLM model (only used with llm tag)" - ), - model_filename: str = typer.Option( - None, - "--model-filename", "-f", - help="Filename for the LLM model (only used with llm tag)" - ) -): +def start(tag: str): """ šŸš€ Start the Solo Server for model inference. """ + check_docker_installation() typer.echo(f"šŸš€ Starting the Solo Server with tag: {tag}...") - - if tag == "llm": - # Default values for llm tag - default_url = "https://huggingface.co/Mozilla/Llama-3.2-1B-Instruct-llamafile/resolve/main/Llama-3.2-1B-Instruct.Q6_K.llamafile" - default_filename = "Llama-3.2-1B-Instruct.Q6_K.llamafile" - - # Use provided values or defaults - os.environ["MODEL_URL"] = model_url or default_url - os.environ["MODEL_FILENAME"] = model_filename or default_filename - elif (model_url or model_filename) and tag != "llm": - typer.echo("āš ļø Warning: model-url and model-filename are only used with the llm tag") - python_file = f"templates/{tag}.py" os.environ["PYTHON_FILE"] = python_file - - # Get the current file's directory and construct the full path current_dir = os.path.dirname(os.path.abspath(__file__)) docker_compose_path = os.path.join(current_dir, "docker-compose.yml") - execute_command(["docker-compose", "-f", docker_compose_path, "up", "--build"]) + execute_command(["docker-compose", "-f", docker_compose_path, "up", "-d"]) -# Command to stop the Solo Server @app.command() def stop(): """ ā¹ Stop the running Solo Server. """ + check_docker_installation() typer.echo("ā¹ Stopping the Solo Server...") current_dir = os.path.dirname(os.path.abspath(__file__)) docker_compose_path = os.path.join(current_dir, "docker-compose.yml") execute_command(["docker-compose", "-f", docker_compose_path, "down"]) -# Command to check the status of the Solo Server @app.command() def status(): """ šŸ“ˆ Check the status of the Solo Server. """ + check_docker_installation() typer.echo("šŸ“ˆ Checking Solo Server status...") current_dir = os.path.dirname(os.path.abspath(__file__)) docker_compose_path = os.path.join(current_dir, "docker-compose.yml") execute_command(["docker-compose", "-f", docker_compose_path, "ps"]) -# Command to generate a code base template related to the tag @app.command() -def gen(tag: str): +def benchmark(): """ - šŸ–Œļø Generate a code base template related to the tag. + šŸŽļø Run a benchmark test on the Solo Server with TimescaleDB and Grafana integration. 
""" - typer.echo(f"šŸ–Œļø Generating code base template for tag: {tag}...") - # Add logic to generate a template based on the provided tag + check_docker_installation() + typer.echo("šŸŽļø Starting benchmark test...") + current_dir = os.path.dirname(os.path.abspath(__file__)) + docker_compose_path = os.path.join(current_dir, "docker-compose.yml") + + # Start TimescaleDB and Grafana + typer.echo("šŸ› ļø Setting up Grafana and TimescaleDB...") + execute_command(["docker-compose", "-f", docker_compose_path, "up", "-d"]) + + # Run Locust benchmark + locust_command = [ + "locust", + "--headless", + "--users", "10", + "--spawn-rate", "2", + "--run-time", "1m", + "--host", "http://localhost:8000", + "--timescale" + ] + + try: + execute_command(locust_command) + except Exception as e: + typer.echo(f"āŒ Benchmark failed: {e}") + else: + typer.echo("āœ… Benchmark test completed successfully.") + typer.echo("šŸ“Š Visit Grafana at http://localhost:3000 to view the results.") + + # Teardown + typer.echo("ā¹ Stopping Grafana and TimescaleDB...") + execute_command(["docker-compose", "-f", docker_compose_path, "down"]) if __name__ == "__main__": app() diff --git a/solo_server/benchmark/docker-compose-benchmark.yml b/solo_server/benchmark/docker-compose-benchmark.yml new file mode 100644 index 0000000..d27c00d --- /dev/null +++ b/solo_server/benchmark/docker-compose-benchmark.yml @@ -0,0 +1,63 @@ +version: '3.7' + +services: + timescale: + image: cyberw/locust-timescale:latest + container_name: timescale_postgres + environment: + POSTGRES_PASSWORD: password + PGDATA: /var/lib/postgresql/data + ports: + - "5432:5432" + volumes: + - timescale_postgres_data:/var/lib/postgresql/data + + grafana: + image: grafana/grafana:latest + container_name: timescale_grafana + ports: + - "3000:3000" + environment: + - GF_SECURITY_ADMIN_PASSWORD=admin + depends_on: + - timescale + volumes: + - grafana_data:/var/lib/grafana + + locust: + build: + context: ./app/benchmark + container_name: locust_benchmark + command: > + locust + --headless + --users 10 + --spawn-rate 2 + --run-time 1m + --host http://solo-api:8000 + --timescale + depends_on: + - solo-api + environment: + LOCUST_HOST: http://solo-api:8000 + networks: + - solo-network + + solo-api: + build: + context: . 
+ args: + PYTHON_FILE: ${PYTHON_FILE:-solo_server/templates/basic.py} + container_name: "solo-api" + environment: + - PYTHON_FILE=${PYTHON_FILE:-solo_server/templates/basic.py} + - MODEL_URL=${MODEL_URL:-your_model_url_here} + - MODEL_FILENAME=${MODEL_FILENAME:-your_model_filename_here} + +volumes: + timescale_postgres_data: + grafana_data: + +networks: + solo-network: + driver: bridge diff --git a/solo_server/benchmark/grafana_setup.sh b/solo_server/benchmark/grafana_setup.sh new file mode 100644 index 0000000..d425607 --- /dev/null +++ b/solo_server/benchmark/grafana_setup.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +GRAFANA_URL="http://localhost:3000" +ADMIN_PASSWORD="admin" +DATASOURCE_NAME="TimescaleDB" + +# Add a new TimescaleDB datasource +curl -X POST -H "Content-Type: application/json" \ + -u admin:$ADMIN_PASSWORD \ + -d '{ + "name": "'"$DATASOURCE_NAME"'", + "type": "postgres", + "url": "timescale:5432", + "access": "proxy", + "database": "locust", + "user": "postgres", + "password": "password", + "isDefault": true + }' \ + $GRAFANA_URL/api/datasources diff --git a/solo_server/benchmark/locustfile.py b/solo_server/benchmark/locustfile.py new file mode 100644 index 0000000..3c13f97 --- /dev/null +++ b/solo_server/benchmark/locustfile.py @@ -0,0 +1,13 @@ +from locust import HttpUser, task + +class SoloServerUser(HttpUser): + wait_time = lambda self: 0 # No wait between tasks + + @task + def generate_text(self): + """Simulates text generation requests.""" + payload = { + "prompt": "Generate a Solo app with load tests.", + "max_tokens": 100, + } + self.client.post("/v1/completions", json=payload) diff --git a/solo_server/docker-compose.yml b/solo_server/docker-compose.yml index 8cd3c3c..689d459 100644 --- a/solo_server/docker-compose.yml +++ b/solo_server/docker-compose.yml @@ -1,3 +1,5 @@ +version: '3.7' + services: solo-api: build: From c0cff6ca6ca7cb731f504b9a5b2fdbdc19e1cdd2 Mon Sep 17 00:00:00 2001 From: Dhruv Diddi Date: Sun, 17 Nov 2024 21:14:42 -0800 Subject: [PATCH 2/6] fix: add locust base command support --- solo_server/base.py | 7 +------ solo_server/{benchmark => }/docker-compose-benchmark.yml | 0 solo_server/{benchmark => }/grafana_setup.sh | 0 solo_server/{benchmark => }/locustfile.py | 0 solo_server/requirements.txt | 5 ++++- 5 files changed, 5 insertions(+), 7 deletions(-) rename solo_server/{benchmark => }/docker-compose-benchmark.yml (100%) rename solo_server/{benchmark => }/grafana_setup.sh (100%) rename solo_server/{benchmark => }/locustfile.py (100%) diff --git a/solo_server/base.py b/solo_server/base.py index 9900252..cec8722 100644 --- a/solo_server/base.py +++ b/solo_server/base.py @@ -99,16 +99,11 @@ def benchmark(): # Run Locust benchmark locust_command = [ "locust", - "--headless", - "--users", "10", - "--spawn-rate", "2", - "--run-time", "1m", - "--host", "http://localhost:8000", "--timescale" ] try: - execute_command(locust_command) + print(execute_command(locust_command)) except Exception as e: typer.echo(f"āŒ Benchmark failed: {e}") else: diff --git a/solo_server/benchmark/docker-compose-benchmark.yml b/solo_server/docker-compose-benchmark.yml similarity index 100% rename from solo_server/benchmark/docker-compose-benchmark.yml rename to solo_server/docker-compose-benchmark.yml diff --git a/solo_server/benchmark/grafana_setup.sh b/solo_server/grafana_setup.sh similarity index 100% rename from solo_server/benchmark/grafana_setup.sh rename to solo_server/grafana_setup.sh diff --git a/solo_server/benchmark/locustfile.py b/solo_server/locustfile.py similarity index 
100%
rename from solo_server/benchmark/locustfile.py
rename to solo_server/locustfile.py
diff --git a/solo_server/requirements.txt b/solo_server/requirements.txt
index 4602da1..83d748c 100644
--- a/solo_server/requirements.txt
+++ b/solo_server/requirements.txt
@@ -9,4 +9,7 @@ Pillow
 diffusers
 accelerate
 huggingface_hub
-qai-hub-models[stable_diffusion_v2_1_quantized]
\ No newline at end of file
+qai-hub-models[stable_diffusion_v2_1_quantized]
+typer
+locust
+locust-plugins
\ No newline at end of file

From 82519ff4f677534185b539c4438e656c620df35e Mon Sep 17 00:00:00 2001
From: Dhruv Diddi
Date: Sun, 17 Nov 2024 21:24:23 -0800
Subject: [PATCH 3/6] fix: small print change

---
 solo_server/base.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/solo_server/base.py b/solo_server/base.py
index cec8722..8312bc3 100644
--- a/solo_server/base.py
+++ b/solo_server/base.py
@@ -103,7 +103,7 @@ def benchmark():
     ]
 
     try:
-        print(execute_command(locust_command))
+        execute_command(locust_command)
     except Exception as e:
         typer.echo(f"āŒ Benchmark failed: {e}")
     else:

From 666567a2fea57e378ba14bdb790de3e1c13f4978 Mon Sep 17 00:00:00 2001
From: Dhruv Diddi
Date: Mon, 18 Nov 2024 13:13:23 -0800
Subject: [PATCH 4/6] feat: add tool calling with streamlit gui and benchmark

---
 solo_server/base.py                          |  21 ++++
 solo_server/templates/streamlit_llm.py       | 111 ++++++++++++++++++
 solo_server/templates/tools/__init__.py      |   9 ++
 .../templates/tools/get_top_hf_papers.py     | 105 +++++++++++++++++
 solo_server/templates/utils.py               |  14 +++
 5 files changed, 260 insertions(+)
 create mode 100644 solo_server/templates/streamlit_llm.py
 create mode 100644 solo_server/templates/tools/__init__.py
 create mode 100644 solo_server/templates/tools/get_top_hf_papers.py
 create mode 100644 solo_server/templates/utils.py

diff --git a/solo_server/base.py b/solo_server/base.py
index 8312bc3..6096dac 100644
--- a/solo_server/base.py
+++ b/solo_server/base.py
@@ -114,5 +114,26 @@ def benchmark():
     typer.echo("ā¹ Stopping Grafana and TimescaleDB...")
     execute_command(["docker-compose", "-f", docker_compose_path, "down"])
 
+@app.command()
+def gui():
+    """
+    šŸ–„ļø Launch the Streamlit GUI for Solo Server.
+    """
+    typer.echo("šŸ–„ļø Launching Streamlit app...")
+
+    # Run Streamlit; resolve the template relative to this file so the
+    # command works from any working directory.
+    streamlit_command = [
+        "streamlit",
+        "run",
+        os.path.join(os.path.dirname(os.path.abspath(__file__)), "templates", "streamlit_llm.py")
+    ]
+
+    try:
+        execute_command(streamlit_command)
+    except Exception as e:
+        typer.echo(f"āŒ Failed to launch Streamlit app: {e}")
+    else:
+        typer.echo("āœ… Streamlit app launched successfully.")
+
 if __name__ == "__main__":
     app()

diff --git a/solo_server/templates/streamlit_llm.py b/solo_server/templates/streamlit_llm.py
new file mode 100644
index 0000000..6318669
--- /dev/null
+++ b/solo_server/templates/streamlit_llm.py
@@ -0,0 +1,111 @@
+import json
+
+import streamlit as st
+from openai import OpenAI
+from tools import available_tools, functions
+
+from utils import display_message
+
+# define model
+MODEL = "meta-llama/Meta-Llama-3.1-8B-Instruct"
+SYSTEM_MESSAGE = {
+    "role": "system",
+    "content": "You are a helpful assistant with tool calling capabilities. 
When you receive a tool call response, use the output to format an answer to the original user question.",
+}
+
+client = OpenAI(
+    base_url="http://127.0.0.1:8001/v1",
+    api_key="lit",
+)
+
+st.title("Chat with an AI Assistant.")
+
+# Initialize chat history
+if "messages" not in st.session_state:
+    st.session_state.messages = []
+
+# Add input field for system prompt
+st.sidebar.header("System Prompt")
+system_prompt = st.sidebar.text_area(
+    label="Modify the prompt here.", value=SYSTEM_MESSAGE["content"], height=200
+)
+SYSTEM_MESSAGE["content"] = system_prompt
+
+
+# Add checkboxes to the sidebar
+st.sidebar.header("Available Tools")
+selected_tools = [
+    tool["function"]["name"]
+    for tool in available_tools
+    if st.sidebar.checkbox(tool["function"]["name"], value=True)
+]
+
+# Filter available tools based on selected tools
+tools = [tool for tool in available_tools if tool["function"]["name"] in selected_tools]
+
+# Display chat messages from history on app rerun
+for message in st.session_state.messages:
+    display_message(message)
+
+# Accept user input
+if prompt := st.chat_input("Ask anything?"):
+    # Add user message to chat history
+    st.session_state.messages.append({"role": "user", "content": prompt})
+    # Display user message in chat message container
+    with st.chat_message("user"):
+        st.markdown(prompt)
+
+    # Display assistant response in chat message container
+    with st.chat_message("assistant"):
+        messages = [SYSTEM_MESSAGE, *st.session_state.messages]
+        if not tools:
+            stream = client.chat.completions.create(
+                model=MODEL,
+                messages=messages,
+                stream=True,
+            )
+            response = st.write_stream(stream)
+            st.session_state.messages.append({"role": "assistant", "content": response})
+        else:
+            # The first call is non-streaming so tool_calls can be inspected;
+            # show a spinner while it runs.
+            with st.spinner("Thinking..."):
+                response = client.chat.completions.create(
+                    model=MODEL,
+                    messages=messages,
+                    tools=tools,  # pass the checkbox-filtered list, not every tool
+                    tool_choice="auto",
+                )
+            response_message = response.choices[0].message
+            tool_calls = response_message.tool_calls
+            if tool_calls:
+                with st.status("Thinking...", expanded=True) as status:
+                    st.session_state.messages.append(response_message)
+                    for tool_call in tool_calls:
+                        function_name = tool_call.function.name
+                        tool = functions[function_name]
+                        args = json.loads(tool_call.function.arguments)
+                        st.write(f"Calling {function_name}... with args: {args}")
+                        tool_response = tool(**args)
+                        st.session_state.messages.append(
+                            {
+                                "tool_call_id": tool_call.id,
+                                "role": "ipython",
+                                "content": tool_response,
+                                "name": function_name,
+                            }
+                        )
+                    status.update(
+                        label=f"Running {function_name}... 
Done!", + state="complete", + expanded=False, + ) + stream = client.chat.completions.create( + model=MODEL, messages=st.session_state.messages, stream=True + ) + response = st.write_stream(stream) + st.session_state.messages.append( + {"role": "assistant", "content": response} + ) + else: + response = response.choices[0].message + st.write(response.content) + st.session_state.messages.append(response) diff --git a/solo_server/templates/tools/__init__.py b/solo_server/templates/tools/__init__.py new file mode 100644 index 0000000..3b4f9aa --- /dev/null +++ b/solo_server/templates/tools/__init__.py @@ -0,0 +1,9 @@ +from .get_top_hf_papers import get_top_hf_papers, get_top_hf_papers_json + +available_tools = [ + get_top_hf_papers_json, +] + +functions = { + "get_top_hf_papers": get_top_hf_papers, +} diff --git a/solo_server/templates/tools/get_top_hf_papers.py b/solo_server/templates/tools/get_top_hf_papers.py new file mode 100644 index 0000000..0b1361a --- /dev/null +++ b/solo_server/templates/tools/get_top_hf_papers.py @@ -0,0 +1,105 @@ +import json +import requests +from bs4 import BeautifulSoup + + +def get_top_hf_papers(n: int): + """ + Fetches the top N papers from the Hugging Face papers page based on the number of votes. + """ + url = "https://huggingface.co/papers" + response = requests.get(url) + if response.status_code != 200: + raise Exception(f"Failed to retrieve papers: {response.status_code}") + + soup = BeautifulSoup(response.text, "html.parser") + papers = soup.find_all("article") + + paper_info = [] + for paper in papers: + title = paper.find("h3").text.strip() if paper.find("h3") else "No Title" + link = paper.find("a")["href"] if paper.find("a") else "#" + vote_info = paper.find( + "div", {"class": "flex flex-wrap items-center gap-2.5 pt-1"} + ).find("div", {"class": "leading-none"}) + thumbnail = paper.find("img")["src"] if paper.find("img") else "" + author_list = paper.find( + "ul", {"class": "flex items-center flex-row-reverse text-sm"} + ) + + authors = [] + if author_list: + for author in author_list.find_all("li"): + if author.has_attr("title"): + authors.append(author["title"]) + + paper_info.append( + { + "title": title, + "link": link, + "votes": int(vote_info.text.strip()) + if vote_info and vote_info.text.strip().isdigit() + else 0, + "thumbnail": thumbnail, + "authors": ", ".join(authors) if authors else "Unknown", + } + ) + + paper_info.sort(key=lambda x: x["votes"], reverse=True) + top_papers = paper_info[:n] + + for i, paper in enumerate(top_papers): + paper_url = f"https://huggingface.co{paper['link']}" + paper_response = requests.get(paper_url) + if paper_response.status_code != 200: + print( + f"Failed to retrieve paper details for {paper['title']}: {paper_response.status_code}" + ) + continue + + paper_soup = BeautifulSoup(paper_response.text, "html.parser") + published_date_div = paper_soup.find( + "div", + { + "class": "mb-6 flex flex-wrap gap-2 text-sm text-gray-500 max-sm:flex-col sm:items-center sm:text-base md:mb-8" + }, + ).find("div") + published_date_text = "" + if published_date_div: + published_date_text = published_date_div.text.split("Published on ")[ + 1 + ].strip() + + abstract_div = paper_soup.find("div", {"class": "pb-8 pr-4 md:pr-16"}).find("p") + abstract = ( + abstract_div.text.strip() if abstract_div else "No abstract available" + ) + + top_papers[i]["published_date"] = published_date_text + top_papers[i]["abstract"] = abstract + + return json.dumps(top_papers, indent=2) + + +get_top_hf_papers_json = { + "type": "function", + 
"function": { + "name": "get_top_hf_papers", + "description": "Get the top N papers from the Hugging Face papers page based on the number of votes.", + "parameters": { + "type": "object", + "properties": { + "n": { + "type": "integer", + "description": "Number of top papers to fetch.", + } + }, + "required": ["n"], + }, + }, +} + +if __name__ == "__main__": + top_papers = get_top_hf_papers(5) + for paper in top_papers: + print(f"Title: {paper['title']}") diff --git a/solo_server/templates/utils.py b/solo_server/templates/utils.py new file mode 100644 index 0000000..3486923 --- /dev/null +++ b/solo_server/templates/utils.py @@ -0,0 +1,14 @@ +import streamlit as st + + +def display_message(message): + if isinstance(message, dict): + role = message.get("role") + content = message.get("content") + else: + role = message.role + content = message.content + + if role in ["system", "assistant", "user"] and content: + with st.chat_message(role): + st.markdown(content) From 607b12c8861c34bae9a018f05e8343d49256c839 Mon Sep 17 00:00:00 2001 From: Dhruv Diddi Date: Mon, 18 Nov 2024 13:14:05 -0800 Subject: [PATCH 5/6] fix: remove unused dist files --- dist/solo_server-0.2.6-py3-none-any.whl | Bin 11549 -> 0 bytes dist/solo_server-0.2.6.tar.gz | Bin 9031 -> 0 bytes 2 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 dist/solo_server-0.2.6-py3-none-any.whl delete mode 100644 dist/solo_server-0.2.6.tar.gz diff --git a/dist/solo_server-0.2.6-py3-none-any.whl b/dist/solo_server-0.2.6-py3-none-any.whl deleted file mode 100644 index aecf19e2b2e5902c4dec21c83d333ebdeaa8881e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 11549 zcmeI2WmKHolCY8BPH>0d4#C|Wf(3VITpDOxf)kt&Bnj^B79c>d;BLW!2X`mPbncxq za5(4Ao%uQ6`u19H_v&8jeYVxEs$Ese^3cz)ARr*%A+|EDwcB3`(SCJeQIad>YEf>Xsd6`eQ_3W z>gU$k%a0jJsu?cdKARvTD!DIHK=E2vi9Hi+Vky3uDGI=Mv+vRSD57V>Ga;jfo;p1) z(*j4Q8_HQ+2LqIrjTGBBPeCs$Uq%YNyy4ZPiO3<d``QUs`Z9!_axvCy;qn*I!bQ;!AiUI? 
From 4052dfec769d9bdd8526c7c2abd028ec7064dd1b Mon Sep 17 00:00:00 2001
From: Kabir Jaiswal
Date: Tue, 19 Nov 2024 19:48:17 -0500
Subject: [PATCH 6/6] locust updates

---
 solo_server/base.py                      | 96 +++++++++++++++++-----
 solo_server/docker-compose-benchmark.yml | 38 +++------
 solo_server/docker-compose.yml           |  8 ++
 solo_server/grafana_setup.sh             |  0
 solo_server/locustfile.py                | 34 +++++++-
 solo_server/templates/llm.py             | 72 +++++++++++++---
 6 files changed, 183 insertions(+), 65 deletions(-)
 mode change 100644 => 100755 solo_server/grafana_setup.sh

diff --git a/solo_server/base.py b/solo_server/base.py
index 6096dac..18cae98 100644
--- a/solo_server/base.py
+++ b/solo_server/base.py
@@ -2,6 +2,9 @@
 from subprocess import run, CalledProcessError, DEVNULL
 import os
 import sys
+import time
+import requests
+import subprocess
 
 app = typer.Typer(help="šŸ› ļø Solo Server CLI for managing edge AI model inference using Docker-style commands.")
 
@@ -83,36 +86,89 @@ def status():
     execute_command(["docker-compose", "-f", docker_compose_path, "ps"])
 
 @app.command()
-def benchmark():
+def benchmark(
+    model_url: str = typer.Option(..., help="URL of the model to benchmark"),
+    model_filename: str = typer.Option(..., help="Filename for the downloaded model"),
+    template: str = typer.Option("llm", help="Template to use for benchmarking")
+):
     """
     šŸŽļø Run a benchmark test on the Solo Server with TimescaleDB and Grafana integration. 
""" check_docker_installation() - typer.echo("šŸŽļø Starting benchmark test...") + + # First start the Solo Server with the specified template + typer.echo(f"šŸš€ Starting the Solo Server with template: {template}...") + python_file = f"templates/{template}.py" + os.environ["PYTHON_FILE"] = python_file + os.environ["MODEL_URL"] = model_url + os.environ["MODEL_FILENAME"] = model_filename + + # Start the main server current_dir = os.path.dirname(os.path.abspath(__file__)) docker_compose_path = os.path.join(current_dir, "docker-compose.yml") - - # Start TimescaleDB and Grafana - typer.echo("šŸ› ļø Setting up Grafana and TimescaleDB...") execute_command(["docker-compose", "-f", docker_compose_path, "up", "-d"]) - # Run Locust benchmark - locust_command = [ - "locust", - "--timescale" - ] + # Wait for container to be healthy + typer.echo("ā³ Waiting for LLM server to be ready...") + start_time = time.time() + timeout = 300 # 5 minutes timeout + + while True: + if time.time() - start_time > timeout: + typer.echo("āŒ LLM server startup timed out") + execute_command(["docker-compose", "-f", docker_compose_path, "down"]) + return + + result = subprocess.run( + ["docker", "inspect", "--format", "{{.State.Health.Status}}", "solo-api"], + capture_output=True, + text=True + ) + status = result.stdout.strip() + + if status == "healthy": + typer.echo("āœ… LLM server is ready!") + break + elif status == "unhealthy": + # Print the container logs to help debug + typer.echo("Checking container logs:") + subprocess.run(["docker", "logs", "solo-api"]) + typer.echo("āŒ LLM server failed to start") + execute_command(["docker-compose", "-f", docker_compose_path, "down"]) + return + + typer.echo("ā³ Waiting for LLM server to initialize... (Status: " + status + ")") + time.sleep(5) + + # Now start the benchmark tools + typer.echo("šŸŽļø Starting benchmark tools...") + benchmark_compose_path = os.path.join(current_dir, "docker-compose-benchmark.yml") + execute_command(["docker-compose", "-f", benchmark_compose_path, "up", "-d", "timescale", "grafana", "locust"]) try: - execute_command(locust_command) - except Exception as e: - typer.echo(f"āŒ Benchmark failed: {e}") - else: - typer.echo("āœ… Benchmark test completed successfully.") - typer.echo("šŸ“Š Visit Grafana at http://localhost:3000 to view the results.") - - # Teardown - typer.echo("ā¹ Stopping Grafana and TimescaleDB...") - execute_command(["docker-compose", "-f", docker_compose_path, "down"]) + # Wait for Grafana to be ready + typer.echo("ā³ Waiting for Grafana to be ready...") + time.sleep(10) + + # Configure Grafana + typer.echo("šŸ”§ Configuring Grafana...") + grafana_setup_path = os.path.join(current_dir, "grafana_setup.sh") + os.chmod(grafana_setup_path, 0o755) + execute_command([grafana_setup_path]) + + typer.echo("āœ… Benchmark environment is ready!") + typer.echo("šŸ“Š Visit:") + typer.echo(" - Grafana: http://localhost:3000 (admin/admin)") + typer.echo(" - Locust: http://localhost:8089") + + while True: + time.sleep(1) + except KeyboardInterrupt: + typer.echo("\nā¹ Stopping all services...") + finally: + # Stop both compose files + execute_command(["docker-compose", "-f", docker_compose_path, "down"]) + execute_command(["docker-compose", "-f", benchmark_compose_path, "down"]) @app.command() def gui(): diff --git a/solo_server/docker-compose-benchmark.yml b/solo_server/docker-compose-benchmark.yml index d27c00d..af12c24 100644 --- a/solo_server/docker-compose-benchmark.yml +++ b/solo_server/docker-compose-benchmark.yml @@ -2,13 +2,13 @@ 
version: '3.7' services: timescale: - image: cyberw/locust-timescale:latest + image: timescale/timescaledb:latest-pg14 container_name: timescale_postgres environment: POSTGRES_PASSWORD: password - PGDATA: /var/lib/postgresql/data + POSTGRES_DB: locust ports: - - "5432:5432" + - "5433:5432" volumes: - timescale_postgres_data:/var/lib/postgresql/data @@ -25,34 +25,20 @@ services: - grafana_data:/var/lib/grafana locust: - build: - context: ./app/benchmark + image: locustio/locust:latest container_name: locust_benchmark + volumes: + - ./locustfile.py:/home/locust/locustfile.py command: > - locust - --headless + -f /home/locust/locustfile.py + --host http://host.docker.internal:8000 --users 10 --spawn-rate 2 --run-time 1m - --host http://solo-api:8000 - --timescale - depends_on: - - solo-api - environment: - LOCUST_HOST: http://solo-api:8000 - networks: - - solo-network - - solo-api: - build: - context: . - args: - PYTHON_FILE: ${PYTHON_FILE:-solo_server/templates/basic.py} - container_name: "solo-api" - environment: - - PYTHON_FILE=${PYTHON_FILE:-solo_server/templates/basic.py} - - MODEL_URL=${MODEL_URL:-your_model_url_here} - - MODEL_FILENAME=${MODEL_FILENAME:-your_model_filename_here} + ports: + - "8089:8089" + extra_hosts: + - "host.docker.internal:host-gateway" volumes: timescale_postgres_data: diff --git a/solo_server/docker-compose.yml b/solo_server/docker-compose.yml index 689d459..6c802a7 100644 --- a/solo_server/docker-compose.yml +++ b/solo_server/docker-compose.yml @@ -9,10 +9,18 @@ services: container_name: "solo-api" ports: - "8000:8000" + - "8080:8080" environment: - PYTHON_FILE=${PYTHON_FILE:-solo_server/templates/basic.py} - MODEL_URL=${MODEL_URL:-your_model_url_here} - MODEL_FILENAME=${MODEL_FILENAME:-your_model_filename_here} + - LITSERVE_TIMEOUT=120 + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8080/completion", "-H", "Content-Type: application/json", "-d", '{"prompt":"test","n_predict":1}'] + interval: 10s + timeout: 30s + retries: 10 + start_period: 120s networks: solo-network: diff --git a/solo_server/grafana_setup.sh b/solo_server/grafana_setup.sh old mode 100644 new mode 100755 diff --git a/solo_server/locustfile.py b/solo_server/locustfile.py index 3c13f97..9ae6f71 100644 --- a/solo_server/locustfile.py +++ b/solo_server/locustfile.py @@ -1,13 +1,33 @@ -from locust import HttpUser, task +from locust import HttpUser, task, between +import json class SoloServerUser(HttpUser): - wait_time = lambda self: 0 # No wait between tasks + wait_time = between(1, 2) @task - def generate_text(self): - """Simulates text generation requests.""" + def test_llm(self): + """Test LLM completions endpoint""" + headers = { + "Content-Type": "application/json" + } + payload = { - "prompt": "Generate a Solo app with load tests.", - "max_tokens": 100, + "prompt": "What is AI?", + "n_predict": 128 } - self.client.post("/v1/completions", json=payload) + + with self.client.post( + "/predict", + json=payload, + headers=headers, + catch_response=True + ) as response: + try: + if response.status_code == 200: + response.success() + else: + response.failure(f"Failed with status code: {response.status_code}") + except json.JSONDecodeError: + response.failure("Response could not be decoded as JSON") + except Exception as e: + response.failure(f"Error: {str(e)}") diff --git a/solo_server/templates/llm.py b/solo_server/templates/llm.py index 62bfd4a..a95df26 100644 --- a/solo_server/templates/llm.py +++ b/solo_server/templates/llm.py @@ -51,26 +51,74 @@ def setup(self, device): 
print("Llama model server started.") def decode_request(self, request): - return request["prompt"] + # Handle both POST /predict and direct completion requests + if isinstance(request, dict): + return request.get("prompt", request.get("input", "")) + return request def predict(self, prompt): - response = subprocess.run(["curl", "-X", "POST", "http://localhost:8080/completion", - "-H", "Content-Type: application/json", - "-d", f'{{"prompt": "{prompt}", "n_predict": 128}}'], - capture_output=True, text=True) - response_json = json.loads(response.stdout) - return response_json["content"] + try: + # Internal request to LLaMA server on 8080 + response = subprocess.run( + ["curl", "-s", "http://localhost:8080/completion", + "-H", "Content-Type: application/json", + "-d", json.dumps({ + "prompt": prompt, + "n_predict": 128 + })], + capture_output=True, + text=True, + timeout=30 + ) + + if response.returncode != 0: + print(f"Error from LLM server: {response.stderr}") + return f"Error: {response.stderr}" + + result = json.loads(response.stdout) + return result.get("content", "No content generated") + + except Exception as e: + print(f"Error in predict: {e}") + return f"Error: {str(e)}" def encode_response(self, output): - # Clean up the output by removing system tokens, newlines, and redundant text - cleaned_output = output.replace("<|eot_id|>", "") # Remove system token - cleaned_output = cleaned_output.replace("\n", " ") # Replace newlines with spaces - cleaned_output = " ".join(cleaned_output.split()) # Remove extra spaces - return {"generated_text": cleaned_output} + if isinstance(output, str): + cleaned_output = output.replace("<|eot_id|>", "").replace("\n", " ").strip() + return { + "generated_text": cleaned_output, + "status": "success" + } + return { + "error": str(output), + "status": "error" + } + + def health_check(self): + """Health check endpoint""" + try: + response = subprocess.run( + ["curl", "-s", "http://localhost:8080/completion", + "-H", "Content-Type: application/json", + "-d", '{"prompt": "test", "n_predict": 1}'], + capture_output=True, + timeout=5 + ) + return response.returncode == 0 + except: + return False # STEP 2: START THE SERVER if __name__ == "__main__": api = LlamaLitAPI() server = ls.LitServer(api, accelerator="auto") + + # Add health check endpoint + @server.app.get("/health") + async def health(): + if api.health_check(): + return {"status": "healthy"} + return {"status": "unhealthy"} + server.run(port=8000, generate_client_file=False) \ No newline at end of file