diff --git a/.env.example b/.env.example index f6c752d5c4..e3958038f6 100644 --- a/.env.example +++ b/.env.example @@ -62,6 +62,10 @@ # JINA API (https://jina.ai/) # JINA_API_KEY="Fill your API key here" +# BurnCloud API (https://ai.burncloud.com) +# BURNCLOUD_API_KEY="Fill your API key here" +# BURNCLOUD_API_BASE_URL="Fill your Base URL here" + #=========================================== # Tools & Services API #=========================================== diff --git a/camel/configs/__init__.py b/camel/configs/__init__.py index 620ab91503..13f9ff0b86 100644 --- a/camel/configs/__init__.py +++ b/camel/configs/__init__.py @@ -17,6 +17,7 @@ from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig from .base_config import BaseConfig from .bedrock_config import BEDROCK_API_PARAMS, BedrockConfig +from .burncloud_config import BURNCLOUD_API_PARAMS, BurnCloudConfig from .cohere_config import COHERE_API_PARAMS, CohereConfig from .cometapi_config import COMETAPI_API_PARAMS, CometAPIConfig from .crynux_config import CRYNUX_API_PARAMS, CrynuxConfig @@ -93,6 +94,8 @@ 'SAMBA_CLOUD_API_PARAMS', 'TogetherAIConfig', 'TOGETHERAI_API_PARAMS', + 'BurnCloudConfig', + 'BURNCLOUD_API_PARAMS', 'CohereConfig', 'COHERE_API_PARAMS', 'CometAPIConfig', diff --git a/camel/configs/burncloud_config.py b/camel/configs/burncloud_config.py new file mode 100644 index 0000000000..9554c5f45a --- /dev/null +++ b/camel/configs/burncloud_config.py @@ -0,0 +1,70 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +from typing import Optional, Sequence, Union + +from camel.configs.base_config import BaseConfig +from camel.types import NotGiven + + +class BurnCloudConfig(BaseConfig): + r"""Defines parameters for BurnCloud's OpenAI-compatible chat completions. + + Reference: https://docs.burncloud.com/books/api + + Args: + temperature (float, optional): Sampling temperature to use, between + :obj:`0` and :obj:`2`. Higher values make the output more random, + while lower values make it more focused and deterministic. + top_p (float, optional): An alternative to sampling with temperature, + called nucleus sampling, where the model considers the results of + the tokens with top_p probability mass. + n (int, optional): How many chat completion choices to generate for + each input message. + response_format (object, optional): Response schema enforced by the + model. Setting to {"type": "json_object"} enables JSON mode. + stream (bool, optional): If True, partial deltas will be sent as + server-sent events while tokens stream back. + stop (str or list, optional): Up to :obj:`4` sequences where the API + will stop generating further tokens. + max_tokens (int, optional): Maximum number of tokens to generate in + the chat completion. Total input + output tokens must stay within + the model context window. 
+ presence_penalty (float, optional): Number between :obj:`-2.0` and + :obj:`2.0`. Positive values penalize new tokens based on their + appearance so far, encouraging new topics. + frequency_penalty (float, optional): Number between :obj:`-2.0` and + :obj:`2.0`. Positive values penalize new tokens based on existing + frequency, reducing repetition. + user (str, optional): Unique identifier for the end-user, useful for + abuse monitoring. + tools (list[FunctionTool], optional): Tool definitions the model can + call. Currently supports function tools. + tool_choice (Union[dict[str, str], str], optional): Controls which, if + any, tool gets invoked by the model. + """ + + temperature: Optional[float] = None + top_p: Optional[float] = None + n: Optional[int] = None + stream: Optional[bool] = None + stop: Optional[Union[str, Sequence[str], NotGiven]] = None + max_tokens: Optional[Union[int, NotGiven]] = None + presence_penalty: Optional[float] = None + response_format: Optional[Union[dict, NotGiven]] = None + frequency_penalty: Optional[float] = None + user: Optional[str] = None + tool_choice: Optional[Union[dict[str, str], str]] = None + + +BURNCLOUD_API_PARAMS = {param for param in BurnCloudConfig.model_fields.keys()} diff --git a/camel/models/__init__.py b/camel/models/__init__.py index 55f728a40d..b8de94e56d 100644 --- a/camel/models/__init__.py +++ b/camel/models/__init__.py @@ -19,6 +19,7 @@ from .azure_openai_model import AzureOpenAIModel from .base_audio_model import BaseAudioModel from .base_model import BaseModelBackend +from .burncloud_model import BurnCloudModel from .cohere_model import CohereModel from .cometapi_model import CometAPIModel from .crynux_model import CrynuxModel @@ -66,6 +67,7 @@ 'OpenRouterModel', 'AzureOpenAIModel', 'AnthropicModel', + 'BurnCloudModel', 'AMDModel', 'MistralModel', 'GroqModel', diff --git a/camel/models/burncloud_model.py b/camel/models/burncloud_model.py new file mode 100644 index 0000000000..86b515d9ee --- /dev/null +++ b/camel/models/burncloud_model.py @@ -0,0 +1,76 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +import os +from typing import Any, Dict, Optional, Union + +from camel.configs import BurnCloudConfig +from camel.models.openai_compatible_model import OpenAICompatibleModel +from camel.types import ModelType +from camel.utils import BaseTokenCounter, api_keys_required + + +class BurnCloudModel(OpenAICompatibleModel): + r"""OpenAI-compatible backend for the BurnCloud API gateway. + + Args: + model_type (Union[ModelType, str]): Target model identifier supported + by BurnCloud, e.g., ``gpt-4o`` or ``deepseek-reasoner``. + model_config_dict (Optional[Dict[str, Any]], optional): Request payload + overrides. Defaults to :obj:`BurnCloudConfig().as_dict()`. + api_key (Optional[str], optional): BurnCloud API key. If omitted, + :obj:`BURNCLOUD_API_KEY` from the environment will be used. 
+ url (Optional[str], optional): Endpoint base URL. Defaults to + ``https://ai.burncloud.com/v1`` or ``BURNCLOUD_API_BASE_URL`` when + provided. + token_counter (Optional[BaseTokenCounter], optional): Token counter to + associate with the model. Falls back to :obj:`OpenAITokenCounter` + inside :class:`OpenAICompatibleModel` if not provided. + timeout (Optional[float], optional): Timeout in seconds for API calls. + Defaults to ``MODEL_TIMEOUT`` env var or ``180`` seconds. + max_retries (int, optional): Maximum retry attempts for failed calls. + Defaults to ``3``. + **kwargs (Any): Extra keyword arguments forwarded to the underlying + OpenAI-compatible client. + """ + + @api_keys_required([('api_key', 'BURNCLOUD_API_KEY')]) + def __init__( + self, + model_type: Union[ModelType, str], + model_config_dict: Optional[Dict[str, Any]] = None, + api_key: Optional[str] = None, + url: Optional[str] = None, + token_counter: Optional[BaseTokenCounter] = None, + timeout: Optional[float] = None, + max_retries: int = 3, + **kwargs: Any, + ) -> None: + if model_config_dict is None: + model_config_dict = BurnCloudConfig().as_dict() + api_key = api_key or os.environ.get('BURNCLOUD_API_KEY') + url = url or os.environ.get( + 'BURNCLOUD_API_BASE_URL', 'https://ai.burncloud.com/v1' + ) + timeout = timeout or float(os.environ.get('MODEL_TIMEOUT', 180)) + + super().__init__( + model_type=model_type, + model_config_dict=model_config_dict, + api_key=api_key, + url=url, + token_counter=token_counter, + timeout=timeout, + max_retries=max_retries, + **kwargs, + ) diff --git a/camel/models/model_factory.py b/camel/models/model_factory.py index 6d05dcdfe0..717c56aada 100644 --- a/camel/models/model_factory.py +++ b/camel/models/model_factory.py @@ -22,6 +22,7 @@ from camel.models.aws_bedrock_model import AWSBedrockModel from camel.models.azure_openai_model import AzureOpenAIModel from camel.models.base_model import BaseModelBackend +from camel.models.burncloud_model import BurnCloudModel from camel.models.cohere_model import CohereModel from camel.models.cometapi_model import CometAPIModel from camel.models.crynux_model import CrynuxModel @@ -111,6 +112,7 @@ class ModelFactory: ModelPlatformType.QIANFAN: QianfanModel, ModelPlatformType.CRYNUX: CrynuxModel, ModelPlatformType.AIHUBMIX: AihubMixModel, + ModelPlatformType.BURNCLOUD: BurnCloudModel, } @staticmethod diff --git a/camel/types/enums.py b/camel/types/enums.py index ab30d8c4ad..5b65415069 100644 --- a/camel/types/enums.py +++ b/camel/types/enums.py @@ -1764,6 +1764,7 @@ class ModelPlatformType(Enum): NEBIUS = "nebius" COMETAPI = "cometapi" OPENROUTER = "openrouter" + BURNCLOUD = "burncloud" OLLAMA = "ollama" LITELLM = "litellm" LMSTUDIO = "lmstudio" @@ -1836,6 +1837,11 @@ def is_openrouter(self) -> bool: r"""Returns whether this platform is openrouter.""" return self is ModelPlatformType.OPENROUTER + @property + def is_burncloud(self) -> bool: + r"""Returns whether this platform is BurnCloud.""" + return self is ModelPlatformType.BURNCLOUD + @property def is_lmstudio(self) -> bool: r"""Returns whether this platform is lmstudio.""" diff --git a/camel/utils/commons.py b/camel/utils/commons.py index ca7022286e..b589bb6cc7 100644 --- a/camel/utils/commons.py +++ b/camel/utils/commons.py @@ -358,6 +358,8 @@ def wrapper(*args: Any, **kwargs: Any) -> Any: key_way = "https://www.klavis.ai/docs" elif env_var_name == 'XAI_API_KEY': key_way = "https://api.x.ai/v1" + elif env_var_name == 'BURNCLOUD_API_KEY': + key_way = "https://ai.burncloud.com/v1" if missing_keys: raise 
ValueError( diff --git a/docs/key_modules/models.md b/docs/key_modules/models.md index d0a059a32d..df5a379c6f 100644 --- a/docs/key_modules/models.md +++ b/docs/key_modules/models.md @@ -64,6 +64,7 @@ CAMEL supports a wide range of models, including [OpenAI’s GPT series](https:/ | **SambaNova** | [supported models](https://docs.sambanova.ai/cloud/docs/get-started/supported-models) | | **Ollama** | [supported models](https://ollama.com/library) | | **OpenRouter** | [supported models](https://openrouter.ai/models) | +| **BurnCloud** | [supported models](https://ai.burncloud.com/pricing) | | **PPIO** | [supported models](https://ppio.com/model-api/console) | | **LiteLLM** | [supported models](https://docs.litellm.ai/docs/providers) | | **LMStudio** | [supported models](https://lmstudio.ai/models) | @@ -426,6 +427,46 @@ Integrate your favorite models into CAMEL-AI with straightforward Python calls. + + Access [BurnCloud](https://www.burncloud.com) to route OpenAI-compatible requests to GPT, Claude, DeepSeek, Grok, and other hosted models: + + - **Unified Endpoint**: Send standard OpenAI Chat Completions to `https://ai.burncloud.com/v1`. + - **Model Market**: Choose any model listed in the [BurnCloud Model Market](https://ai.burncloud.com/pricing), including reasoning and multimodal variants. + - **Drop-in Replacement**: Keep the same request schema, streaming, and tool-calling semantics as OpenAI clients. + + ```python + from camel.models import ModelFactory + from camel.types import ModelPlatformType, ModelType + from camel.configs import BurnCloudConfig + from camel.agents import ChatAgent + + model = ModelFactory.create( + model_platform=ModelPlatformType.BURNCLOUD, + model_type=ModelType.GPT_4O, + model_config_dict=BurnCloudConfig(temperature=0.2).as_dict(), + ) + + agent = ChatAgent( + system_message="You are a helpful assistant.", + model=model + ) + + response = agent.step("Summarize the CAMEL AI framework in two sentences.") + print(response.msgs[0].content) + ``` + + **Environment Variables:** + ```bash + export BURNCLOUD_API_KEY="your_burncloud_api_key" + export BURNCLOUD_API_BASE_URL="https://ai.burncloud.com/v1" # Optional override + ``` + + + BurnCloud is fully OpenAI-compatible, so you can pass any supported model identifier as a plain string (e.g., `"claude-3.5-sonnet"`) even if it isn't part of the predefined enums. 
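+
+ A quick sketch of that raw-string usage; the model id below is illustrative, so check the Model Market for the ids your account can access:
+
+ ```python
+ model = ModelFactory.create(
+     model_platform=ModelPlatformType.BURNCLOUD,
+     model_type="claude-3.5-sonnet",  # any id listed by BurnCloud
+ )
+ ```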
+ + + + Using [Groq](https://groq.com/)'s powerful models (e.g., Llama 3.3-70B): diff --git a/docs/mintlify/docs.json b/docs/mintlify/docs.json index 4d125a9945..c6cfaa59f6 100644 --- a/docs/mintlify/docs.json +++ b/docs/mintlify/docs.json @@ -244,6 +244,7 @@ "reference/camel.configs.ollama_config", "reference/camel.configs.openai_config", "reference/camel.configs.openrouter_config", + "reference/camel.configs.burncloud_config", "reference/camel.configs.ppio_config", "reference/camel.configs.qianfan_config", "reference/camel.configs.qwen_config", @@ -335,6 +336,7 @@ "reference/camel.models.openai_compatible_model", "reference/camel.models.openai_model", "reference/camel.models.openrouter_model", + "reference/camel.models.burncloud_model", "reference/camel.models.ppio_model", "reference/camel.models.qianfan_model", "reference/camel.models.qwen_model", diff --git a/docs/mintlify/key_modules/models.mdx b/docs/mintlify/key_modules/models.mdx index d0a059a32d..df5a379c6f 100644 --- a/docs/mintlify/key_modules/models.mdx +++ b/docs/mintlify/key_modules/models.mdx @@ -64,6 +64,7 @@ CAMEL supports a wide range of models, including [OpenAI’s GPT series](https:/ | **SambaNova** | [supported models](https://docs.sambanova.ai/cloud/docs/get-started/supported-models) | | **Ollama** | [supported models](https://ollama.com/library) | | **OpenRouter** | [supported models](https://openrouter.ai/models) | +| **BurnCloud** | [supported models](https://ai.burncloud.com/pricing) | | **PPIO** | [supported models](https://ppio.com/model-api/console) | | **LiteLLM** | [supported models](https://docs.litellm.ai/docs/providers) | | **LMStudio** | [supported models](https://lmstudio.ai/models) | @@ -426,6 +427,46 @@ Integrate your favorite models into CAMEL-AI with straightforward Python calls. + + Access [BurnCloud](https://www.burncloud.com) to route OpenAI-compatible requests to GPT, Claude, DeepSeek, Grok, and other hosted models: + + - **Unified Endpoint**: Send standard OpenAI Chat Completions to `https://ai.burncloud.com/v1`. + - **Model Market**: Choose any model listed in the [BurnCloud Model Market](https://ai.burncloud.com/pricing), including reasoning and multimodal variants. + - **Drop-in Replacement**: Keep the same request schema, streaming, and tool-calling semantics as OpenAI clients. + + ```python + from camel.models import ModelFactory + from camel.types import ModelPlatformType, ModelType + from camel.configs import BurnCloudConfig + from camel.agents import ChatAgent + + model = ModelFactory.create( + model_platform=ModelPlatformType.BURNCLOUD, + model_type=ModelType.GPT_4O, + model_config_dict=BurnCloudConfig(temperature=0.2).as_dict(), + ) + + agent = ChatAgent( + system_message="You are a helpful assistant.", + model=model + ) + + response = agent.step("Summarize the CAMEL AI framework in two sentences.") + print(response.msgs[0].content) + ``` + + **Environment Variables:** + ```bash + export BURNCLOUD_API_KEY="your_burncloud_api_key" + export BURNCLOUD_API_BASE_URL="https://ai.burncloud.com/v1" # Optional override + ``` + + + BurnCloud is fully OpenAI-compatible, so you can pass any supported model identifier as a plain string (e.g., `"claude-3.5-sonnet"`) even if it isn't part of the predefined enums. 
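+
+ A quick sketch of that raw-string usage; the model id below is illustrative, so check the Model Market for the ids your account can access:
+
+ ```python
+ model = ModelFactory.create(
+     model_platform=ModelPlatformType.BURNCLOUD,
+     model_type="claude-3.5-sonnet",  # any id listed by BurnCloud
+ )
+ ```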
+ + + + Using [Groq](https://groq.com/)'s powerful models (e.g., Llama 3.3-70B): diff --git a/docs/mintlify/reference/camel.configs.burncloud_config.mdx b/docs/mintlify/reference/camel.configs.burncloud_config.mdx new file mode 100644 index 0000000000..9af2e17238 --- /dev/null +++ b/docs/mintlify/reference/camel.configs.burncloud_config.mdx @@ -0,0 +1,32 @@ + + + + +## BurnCloudConfig + +```python +class BurnCloudConfig(BaseConfig): +``` + +Defines parameters for BurnCloud's OpenAI-compatible chat completions. + +**Parameters:** + +- **temperature** (float, optional): Sampling temperature between :obj:`0` and :obj:`2`. Higher values yield more random generations. +- **top_p** (float, optional): Nucleus sampling threshold that limits tokens to the specified cumulative probability mass. +- **n** (int, optional): Number of completions to generate per request. +- **response_format** (object, optional): Optional schema enforcing the output format (e.g., `{"type": "json_object"}`). +- **stream** (bool, optional): Stream partial deltas over Server-Sent Events when :obj:`True`. +- **stop** (str or list, optional): Up to four stop sequences that terminate generation. +- **max_tokens** (int, optional): Maximum tokens the model may generate for a completion. +- **presence_penalty** (float, optional): Penalizes tokens that have already appeared to encourage topic switching. +- **frequency_penalty** (float, optional): Penalizes token repetition to reduce duplicated output. +- **user** (str, optional): Identifier for the end-user to aid abuse monitoring. +- **tools** (list[FunctionTool], optional): Function tool definitions the model may call. +- **tool_choice** (Union[dict[str, str], str], optional): Forces how tools should be invoked (e.g., `"none"`, `"auto"`, or a specific function name). + + + +## BURNCLOUD_API_PARAMS + +A convenience set containing every configurable field defined on :class:`BurnCloudConfig`. diff --git a/docs/mintlify/reference/camel.models.burncloud_model.mdx b/docs/mintlify/reference/camel.models.burncloud_model.mdx new file mode 100644 index 0000000000..dda7f99cb0 --- /dev/null +++ b/docs/mintlify/reference/camel.models.burncloud_model.mdx @@ -0,0 +1,40 @@ + + + + +## BurnCloudModel + +```python +class BurnCloudModel(OpenAICompatibleModel): +``` + +OpenAI-compatible backend for the BurnCloud API gateway. + +**Parameters:** + +- **model_type** (Union[ModelType, str]): Target model identifier (e.g., `"gpt-4o"`, `"deepseek-reasoner"`). +- **model_config_dict** (Optional[Dict[str, Any]], optional): Overrides for the request payload. Defaults to :obj:`BurnCloudConfig().as_dict()`. +- **api_key** (Optional[str], optional): BurnCloud API key. Falls back to the ``BURNCLOUD_API_KEY`` environment variable. +- **url** (Optional[str], optional): Base URL for the BurnCloud endpoint. Defaults to ``https://ai.burncloud.com/v1`` or ``BURNCLOUD_API_BASE_URL`` when provided. +- **token_counter** (Optional[BaseTokenCounter], optional): Token counter implementation injected into the backend. Defaults to :class:`OpenAITokenCounter` via :class:`OpenAICompatibleModel`. +- **timeout** (Optional[float], optional): Timeout in seconds for each API call. Defaults to the ``MODEL_TIMEOUT`` environment variable or ``180`` seconds. +- **max_retries** (int, optional): Maximum retry attempts for failed requests. Defaults to ``3``. +- **kwargs** (Any): Additional keyword arguments forwarded to the underlying OpenAI-compatible client. 
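+
+**Example:**
+
+A minimal usage sketch (assumes ``BURNCLOUD_API_KEY`` is exported; `run` takes OpenAI-style message dicts):
+
+```python
+from camel.models import BurnCloudModel
+from camel.types import ModelType
+
+model = BurnCloudModel(model_type=ModelType.GPT_4O)
+response = model.run([{"role": "user", "content": "Hello!"}])
+print(response.choices[0].message.content)
+```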
+ + + +### __init__ + +```python +def __init__( + self, + model_type: Union[ModelType, str], + model_config_dict: Optional[Dict[str, Any]] = None, + api_key: Optional[str] = None, + url: Optional[str] = None, + token_counter: Optional[BaseTokenCounter] = None, + timeout: Optional[float] = None, + max_retries: int = 3, + **kwargs: Any +): +``` diff --git a/docs/mintlify/reference/camel.types.enums.mdx b/docs/mintlify/reference/camel.types.enums.mdx index 35da593bb2..79a4ab3552 100644 --- a/docs/mintlify/reference/camel.types.enums.mdx +++ b/docs/mintlify/reference/camel.types.enums.mdx @@ -560,6 +560,16 @@ def is_openrouter(self): Returns whether this platform is openrouter. + + +### is_burncloud + +```python +def is_burncloud(self): +``` + +Returns whether this platform is BurnCloud. + ### is_lmstudio diff --git a/examples/models/burncloud_basic_chat.py b/examples/models/burncloud_basic_chat.py new file mode 100644 index 0000000000..93426c0f77 --- /dev/null +++ b/examples/models/burncloud_basic_chat.py @@ -0,0 +1,57 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +"""Minimal BurnCloud chat example. + +Before running: + +```bash +export BURNCLOUD_API_KEY="your_burncloud_api_key" +# Optional: export BURNCLOUD_API_BASE_URL="https://ai.burncloud.com/v1" +``` +""" + +from camel.agents import ChatAgent +from camel.configs import BurnCloudConfig +from camel.models import ModelFactory +from camel.types import ModelPlatformType, ModelType + + +def main() -> None: + """Create a BurnCloud model and send a single chat request.""" + + model = ModelFactory.create( + model_platform=ModelPlatformType.BURNCLOUD, + # BurnCloud is OpenAI-compatible, so we can reuse CAMEL enums + # or pass raw strings such as "claude-3.5-sonnet". + model_type=ModelType.GPT_4O, + model_config_dict=BurnCloudConfig(temperature=0.2).as_dict(), + ) + + agent = ChatAgent( + system_message="You are a CAMEL-AI savvy technical consultant.", + model=model, + ) + + user_msg = ( + "Summarize CAMEL-AI's core capabilities in three sentences and " + "highlight its multi-agent collaboration strengths." + ) + + response = agent.step(user_msg) + print("User:\n", user_msg) + print("\nAssistant:\n", response.msgs[0].content) + + +if __name__ == "__main__": + main() diff --git a/examples/models/burncloud_streaming_cli.py b/examples/models/burncloud_streaming_cli.py new file mode 100644 index 0000000000..64d2a7f912 --- /dev/null +++ b/examples/models/burncloud_streaming_cli.py @@ -0,0 +1,104 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +"""Terminal-based BurnCloud streaming demo. + +Sample use cases: +1. Connectivity check - verify ``BURNCLOUD_API_KEY`` works. +2. Observe token streaming - BurnCloud mirrors OpenAI Chat Completions. +3. Switch regions - point ``BURNCLOUD_API_BASE_URL`` to EU or on-prem hosts. + +Before running: + +```bash +export BURNCLOUD_API_KEY="your_burncloud_api_key" +# Optional: export BURNCLOUD_API_BASE_URL="https://eu-ai.burncloud.com/v1" +``` +""" + +from __future__ import annotations + +from camel.agents import ChatAgent +from camel.configs import BurnCloudConfig +from camel.models import ModelFactory +from camel.types import ModelPlatformType, ModelType + + +def build_streaming_agent() -> ChatAgent: + """Create a BurnCloud ChatAgent with streaming enabled.""" + + config = BurnCloudConfig(temperature=0.3, stream=True).as_dict() + # OpenAI-compatible option that returns usage stats in the final chunk. + config["stream_options"] = {"include_usage": True} + + model = ModelFactory.create( + model_platform=ModelPlatformType.BURNCLOUD, + model_type=ModelType.GPT_4O, + model_config_dict=config, + ) + + return ChatAgent( + system_message=( + "You are a concise writing assistant. Use bullet points when " + "they improve clarity." + ), + model=model, + stream_accumulate=False, # Print deltas; set True to auto-accumulate. + ) + + +def main() -> None: + agent = build_streaming_agent() + + print("BurnCloud Streaming CLI ready.") + print("Type a prompt and press Enter. Use 'exit' or Ctrl+D to quit.\n") + + while True: + try: + user_msg = input("You> ").strip() + except (EOFError, KeyboardInterrupt): + print("\nGoodbye!") + break + + if not user_msg: + continue + if user_msg.lower() in {"exit", "quit"}: + print("Goodbye!") + break + + print("BurnCloud>", end=" ", flush=True) + response_stream = agent.step(user_msg) + + final_usage = None + for chunk in response_stream: + chunk_content = chunk.msgs[0].content + if chunk_content: + print(chunk_content, end="", flush=True) + + if chunk.info and chunk.info.get("usage"): + final_usage = chunk.info["usage"] + + if final_usage: + prompt = final_usage.get("prompt_tokens", "-") + completion = final_usage.get("completion_tokens", "-") + total = final_usage.get("total_tokens", "-") + print( + "\n[usage] prompt=" + f"{prompt}, completion={completion}, total={total}" + ) + + print() # Separate from the next prompt + + +if __name__ == "__main__": + main() diff --git a/examples/toolkits/burncloud_tool_use.py b/examples/toolkits/burncloud_tool_use.py new file mode 100644 index 0000000000..4a524deaf2 --- /dev/null +++ b/examples/toolkits/burncloud_tool_use.py @@ -0,0 +1,110 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +"""BurnCloud + FunctionTool demo showing local tool integration. + +Before running: + +```bash +export BURNCLOUD_API_KEY="your_burncloud_api_key" +# Optional: export BURNCLOUD_API_BASE_URL="https://ai.burncloud.com/v1" +``` +""" + +from __future__ import annotations + +from camel.agents import ChatAgent +from camel.configs import BurnCloudConfig +from camel.models import ModelFactory +from camel.toolkits import FunctionTool +from camel.types import ModelPlatformType, ModelType + + +def estimate_solar_payback( + capacity_kw: float, + install_cost_per_kw: float, + feed_in_tariff: float, + annual_sun_hours: int, +) -> dict[str, float]: + """Estimate solar ROI metrics with a simple linear payback model.""" + + upfront_cost = capacity_kw * install_cost_per_kw + annual_generation = capacity_kw * annual_sun_hours + annual_savings = annual_generation * feed_in_tariff + payback_years = round(upfront_cost / max(annual_savings, 1e-6), 2) + + return { + "upfront_cost": round(upfront_cost, 2), + "annual_generation_kwh": round(annual_generation, 2), + "annual_savings": round(annual_savings, 2), + "payback_years": payback_years, + } + + +def main() -> None: + tool = FunctionTool(estimate_solar_payback) + + model = ModelFactory.create( + model_platform=ModelPlatformType.BURNCLOUD, + model_type=ModelType.GPT_4O, + model_config_dict=BurnCloudConfig(temperature=0.15).as_dict(), + ) + + agent = ChatAgent( + system_message=( + "You are an energy investment advisor. Reference the " + "`estimate_solar_payback` tool outputs in your reasoning and " + "summarize recommendations in English." + ), + model=model, + tools=[tool], + ) + + user_msg = ( + "A 12 kW rooftop PV project in Shanghai costs 8,200 CNY per kW, " + "earns 0.48 CNY per kWh, and gets 1,500 effective sun hours per " + "year. Present the payback calculation as a table and conclude " + "whether it is a good investment." + ) + + response = agent.step(user_msg) + + print("User:\n", user_msg) + print("\nAssistant:\n", response.msgs[0].content) + + tool_calls = response.info.get("tool_calls") if response.info else None + if tool_calls: + print("\nTool call details:") + for call in tool_calls: + # ChatAgent may return either plain dicts (OpenAI schema) or + # ToolCallingRecord instances, so normalize both shapes. + if hasattr(call, "tool_name"): + print( + f"- id={getattr(call, 'tool_call_id', 'N/A')}, " + f"name={call.tool_name}" + ) + print(f" args={call.args}") + else: + call_dict = ( + call.as_dict() if hasattr(call, "as_dict") else call + ) + fn_meta = call_dict.get("function", {}) + print( + f"- id={call_dict.get('id')}, " + f"name={fn_meta.get('name')}" + ) + print(f" args={fn_meta.get('arguments')}") + + +if __name__ == "__main__": + main() diff --git a/test/models/test_burncloud_model.py b/test/models/test_burncloud_model.py new file mode 100644 index 0000000000..4b25106650 --- /dev/null +++ b/test/models/test_burncloud_model.py @@ -0,0 +1,46 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. 
========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= + +import pytest + +from camel.configs import BurnCloudConfig +from camel.models import BurnCloudModel +from camel.types import ModelType +from camel.utils import OpenAITokenCounter + + +@pytest.mark.model_backend +@pytest.mark.parametrize( + "model_type", + [ + ModelType.GPT_4O, + ModelType.GPT_4_5_PREVIEW, + ModelType.O3_MINI, + ], +) +def test_burncloud_model(model_type: ModelType, monkeypatch): + monkeypatch.setenv("BURNCLOUD_API_KEY", "test") + model = BurnCloudModel(model_type) + assert model.model_type == model_type + assert model.model_config_dict == BurnCloudConfig().as_dict() + assert isinstance(model.token_counter, OpenAITokenCounter) + assert isinstance(model.model_type.value_for_tiktoken, str) + assert isinstance(model.model_type.token_limit, int) + + +@pytest.mark.model_backend +def test_burncloud_model_stream_property(monkeypatch): + monkeypatch.setenv("BURNCLOUD_API_KEY", "test") + model = BurnCloudModel(ModelType.GPT_4O) + assert model.stream is False
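+
+
+@pytest.mark.model_backend
+def test_burncloud_model_url_default(monkeypatch):
+    # Sketch of a default-endpoint check; assumes the resolved base URL is
+    # exposed as ``model._url``, as in other OpenAI-compatible backends.
+    monkeypatch.setenv("BURNCLOUD_API_KEY", "test")
+    monkeypatch.delenv("BURNCLOUD_API_BASE_URL", raising=False)
+    model = BurnCloudModel(ModelType.GPT_4O)
+    assert model._url == "https://ai.burncloud.com/v1"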