Commit: Merge branch 'master' into issue-1698
Wendong-Fan authored Mar 5, 2025
2 parents 3b0fab8 + cd3bf60 commit e35de37
Showing 22 changed files with 1,679 additions and 3,025 deletions.
2 changes: 1 addition & 1 deletion .github/ISSUE_TEMPLATE/bug_report.yml
@@ -26,7 +26,7 @@ body:
attributes:
label: What version of camel are you using?
description: Run command `python3 -c 'print(__import__("camel").__version__)'` in your shell and paste the output here.
-placeholder: E.g., 0.2.22
+placeholder: E.g., 0.2.23a0
validations:
required: true

2 changes: 1 addition & 1 deletion camel/__init__.py
@@ -14,7 +14,7 @@

from camel.logger import disable_logging, enable_logging, set_log_level

-__version__ = '0.2.22'
+__version__ = '0.2.23a0'

__all__ = [
'__version__',
18 changes: 16 additions & 2 deletions camel/agents/chat_agent.py
@@ -694,11 +694,18 @@ def _get_model_response(
f"index: {self.model_backend.current_model_index}",
exc_info=exc,
)
-if not response:
+error_info = str(exc)
+
+if not response and self.model_backend.num_models > 1:
    raise ModelProcessingError(
        "Unable to process messages: none of the provided models "
        "ran successfully."
    )
+elif not response:
+    raise ModelProcessingError(
+        "Unable to process messages: the only provided model "
+        f"did not run successfully. Error: {error_info}"
+    )

logger.info(
f"Model {self.model_backend.model_type}, "
@@ -732,11 +739,18 @@ async def _aget_model_response(
f"index: {self.model_backend.current_model_index}",
exc_info=exc,
)
-if not response:
+error_info = str(exc)
+
+if not response and self.model_backend.num_models > 1:
    raise ModelProcessingError(
        "Unable to process messages: none of the provided models "
        "ran successfully."
    )
+elif not response:
+    raise ModelProcessingError(
+        "Unable to process messages: the only provided model "
+        f"did not run successfully. Error: {error_info}"
+    )

logger.info(
f"Model {self.model_backend.model_type}, "
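For context, a minimal sketch of how the new error branches surface to a caller; the ModelProcessingError import path is an assumption, and the agent setup is illustrative:

from camel.agents import ChatAgent
# Assumption: ModelProcessingError is exported from camel.models in this
# version; adjust the import path if it lives elsewhere.
from camel.models import ModelProcessingError

agent = ChatAgent("You are a helpful assistant.")  # one backing model
try:
    agent.step("Hello")
except ModelProcessingError as e:
    # With a single model, the message now embeds the underlying error text;
    # with multiple models, it reports that none ran successfully.
    print(e)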
8 changes: 8 additions & 0 deletions camel/configs/openai_config.py
@@ -94,6 +94,13 @@ class ChatGPTConfig(BaseConfig):
forces the model to call that tool. :obj:`"none"` is the default
when no tools are present. :obj:`"auto"` is the default if tools
are present.
+        reasoning_effort(str, optional): A parameter specifying the level of
+            reasoning used by certain model types. Valid values are
+            :obj:`"low"`, :obj:`"medium"`, or :obj:`"high"`. If set, it is
+            only applied to model types that support it (e.g., :obj:`o1`,
+            :obj:`o1mini`, :obj:`o1preview`, :obj:`o3mini`). If not provided
+            or if the model type does not support it, this parameter is
+            ignored. (default: :obj:`None`)
"""

temperature: float = 0.2 # openai default: 1.0
@@ -108,6 +115,7 @@ class ChatGPTConfig(BaseConfig):
logit_bias: Dict = Field(default_factory=dict)
user: str = ""
tool_choice: Optional[Union[Dict[str, str], str]] = None
+reasoning_effort: Optional[str] = None


OPENAI_API_PARAMS = {param for param in ChatGPTConfig.model_fields.keys()}
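A minimal sketch of how the new field might be used; as_dict() is assumed from how other camel configs are typically consumed, and the values are illustrative:

from camel.configs import ChatGPTConfig

# Hypothetical usage: request deeper reasoning on a model type that
# supports it; unsupported model types simply ignore the field.
config = ChatGPTConfig(temperature=0.2, reasoning_effort="high")
print(config.as_dict())  # assumption: BaseConfig exposes as_dict()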
4 changes: 2 additions & 2 deletions camel/datagen/self_improving_cot.py
@@ -161,13 +161,13 @@ def __init__(
# Initialize output file with empty results if path is specified
if self.output_path:
with open(self.output_path, 'w') as f:
-json.dump({'traces': []}, f, indent=2)
+json.dump({'traces': []}, f, indent=2, ensure_ascii=False)
self.lock = threading.Lock()

def safe_write_json(self, file_path, data):
temp_path = file_path + ".tmp"
with open(temp_path, "w") as f:
-json.dump(data, f, indent=2)
+json.dump(data, f, indent=2, ensure_ascii=False)
os.replace(temp_path, file_path)

def clean_json(self, data):
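Two details worth noting here: ensure_ascii=False keeps non-ASCII characters human-readable instead of escaping them as \uXXXX sequences, and writing to a temporary file followed by os.replace makes the update atomic, mirroring safe_write_json above. A standalone sketch of the same pattern:

import json
import os

def atomic_write_json(path: str, data) -> None:
    # Write to a temporary file first, then atomically swap it into place,
    # so a crash mid-write never leaves a truncated JSON file behind.
    tmp = path + ".tmp"
    with open(tmp, "w", encoding="utf-8") as f:
        json.dump(data, f, indent=2, ensure_ascii=False)
    os.replace(tmp, path)  # atomic rename on POSIX and Windows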
9 changes: 9 additions & 0 deletions camel/models/model_manager.py
@@ -117,6 +117,15 @@ def current_model_index(self) -> int:
"""
return self.models.index(self.current_model)

+@property
+def num_models(self) -> int:
+    r"""Return the number of models in the manager.
+
+    Returns:
+        int: The number of models available in the model manager.
+    """
+    return len(self.models)

@property
def token_limit(self):
r"""Returns the maximum token limit for current model.
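A hedged sketch of the new property in use; the ModelFactory arguments are illustrative and not part of this diff:

from camel.models import ModelFactory, ModelManager
from camel.types import ModelPlatformType, ModelType

# Assumption: ModelManager accepts a list of backends directly.
m1 = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI, model_type=ModelType.GPT_4O_MINI
)
m2 = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI, model_type=ModelType.GPT_4O
)
manager = ModelManager([m1, m2])
assert manager.num_models == 2  # drives the error branches in chat_agent.py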
114 changes: 103 additions & 11 deletions camel/models/sglang_model.py
@@ -12,6 +12,7 @@
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import logging
+import subprocess
import threading
import time
from typing import Any, Dict, List, Optional, Type, Union
@@ -94,11 +95,6 @@ def __init__(
)

def _start_server(self) -> None:
-from sglang.utils import (  # type: ignore[import-untyped]
-    execute_shell_command,
-    wait_for_server,
-)

try:
if not self._url:
cmd = (
@@ -108,10 +104,10 @@ def _start_server(self) -> None:
f"--host 0.0.0.0"
)

-server_process = execute_shell_command(cmd)
-wait_for_server("http://localhost:30000")
+server_process = _execute_shell_command(cmd)
+_wait_for_server("http://localhost:30000")
self._url = "http://127.0.0.1:30000/v1"
-self.server_process = server_process
+self.server_process = server_process  # type: ignore[assignment]
# Start the inactivity monitor in a background thread
self._inactivity_thread = threading.Thread(
target=self._monitor_inactivity, daemon=True
@@ -138,8 +134,6 @@ def _monitor_inactivity(self):
r"""Monitor whether the server process has been inactive for over 10
minutes.
"""
-from sglang.utils import terminate_process

while True:
# Check every 10 seconds
time.sleep(10)
@@ -150,7 +144,7 @@
time.time() - self.last_run_time > 600
):
if self.server_process:
-terminate_process(self.server_process)
+_terminate_process(self.server_process)
self.server_process = None
self._client = None # Invalidate the client
logging.info(
@@ -270,3 +264,101 @@ def stream(self) -> bool:
bool: Whether the model is in stream mode.
"""
return self.model_config_dict.get('stream', False)


+# Below are helper functions from sglang.utils
+def _terminate_process(process):
+    _kill_process_tree(process.pid)
+
+
+def _kill_process_tree(
+    parent_pid, include_parent: bool = True, skip_pid: Optional[int] = None
+):
+    r"""Kill the process and all its child processes."""
+    import os
+    import signal
+
+    import psutil
+
+    if parent_pid is None:
+        parent_pid = os.getpid()
+        include_parent = False
+
+    try:
+        itself = psutil.Process(parent_pid)
+    except psutil.NoSuchProcess:
+        return
+
+    children = itself.children(recursive=True)
+    for child in children:
+        if child.pid == skip_pid:
+            continue
+        try:
+            child.kill()
+        except psutil.NoSuchProcess:
+            pass
+
+    if include_parent:
+        try:
+            itself.kill()
+
+            # Sometimes processes cannot be killed with SIGKILL,
+            # so we send an additional signal to kill them.
+            itself.send_signal(signal.SIGQUIT)
+        except psutil.NoSuchProcess:
+            pass
+
+
+def _execute_shell_command(command: str) -> subprocess.Popen:
+    r"""Execute a shell command and return the process handle.
+
+    Args:
+        command: Shell command as a string (can include \\ line continuations)
+
+    Returns:
+        subprocess.Popen: Process handle
+    """
+    # Replace backslash-newline continuations with spaces and split
+    command = command.replace("\\\n", " ").replace("\\", " ")
+    parts = command.split()
+
+    return subprocess.Popen(parts, text=True, stderr=subprocess.STDOUT)
+
+
+def _wait_for_server(base_url: str, timeout: Optional[int] = None) -> None:
+    r"""Wait for the server to be ready by polling the /v1/models endpoint.
+
+    Args:
+        base_url: The base URL of the server
+        timeout: Maximum time to wait in seconds. None means wait forever.
+    """
+    import requests
+
+    start_time = time.time()
+    while True:
+        try:
+            response = requests.get(
+                f"{base_url}/v1/models",
+                headers={"Authorization": "Bearer None"},
+            )
+            if response.status_code == 200:
+                time.sleep(5)
+                print(
+                    """\n
+                    NOTE: Typically, the server runs in a separate terminal.
+                    In this notebook, we run the server and notebook code
+                    together, so their outputs are combined.
+                    To improve clarity, the server logs are displayed in the
+                    original black color, while the notebook outputs are
+                    highlighted in blue.
+                    """
+                )
+                break
+
+            if timeout and time.time() - start_time > timeout:
+                raise TimeoutError(
+                    "Server did not become ready within timeout period"
+                )
+        except requests.exceptions.RequestException:
+            time.sleep(1)
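Taken together, the vendored helpers implement a launch/poll/terminate lifecycle. A hedged sketch of how they fit together; the model path, port, and timeout are illustrative:

cmd = (
    "python -m sglang.launch_server "
    "--model-path meta-llama/Meta-Llama-3.1-8B-Instruct "
    "--port 30000 --host 0.0.0.0"
)
process = _execute_shell_command(cmd)
_wait_for_server("http://localhost:30000", timeout=300)
# ... send OpenAI-compatible requests to http://127.0.0.1:30000/v1 ...
_terminate_process(process)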
3 changes: 2 additions & 1 deletion camel/toolkits/ask_news_toolkit.py
@@ -67,7 +67,8 @@ def __init__(self, timeout: Optional[float] = None):
credentials are retrieved from environment variables.
"""
super().__init__(timeout=timeout)
-from asknews_sdk import AskNewsSDK

+from asknews_sdk import AskNewsSDK  # type: ignore[import-not-found]

client_id = os.environ.get("ASKNEWS_CLIENT_ID")
client_secret = os.environ.get("ASKNEWS_CLIENT_SECRET")
2 changes: 1 addition & 1 deletion camel/toolkits/openbb_toolkit.py
@@ -46,7 +46,7 @@ def __init__(self, timeout: Optional[float] = None) -> None:
super().__init__(timeout=timeout)
import os

-from openbb import obb
+from openbb import obb  # type: ignore[import-not-found]

self.client = obb
# Initialize OpenBB Hub account with access token
8 changes: 7 additions & 1 deletion camel/verifiers/__init__.py
@@ -13,5 +13,11 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from .base import BaseVerifier
from .models import VerificationOutcome, VerifierInput
+from .python_verifier import PythonVerifier

__all__ = ["BaseVerifier", "VerificationOutcome", "VerifierInput"]
__all__ = [
"BaseVerifier",
"VerificationOutcome",
"VerifierInput",
"PythonVerifier",
]
8 changes: 7 additions & 1 deletion camel/verifiers/models.py
@@ -19,12 +19,14 @@


class VerifierInput(BaseModel):
r"""Structured input to the verifier"""

llm_response: str = Field(
description="The LLM response to be verified."
"Needs to be in a format that the verifier can handle."
)
ground_truth: Optional[str] = Field(
description="The ground truth data, if available."
None, description="The ground truth data, if available."
)


@@ -36,6 +38,10 @@ class VerificationOutcome(Enum):
ERROR = "error"
TIMEOUT = "timeout"

+def __bool__(self):
+    r"""Only VerificationOutcome.SUCCESS is truthy; others are falsy."""
+    return self is VerificationOutcome.SUCCESS


class VerificationResult(BaseModel):
r"""Structured result from a verification."""
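With __bool__ defined, callers can branch on an outcome directly. A small sketch:

from camel.verifiers import VerificationOutcome

outcome = VerificationOutcome.SUCCESS
if outcome:  # truthy only for SUCCESS
    print("verified")

assert not VerificationOutcome.FAILURE
assert not VerificationOutcome.ERROR
assert not VerificationOutcome.TIMEOUT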