From d7d95261b25ca8e505723c07c00fd9e631957afb Mon Sep 17 00:00:00 2001 From: RolandJAAI <38503289+RolandJAAI@users.noreply.github.com> Date: Thu, 16 Jan 2025 16:47:20 +0100 Subject: [PATCH 1/2] fix tool example with additional args --- docs/source/en/tutorials/tools.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/en/tutorials/tools.md b/docs/source/en/tutorials/tools.md index 41556fa33..5e6ced36a 100644 --- a/docs/source/en/tutorials/tools.md +++ b/docs/source/en/tutorials/tools.md @@ -131,7 +131,7 @@ And voilà, here's your image! 🏖️ -Then you can use this tool just like any other tool. For example, let's improve the prompt `a rabbit wearing a space suit` and generate an image of it. +Then you can use this tool just like any other tool. For example, let's improve the prompt `a rabbit wearing a space suit` and generate an image of it. This example also shows how you can pass additional arguments to the agent. ```python from smolagents import CodeAgent, HfApiModel @@ -140,7 +140,7 @@ model = HfApiModel("Qwen/Qwen2.5-Coder-32B-Instruct") agent = CodeAgent(tools=[image_generation_tool], model=model) agent.run( - "Improve this prompt, then generate an image of it.", prompt='A rabbit wearing a space suit' + "Improve this prompt, then generate an image of it.", additional_args={'user_prompt': 'A rabbit wearing a space suit'} ) ``` From 9beabfc65ebb9100872c1b39d23f73f18de8b515 Mon Sep 17 00:00:00 2001 From: RolandJAAI <38503289+RolandJAAI@users.noreply.github.com> Date: Fri, 17 Jan 2025 18:01:51 +0100 Subject: [PATCH 2/2] set ollama context length in example --- docs/source/en/guided_tour.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/source/en/guided_tour.md b/docs/source/en/guided_tour.md index 9db8ecdb9..cee4abad4 100644 --- a/docs/source/en/guided_tour.md +++ b/docs/source/en/guided_tour.md @@ -89,8 +89,9 @@ from smolagents import CodeAgent, LiteLLMModel model = LiteLLMModel( 
model_id="ollama_chat/llama3.2", # This model is a bit weak for agentic behaviours though - api_base="http://localhost:11434", # replace with remote open-ai compatible server if necessary + api_base="http://localhost:11434", # replace with 127.0.0.1:11434 or remote OpenAI-compatible server if necessary - api_key="YOUR_API_KEY" # replace with API key if necessary + api_key="YOUR_API_KEY", # replace with API key if necessary + num_ctx=8192 # ollama default is 2048 which will fail horribly. 8192 works for easy tasks, more is better. Check https://huggingface.co/spaces/NyxKrage/LLM-Model-VRAM-Calculator to calculate how much VRAM this will need for the selected model. ) agent = CodeAgent(tools=[], model=model, add_base_tools=True)