Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update agents.py #104

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
240 changes: 91 additions & 149 deletions salesgpt/agents.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,7 @@
from copy import deepcopy
from typing import Any, Callable, Dict, List, Union

from langchain.agents import (AgentExecutor, LLMSingleActionAgent,
create_openai_tools_agent)
from langchain.agents import AgentExecutor, LLMSingleActionAgent, create_openai_tools_agent
from langchain.chains import LLMChain, RetrievalQA
from langchain.chains.base import Chain
from langchain_community.chat_models import ChatLiteLLM
Expand All @@ -21,7 +20,6 @@

def _create_retry_decorator(llm: Any) -> Callable[[Any], Any]:
import openai

errors = [
openai.Timeout,
openai.APIError,
Expand All @@ -39,13 +37,15 @@ class SalesGPT(Chain):
conversation_stage_id: str = "1"
current_conversation_stage: str = CONVERSATION_STAGES.get("1")
stage_analyzer_chain: StageAnalyzerChain = Field(...)
sales_agent_executor: Union[AgentExecutor, None] = Field(...)
knowledge_base: Union[RetrievalQA, None] = Field(...)
sales_agent_executor: Union[AgentExecutor, None] = Field(
default=None) # Adjusted to have a default value of None
knowledge_base: Union[RetrievalQA, None] = Field(
default=None) # Adjusted to have a default value of None
sales_conversation_utterance_chain: SalesConversationChain = Field(...)
conversation_stage_dict: Dict = CONVERSATION_STAGES

model_name: str = "gpt-3.5-turbo-0613"

# Adjusted default value to match the need for an explicit enabling
use_tools: bool = False
salesperson_name: str = "Ted Lasso"
salesperson_role: str = "Business Development Representative"
Expand All @@ -55,16 +55,22 @@ class SalesGPT(Chain):
conversation_purpose: str = "find out whether they are looking to achieve better sleep via buying a premier mattress."
conversation_type: str = "call"

def retrieve_conversation_stage(self, key):
return self.conversation_stage_dict.get(key, "1")

@property
def input_keys(self) -> List[str]:
return []

@property
def output_keys(self) -> List[str]:
return []
def __init__(self, **kwargs):
super().__init__(**kwargs) # Ensure proper initialization of the base class
if self.use_tools:
# Assuming `product_catalog` is provided through `kwargs` if needed
self.product_catalog = kwargs.get(
'product_catalog', 'default_catalog_path.txt')
self.knowledge_base = setup_knowledge_base(self.product_catalog)
self.tools = get_tools(self.knowledge_base)
self.initialize_tool_executor()

def initialize_tool_executor(self):
if self.use_tools:
self.sales_agent_executor = self.setup_with_tools()
else:
# Ensuring this is explicitly set to None if not using tools
self.sales_agent_executor = None

@time_logger
def seed_agent(self):
Expand All @@ -75,13 +81,12 @@ def seed_agent(self):
@time_logger
def determine_conversation_stage(self):
self.conversation_stage_id = self.stage_analyzer_chain.run(
conversation_history="\n".join(self.conversation_history).rstrip("\n"),
conversation_history="\n".join(
self.conversation_history).rstrip("\n"),
conversation_stage_id=self.conversation_stage_id,
conversation_stages="\n".join(
[
str(key) + ": " + str(value)
for key, value in CONVERSATION_STAGES.items()
]
[str(key) + ": " + str(value)
for key, value in CONVERSATION_STAGES.items()]
),
)

Expand All @@ -93,29 +98,18 @@ def determine_conversation_stage(self):
print(f"Conversation Stage: {self.current_conversation_stage}")

def human_step(self, human_input):
# process human input
human_input = "User: " + human_input + " <END_OF_TURN>"
self.conversation_history.append(human_input)

@time_logger
def step(self, stream: bool = False):
"""
Args:
stream (bool): whether or not return
streaming generator object to manipulate streaming chunks in downstream applications.
"""
if not stream:
self._call(inputs={})
else:
return self._streaming_generator()

@time_logger
def astep(self, stream: bool = False):
"""
Args:
stream (bool): whether or not return
streaming generator object to manipulate streaming chunks in downstream applications.
"""
if not stream:
self._acall(inputs={})
else:
Expand All @@ -127,9 +121,6 @@ def acall(self, *args, **kwargs):

@time_logger
def _prep_messages(self):
"""
Helper function to prepare messages to be passed to a streaming generator.
"""
prompt = self.sales_conversation_utterance_chain.prep_prompts(
[
dict(
Expand All @@ -148,7 +139,8 @@ def _prep_messages(self):

inception_messages = prompt[0][0].to_messages()

message_dict = {"role": "system", "content": inception_messages[0].content}
message_dict = {"role": "system",
"content": inception_messages[0].content}

if self.sales_conversation_utterance_chain.verbose:
print("\033[92m" + inception_messages[0].content + "\033[0m")
Expand All @@ -169,9 +161,7 @@ def _streaming_generator(self):
# Now I can loop through the output in chunks:
>> for chunk in streaming_generator:
Out: Chunk 1, Chunk 2, ... etc.
See: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb
"""

messages = self._prep_messages()

return self.sales_conversation_utterance_chain.llm.completion_with_retry(
Expand All @@ -187,7 +177,6 @@ async def acompletion_with_retry(self, llm: Any, **kwargs: Any) -> Any:

@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await acompletion(**kwargs)

return await _completion_with_retry(**kwargs)
Expand All @@ -210,9 +199,7 @@ async def _astreaming_generator(self):
>> async for chunk in streaming_generator:
await chunk ...
Out: Chunk 1, Chunk 2, ... etc.
See: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb
"""

messages = self._prep_messages()

return await self.acompletion_with_retry(
Expand All @@ -225,117 +212,72 @@ async def _astreaming_generator(self):

def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Run one step of the sales agent."""
if self.sales_agent_executor is None:
print("sales_agent_executor is not initialized.")
return {}
# Prepare inputs based on the current conversation context and other parameters.
ai_message = self.sales_agent_executor.invoke(inputs)
# Further implementation to process the AI message and integrate it into the conversation history...

def setup_with_tools(self) -> AgentExecutor:
"""
Setup the sales agent executor with tools. This is a placeholder for the actual
implementation, which will configure and return an AgentExecutor instance
using the specified tools and configurations.
"""
# This method will be further elaborated upon reaching the specified point in your instructions.

def setup_with_tools(self) -> AgentExecutor:
tools = get_tools(self.knowledge_base)
prompt = CustomPromptTemplateForTools(
template=SALES_AGENT_TOOLS_PROMPT,
tools_getter=lambda: tools,
input_variables=[
"input",
"intermediate_steps",
"salesperson_name",
"salesperson_role",
"company_name",
"company_business",
"company_values",
"conversation_purpose",
"conversation_type",
"conversation_history",
],
)
llm_chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)

# override inputs temporarily
inputs = {
"input": "",
"conversation_stage": self.current_conversation_stage,
"conversation_history": "\n".join(self.conversation_history),
"salesperson_name": self.salesperson_name,
"salesperson_role": self.salesperson_role,
"company_name": self.company_name,
"company_business": self.company_business,
"company_values": self.company_values,
"conversation_purpose": self.conversation_purpose,
"conversation_type": self.conversation_type,
}

# Generate agent's utterance
if self.use_tools:
ai_message = self.sales_agent_executor.invoke(inputs)
output = ai_message["output"]
else:
ai_message = self.sales_conversation_utterance_chain.invoke(inputs)
output = ai_message["text"]

# Add agent's response to conversation history
agent_name = self.salesperson_name
output = agent_name + ": " + output
if "<END_OF_TURN>" not in output:
output += " <END_OF_TURN>"
self.conversation_history.append(output)
print(output.replace("<END_OF_TURN>", ""))
return ai_message
tool_names = [tool.name for tool in tools]

# WARNING: this output parser is NOT reliable yet
# It makes assumptions about output from LLM which can break and throw an error
output_parser = SalesConvoOutputParser(ai_prefix=self.salesperson_name)

sales_agent_with_tools = LLMSingleActionAgent(
llm_chain=llm_chain,
output_parser=output_parser,
stop=["\nObservation:"],
allowed_tools=tool_names,
)

sales_agent_executor = AgentExecutor.from_agent_and_tools(
agent=sales_agent_with_tools, tools=tools, verbose=self.verbose
)
return sales_agent_executor

@classmethod
@time_logger
def from_llm(cls, llm: ChatLiteLLM, verbose: bool = False, **kwargs) -> "SalesGPT":
"""Initialize the SalesGPT Controller."""
stage_analyzer_chain = StageAnalyzerChain.from_llm(llm, verbose=verbose)
if "use_custom_prompt" in kwargs.keys() and kwargs["use_custom_prompt"] is True:
use_custom_prompt = deepcopy(kwargs["use_custom_prompt"])
custom_prompt = deepcopy(kwargs["custom_prompt"])

# clean up
del kwargs["use_custom_prompt"]
del kwargs["custom_prompt"]

sales_conversation_utterance_chain = SalesConversationChain.from_llm(
llm,
verbose=verbose,
use_custom_prompt=use_custom_prompt,
custom_prompt=custom_prompt,
)

else:
sales_conversation_utterance_chain = SalesConversationChain.from_llm(
llm, verbose=verbose
)

if "use_tools" in kwargs.keys() and (
kwargs["use_tools"] == "True" or kwargs["use_tools"] is True
):
# set up agent with tools
product_catalog = kwargs["product_catalog"]
knowledge_base = setup_knowledge_base(product_catalog)
tools = get_tools(knowledge_base)

prompt = CustomPromptTemplateForTools(
template=SALES_AGENT_TOOLS_PROMPT,
tools_getter=lambda x: tools,
# This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically
# This includes the `intermediate_steps` variable because that is needed
input_variables=[
"input",
"intermediate_steps",
"salesperson_name",
"salesperson_role",
"company_name",
"company_business",
"company_values",
"conversation_purpose",
"conversation_type",
"conversation_history",
],
)
llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)

tool_names = [tool.name for tool in tools]

# WARNING: this output parser is NOT reliable yet
## It makes assumptions about output from LLM which can break and throw an error
output_parser = SalesConvoOutputParser(ai_prefix=kwargs["salesperson_name"])

sales_agent_with_tools = LLMSingleActionAgent(
llm_chain=llm_chain,
output_parser=output_parser,
stop=["\nObservation:"],
allowed_tools=tool_names,
)

sales_agent_executor = AgentExecutor.from_agent_and_tools(
agent=sales_agent_with_tools, tools=tools, verbose=verbose
)
else:
sales_agent_executor = None
knowledge_base = None

return cls(
stage_analyzer_chain=stage_analyzer_chain,
sales_conversation_utterance_chain=sales_conversation_utterance_chain,
sales_agent_executor=sales_agent_executor,
knowledge_base=knowledge_base,
model_name=llm.model,
verbose=verbose,
**kwargs,
)
instance = cls(verbose=verbose, **kwargs)
instance.llm = llm # Assuming `llm` is an instance of ChatLiteLLM or similar
instance.verbose = verbose
if kwargs.get("use_tools", False):
instance.use_tools = True
instance.product_catalog = kwargs.get(
'product_catalog', 'default_catalog_path.txt')
instance.knowledge_base = setup_knowledge_base(
instance.product_catalog)
instance.tools = get_tools(instance.knowledge_base)
instance.initialize_tool_executor()
return instance