Skip to content

Commit ca909cf

Browse files
authored
Merge pull request #27 from VinciGit00/trulens-evaluetor
dev trulens_evaluetor
2 parents ae92939 + 665a914 commit ca909cf

File tree

4 files changed

+100
-0
lines changed

4 files changed

+100
-0
lines changed

examples/graph_evaluation_example.py

+24
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
import os

from dotenv import load_dotenv

from scrapegraphai.evaluetor import TrulensEvaluator

load_dotenv()

# OpenAI credentials for the language model (loaded from .env)
openai_key = os.getenv("OPENAI_APIKEY")

llm_config = {
    "api_key": openai_key,
    "model_name": "gpt-3.5-turbo",
}

# Each entry is a (prompt, source URL, model config) triple for one graph run.
list_of_inputs = [
    ("List me all the titles and project descriptions", "https://perinim.github.io/projects/", llm_config),
    ("Who is the author of the project?", "https://perinim.github.io/projects/", llm_config),
    ("What is the project about?", "https://perinim.github.io/projects/", llm_config),
]

# Build the evaluator and score SmartScraperGraph over every input.
trulens_evaluator = TrulensEvaluator()
trulens_evaluator.evaluate(list_of_inputs)

requirements.txt

+1
Original file line numberDiff line numberDiff line change
@@ -9,3 +9,4 @@ pandas==2.0.3
99
python-dotenv==1.0.1
1010
tiktoken>=0.5.2,<0.6.0
1111
tqdm==4.66.1
12+
trulens_eval==0.23.0

scrapegraphai/evaluetor/__init__.py

+4
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
"""
Package initialiser for the evaluetor folder.
"""
from .trulens_evaluetor import TrulensEvaluator
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,71 @@
1+
import os
2+
from scrapegraphai.graphs import SmartScraperGraph
3+
from openai import OpenAI
4+
from trulens_eval import Feedback, OpenAI as fOpenAI, Tru, Provider, Select, TruBasicApp
5+
6+
class TrulensEvaluator:
    """
    Evaluates SmartScraperGraph runs with TruLens feedback functions.

    Attributes:
        tru: Tru instance backing the evaluation database and dashboard.
        tru_llm_standalone_recorder: TruBasicApp instance that records runs.

    Methods:
        evaluate: Runs SmartScraperGraph on a list of inputs and records feedback.
        llm_standalone: Recorder entry point wrapping a single run.
    """

    def __init__(self):
        standalone = StandAlone()
        # Custom feedback: scores 1.0 when the recorded output looks like JSON.
        f_custom_function = Feedback(standalone.json_complaint).on(
            my_text_field=Select.RecordOutput
        )
        # trulens_eval reads OPENAI_API_KEY; this project stores OPENAI_APIKEY.
        os.environ["OPENAI_API_KEY"] = os.environ["OPENAI_APIKEY"]
        # Bug fix: tru must be an instance attribute — evaluate() calls
        # self.tru.run_dashboard(), but the original bound it as a local.
        self.tru = Tru()
        self.tru.reset_database()
        fopenai = fOpenAI()
        # Bug fix: the original referenced self.fopenai / self.f_relevance /
        # self.f_custom_function, none of which were ever assigned
        # (AttributeError on construction). Use the locals built above.
        f_relevance = Feedback(fopenai.relevance).on_input_output()
        # Bug fix: the recorder must be stored on self for evaluate() to use it.
        self.tru_llm_standalone_recorder = TruBasicApp(
            self.llm_standalone,
            app_id="smart_scraper_evaluator",
            feedbacks=[f_relevance, f_custom_function],
        )
        # NOTE: the original also created an unused OpenAI() client; removed.

    def evaluate(self, graph_params: list[tuple[str, str, dict]]):
        """
        Evaluates SmartScraperGraph on each input and starts the dashboard.

        Args:
            graph_params: List of (prompt, source, llm_config) tuples, each
                passed positionally to SmartScraperGraph.

        Returns:
            None
        """
        with self.tru_llm_standalone_recorder as recording:
            for params in graph_params:
                output = SmartScraperGraph(*params).run()
                # Record (prompt, output) so the feedback functions can score it.
                self.tru_llm_standalone_recorder.app(params[0], output)
        self.tru.run_dashboard()

    def llm_standalone(self, prompt, response):
        """
        Recorder entry point for a single (prompt, response) pair.

        Args:
            prompt: Prompt used for the run (logged for traceability).
            response: Graph output to record.

        Returns:
            str: The response cast to a string.
        """
        print(f"Prompt: {prompt}")
        return str(response)
61+
62+
class StandAlone(Provider):
    """
    Custom TruLens feedback provider. Personalise as needed.

    (Fix: this description was originally a bare module-level string sitting
    above the class — a no-op expression — rather than the class docstring.)
    """

    def json_complaint(self, my_text_field: str) -> float:
        """
        Score whether the recorded output looks like a JSON object.

        Args:
            my_text_field: Text to inspect (bound to Select.RecordOutput).

        Returns:
            float: 1.0 if the text contains '{', '}' and ':', else 0.0.
        """
        # Cheap heuristic: braces plus a colon suggest a JSON object literal.
        if '{' in my_text_field and '}' in my_text_field and ':' in my_text_field:
            return 1.0
        return 0.0
71+

0 commit comments

Comments
 (0)