Skip to content

Commit

Permalink
dev trulens_evaluetor
Browse files Browse the repository at this point in the history
  • Loading branch information
lurenss committed Feb 22, 2024
1 parent ae92939 commit 665a914
Show file tree
Hide file tree
Showing 4 changed files with 100 additions and 0 deletions.
24 changes: 24 additions & 0 deletions examples/graph_evaluation_example.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
import os
from scrapegraphai.evaluetor import TrulensEvaluator
from dotenv import load_dotenv

# Pull environment variables (including OPENAI_APIKEY) from a local .env file.
load_dotenv()

# Language-model configuration shared by every evaluation run.
llm_config = {
    "api_key": os.getenv("OPENAI_APIKEY"),
    "model_name": "gpt-3.5-turbo",
}

# (prompt, source URL, llm config) triples to feed into SmartScraperGraph.
prompts = [
    "List me all the titles and project descriptions",
    "Who is the author of the project?",
    "What is the project about?",
]
target_url = "https://perinim.github.io/projects/"
list_of_inputs = [(prompt, target_url, llm_config) for prompt in prompts]

# Run the evaluator over every input and record TruLens feedback.
TrulensEvaluator().evaluate(list_of_inputs)
1 change: 1 addition & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -9,3 +9,4 @@ pandas==2.0.3
python-dotenv==1.0.1
tiktoken>=0.5.2,<0.6.0
tqdm==4.66.1
trulens_eval==0.23.0
4 changes: 4 additions & 0 deletions scrapegraphai/evaluetor/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
"""
__init__.py file for evaluetor folder
"""
from .trulens_evaluetor import TrulensEvaluator
71 changes: 71 additions & 0 deletions scrapegraphai/evaluetor/trulens_evaluetor.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
import os
from scrapegraphai.graphs import SmartScraperGraph
from openai import OpenAI
from trulens_eval import Feedback, OpenAI as fOpenAI, Tru, Provider, Select, TruBasicApp

class TrulensEvaluator:
    """
    Evaluates SmartScraperGraph runs with TruLens feedback functions.

    Attributes:
        tru: Tru instance that owns the feedback database and dashboard.
        f_relevance: OpenAI-provider relevance feedback (input vs. output).
        f_custom_function: Custom feedback scoring whether output looks like JSON.
        tru_llm_standalone_recorder: TruBasicApp instance used for recording.

    Methods:
        evaluate: Runs SmartScraperGraph on each input and records feedback.
        llm_standalone: App function wrapped and recorded by TruBasicApp.
    """

    def __init__(self):
        # Custom feedback: heuristic check that the recorded output is JSON-like.
        standalone = StandAlone()
        self.f_custom_function = Feedback(standalone.json_complaint).on(
            my_text_field=Select.RecordOutput
        )
        # TruLens' OpenAI provider reads OPENAI_API_KEY; mirror the project's
        # OPENAI_APIKEY variable into it before constructing the provider.
        os.environ["OPENAI_API_KEY"] = os.environ["OPENAI_APIKEY"]
        self.tru = Tru()
        self.tru.reset_database()  # start every evaluation from a clean database
        fopenai = fOpenAI()
        self.f_relevance = Feedback(fopenai.relevance).on_input_output()
        # BUGFIX: these were previously plain locals, so every later
        # `self.tru` / `self.f_*` / `self.tru_llm_standalone_recorder` access
        # raised AttributeError. They are now instance attributes. The unused
        # `client = OpenAI()` local was removed as well.
        self.tru_llm_standalone_recorder = TruBasicApp(
            self.llm_standalone,
            app_id="smart_scraper_evaluator",
            feedbacks=[self.f_relevance, self.f_custom_function],
        )

    def evaluate(self, graph_params: list[tuple[str, str, dict]]):
        """
        Evaluates Trulens using SmartScraperGraph and starts the dashboard.

        Args:
            graph_params: List of (prompt, source URL, llm config) tuples,
                each unpacked into a SmartScraperGraph constructor call.

        Returns:
            None
        """
        with self.tru_llm_standalone_recorder as recording:
            for params in graph_params:
                output = SmartScraperGraph(*params).run()
                # Record the (prompt, output) pair so feedbacks can score it.
                self.tru_llm_standalone_recorder.app(params[0], output)
        self.tru.run_dashboard()

    def llm_standalone(self, prompt, response):
        """
        Standalone function for Trulens evaluation. Private method.

        Args:
            prompt: Prompt for evaluation (echoed to stdout).
            response: Response from evaluation.

        Returns:
            str: Response converted to a string.
        """
        print(f"Prompt: {prompt}")
        return str(response)

"""
Class for standalone Trulens evaluation. Personalise
"""
class StandAlone(Provider):
def json_complaint(self, my_text_field: str) -> float:
if '{' in my_text_field and '}' in my_text_field and ':' in my_text_field:
return 1.0
else:
return 0.0

0 comments on commit 665a914

Please sign in to comment.