Merge pull request #910 from ScrapeGraphAI/codebeaver/pre/beta-904

codebeaver/pre/beta-904 - Unit Tests

VinciGit00 authored Jan 29, 2025
2 parents 02629e2 + 16c688f commit cdb2f61
Showing 3 changed files with 118 additions and 9 deletions.
55 changes: 47 additions & 8 deletions tests/graphs/abstract_graph_test.py
@@ -1,18 +1,16 @@
"""
Tests for the AbstractGraph.
"""

from unittest.mock import patch

import pytest

from langchain_aws import ChatBedrock
from langchain_ollama import ChatOllama
from langchain_openai import AzureChatOpenAI, ChatOpenAI

from scrapegraphai.graphs import AbstractGraph, BaseGraph
from scrapegraphai.models import DeepSeek, OneApi
from scrapegraphai.nodes import FetchNode, ParseNode
from unittest.mock import Mock, patch

"""
Tests for the AbstractGraph.
"""

class TestGraph(AbstractGraph):
def __init__(self, prompt: str, config: dict):
@@ -50,7 +48,6 @@ def run(self) -> str:

return self.final_state.get("answer", "No answer found.")


class TestAbstractGraph:
@pytest.mark.parametrize(
"llm_config, expected_model",
@@ -161,3 +158,45 @@ async def test_run_safe_async(self):
result = await graph.run_safe_async()
assert result == "Async result"
mock_run.assert_called_once()

def test_create_llm_with_custom_model_instance(self):
"""
Test that the _create_llm method correctly uses a custom model instance
when provided in the configuration.
"""
mock_model = Mock()
mock_model.model_name = "custom-model"

config = {
"llm": {
"model_instance": mock_model,
"model_tokens": 1000,
"model": "custom/model"
}
}

graph = TestGraph("Test prompt", config)

assert graph.llm_model == mock_model
assert graph.model_token == 1000

def test_set_common_params(self):
"""
Test that the set_common_params method correctly updates the configuration
of all nodes in the graph.
"""
# Create a mock graph with mock nodes
mock_graph = Mock()
mock_node1 = Mock()
mock_node2 = Mock()
mock_graph.nodes = [mock_node1, mock_node2]

# Create a TestGraph instance with the mock graph
with patch('scrapegraphai.graphs.abstract_graph.AbstractGraph._create_graph', return_value=mock_graph):
graph = TestGraph("Test prompt", {"llm": {"model": "openai/gpt-3.5-turbo", "openai_api_key": "sk-test"}})

# Call set_common_params with test parameters
test_params = {"param1": "value1", "param2": "value2"}
graph.set_common_params(test_params)

# Assert that update_config was called on each node with the correct parameters
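The test above relies on set_common_params forwarding the shared parameters to every node in the graph. A minimal sketch of that propagation pattern, assuming each node exposes an update_config(params, overwrite) hook and that the graph is stored on self.graph (the overwrite argument and attribute name are assumptions, not confirmed by this diff):

    # Minimal sketch of the pattern the test exercises (assumed signature)
    def set_common_params(self, params: dict, overwrite: bool = False):
        for node in self.graph.nodes:
            node.update_config(params, overwrite)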
46 changes: 46 additions & 0 deletions tests/test_json_scraper_graph.py
@@ -49,4 +49,50 @@ def test_json_scraper_graph_with_directory(self, mock_create_llm, mock_generate_
mock_execute.assert_called_once_with({"user_prompt": "Summarize the data from all JSON files", "json_dir": "path/to/json/directory"})
mock_fetch_node.assert_called_once()
mock_generate_answer_node.assert_called_once()
mock_create_llm.assert_called_once_with({"model": "test-model", "temperature": 0})

@pytest.fixture
def mock_llm_model(self):
return Mock()

@pytest.fixture
def mock_embedder_model(self):
return Mock()

@patch('scrapegraphai.graphs.json_scraper_graph.FetchNode')
@patch('scrapegraphai.graphs.json_scraper_graph.GenerateAnswerNode')
@patch.object(JSONScraperGraph, '_create_llm')
def test_json_scraper_graph_with_single_file(self, mock_create_llm, mock_generate_answer_node, mock_fetch_node, mock_llm_model, mock_embedder_model):
"""
Test JSONScraperGraph with a single JSON file.
This test checks if the graph correctly handles a single JSON file input
and processes it to generate an answer.
"""
# Mock the _create_llm method to return a mock LLM model
mock_create_llm.return_value = mock_llm_model

# Mock the execute method of BaseGraph
with patch('scrapegraphai.graphs.json_scraper_graph.BaseGraph.execute') as mock_execute:
mock_execute.return_value = ({"answer": "Mocked answer for single JSON file"}, {})

# Create a JSONScraperGraph instance with a single JSON file
graph = JSONScraperGraph(
prompt="Analyze the data from the JSON file",
source="path/to/single/file.json",
config={"llm": {"model": "test-model", "temperature": 0}},
schema=BaseModel
)

# Set mocked embedder model
graph.embedder_model = mock_embedder_model

# Run the graph
result = graph.run()

# Assertions
assert result == "Mocked answer for single JSON file"
assert graph.input_key == "json"
mock_execute.assert_called_once_with({"user_prompt": "Analyze the data from the JSON file", "json": "path/to/single/file.json"})
mock_fetch_node.assert_called_once()
mock_generate_answer_node.assert_called_once()
mock_create_llm.assert_called_once_with({"model": "test-model", "temperature": 0})
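Outside the test harness, the same single-file path can be driven directly. A minimal usage sketch mirroring the constructor call above, assuming JSONScraperGraph is importable from scrapegraphai.graphs (model name, temperature, and file path are placeholders; a real run also needs valid LLM credentials in the config):

    # Usage sketch based on the constructor arguments shown in the test (placeholder values)
    from scrapegraphai.graphs import JSONScraperGraph

    graph = JSONScraperGraph(
        prompt="Analyze the data from the JSON file",
        source="path/to/single/file.json",
        config={"llm": {"model": "test-model", "temperature": 0}},
    )
    print(graph.run())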
26 changes: 25 additions & 1 deletion tests/test_search_graph.py
@@ -33,4 +33,28 @@ def test_get_considered_urls(self, mock_create_llm, mock_base_graph, urls):
search_graph.run()

# Assert
assert search_graph.get_considered_urls() == urls

@patch('scrapegraphai.graphs.search_graph.BaseGraph')
@patch('scrapegraphai.graphs.abstract_graph.AbstractGraph._create_llm')
def test_run_no_answer_found(self, mock_create_llm, mock_base_graph):
"""
Test that the run() method returns "No answer found." when the final state
doesn't contain an "answer" key.
"""
# Arrange
prompt = "Test prompt"
config = {"llm": {"model": "test-model"}}

# Mock the _create_llm method to return a MagicMock
mock_create_llm.return_value = MagicMock()

# Mock the execute method to set the final_state without an "answer" key
mock_base_graph.return_value.execute.return_value = ({"urls": []}, {})

# Act
search_graph = SearchGraph(prompt, config)
result = search_graph.run()

# Assert
assert result == "No answer found."
