diff --git a/autogen/import_utils.py b/autogen/import_utils.py
index 1f8e408a2c..8e772df113 100644
--- a/autogen/import_utils.py
+++ b/autogen/import_utils.py
@@ -268,7 +268,7 @@ def decorator(o: T) -> T:
     return decorator
 
 
-def skip_on_missing_imports(modules: Union[str, Iterable[str]], dep_target: str) -> Callable[[T], T]:
+def skip_on_missing_imports(modules: Union[str, Iterable[str]], dep_target: Optional[str] = None) -> Callable[[T], T]:
     """Decorator to skip a test if an optional module is missing
 
     Args:
@@ -287,8 +287,9 @@ def decorator(o: T) -> T:
     def decorator(o: T) -> T:
         import pytest
 
+        install_target = "" if dep_target is None else f"[{dep_target}]"
         return pytest.mark.skip(  # type: ignore[return-value]
-            f"Missing module{'s' if len(missing_modules) > 1 else ''}: {', '.join(missing_modules)}. Install using 'pip install ag2[{dep_target}]'"
+            f"Missing module{'s' if len(missing_modules) > 1 else ''}: {', '.join(missing_modules)}. Install using 'pip install ag2{install_target}'"
         )(o)
 
     return decorator
diff --git a/test/agentchat/contrib/capabilities/test_image_generation_capability.py b/test/agentchat/contrib/capabilities/test_image_generation_capability.py
index 4002c12863..5b359f1f48 100644
--- a/test/agentchat/contrib/capabilities/test_image_generation_capability.py
+++ b/test/agentchat/contrib/capabilities/test_image_generation_capability.py
@@ -25,7 +25,6 @@
 with optional_import_block() as result:
     from PIL import Image
 
-skip_requirement = not result.is_successful
 
 filter_dict = {"model": ["gpt-4o-mini"]}
 
@@ -102,7 +101,7 @@ def test_dalle_image_generator(dalle_config: dict[str, Any]):
 # Using cartesian product to generate all possible combinations of resolution, quality, and prompt
 @pytest.mark.parametrize("gen_config_1", itertools.product(RESOLUTIONS, QUALITIES, PROMPTS))
 @pytest.mark.parametrize("gen_config_2", itertools.product(RESOLUTIONS, QUALITIES, PROMPTS))
-@pytest.mark.skipif(skip_requirement, reason="Dependencies are not installed.")
+@skip_on_missing_imports(["PIL"], "unknown")
 def test_dalle_image_generator_cache_key(
     dalle_config: dict[str, Any], gen_config_1: tuple[str, str, str], gen_config_2: tuple[str, str, str]
 ):
@@ -125,7 +124,7 @@ def test_dalle_image_generator_cache_key(
     assert cache_key_1 != cache_key_2
 
 
-@pytest.mark.skipif(skip_requirement, reason="Dependencies are not installed.")
+@skip_on_missing_imports(["PIL"], "unknown")
 def test_image_generation_capability_positive(monkeypatch, image_gen_capability):
     """Tests ImageGeneration capability to generate images by calling the ImageGenerator.
 
@@ -153,7 +152,7 @@ def test_image_generation_capability_positive(monkeypatch, image_gen_capability)
     assert auto_reply not in processed_message
 
 
-@pytest.mark.skipif(skip_requirement, reason="Dependencies are not installed.")
+@skip_on_missing_imports(["PIL"], "unknown")
 def test_image_generation_capability_negative(monkeypatch, image_gen_capability):
     """Tests ImageGeneration capability to generate images by calling the ImageGenerator.
@@ -181,7 +180,7 @@ def test_image_generation_capability_negative(monkeypatch, image_gen_capability)
     assert auto_reply == processed_message
 
 
-@pytest.mark.skipif(skip_requirement, reason="Dependencies are not installed.")
+@skip_on_missing_imports(["PIL"], "unknown")
 def test_image_generation_capability_cache(monkeypatch):
     """Tests ImageGeneration capability to cache the generated images."""
     test_image_size = (256, 256)
diff --git a/test/agentchat/contrib/capabilities/test_teachable_agent.py b/test/agentchat/contrib/capabilities/test_teachable_agent.py
index 59d9a7ab63..374cfcde30 100755
--- a/test/agentchat/contrib/capabilities/test_teachable_agent.py
+++ b/test/agentchat/contrib/capabilities/test_teachable_agent.py
@@ -11,17 +11,10 @@
 from autogen import ConversableAgent
 from autogen.agentchat.contrib.capabilities.teachability import Teachability
 from autogen.formatting_utils import colored
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import skip_on_missing_imports
 
 from ....conftest import Credentials
 
-with optional_import_block() as result:
-    import chromadb  # noqa: F401
-
-
-skip = not result.is_successful
-
-
 # Specify the model to use by uncommenting one of the following lines.
 # filter_dict={"model": ["gpt-4-1106-preview"]}
 # filter_dict={"model": ["gpt-4-0613"]}
@@ -130,10 +123,7 @@ def use_task_advice_pair_phrasing(credentials: Credentials):
 
 
 @pytest.mark.openai
-@pytest.mark.skipif(
-    skip,
-    reason="do not run if dependency is not installed or requested to skip",
-)
+@skip_on_missing_imports(["chromadb"], "teachable")
 def test_teachability_code_paths(credentials_gpt_4o_mini: Credentials):
     """Runs this file's unit tests."""
     total_num_errors, total_num_tests = 0, 0
@@ -162,10 +152,7 @@ def test_teachability_code_paths(credentials_gpt_4o_mini: Credentials):
 
 
 @pytest.mark.openai
-@pytest.mark.skipif(
-    skip,
-    reason="do not run if dependency is not installed or requested to skip",
-)
+@skip_on_missing_imports(["chromadb"], "teachable")
 def test_teachability_accuracy(credentials_gpt_4o_mini: Credentials):
     """A very cheap and fast test of teachability accuracy."""
     print(colored("\nTEST TEACHABILITY ACCURACY", "light_cyan"))
diff --git a/test/agentchat/contrib/capabilities/test_vision_capability.py b/test/agentchat/contrib/capabilities/test_vision_capability.py
index dad08ab0f0..c54b3cf5ed 100644
--- a/test/agentchat/contrib/capabilities/test_vision_capability.py
+++ b/test/agentchat/contrib/capabilities/test_vision_capability.py
@@ -11,15 +11,12 @@
 from autogen.agentchat.contrib.capabilities.vision_capability import VisionCapability
 from autogen.agentchat.conversable_agent import ConversableAgent
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import optional_import_block, skip_on_missing_imports
 
 with optional_import_block() as result:
     from PIL import Image  # noqa: F401
 
-skip_test = not result.is_successful
-
-
 @pytest.fixture
 def lmm_config():
     return {
@@ -42,24 +39,14 @@ def conversable_agent():
     return ConversableAgent(name="conversable_agent", llm_config=False)
 
 
-@pytest.mark.skipif(
-    skip_test,
-    reason="do not run if dependency is not installed",
-)
+@skip_on_missing_imports(["PIL"], "unknown")
 def test_add_to_conversable_agent(vision_capability, conversable_agent):
     vision_capability.add_to_agent(conversable_agent)
     assert hasattr(conversable_agent, "process_last_received_message")
 
 
-@pytest.mark.skipif(
-    skip_test,
-    reason="do not run if dependency is not installed",
-)
+@skip_on_missing_imports(["PIL"], "unknown")
"unknown") @patch("autogen.oai.client.OpenAIWrapper") -@pytest.mark.skipif( - skip_test, - reason="do not run if dependency is not installed", -) def test_process_last_received_message_text(mock_lmm_client, vision_capability): mock_lmm_client.create.return_value = MagicMock(choices=[MagicMock(message=MagicMock(content="A description"))]) content = "Test message without image" @@ -76,10 +63,7 @@ def test_process_last_received_message_text(mock_lmm_client, vision_capability): "autogen.agentchat.contrib.capabilities.vision_capability.VisionCapability._get_image_caption", return_value="A sample image caption.", ) -@pytest.mark.skipif( - skip_test, - reason="do not run if dependency is not installed", -) +@skip_on_missing_imports(["PIL"], "unknown") def test_process_last_received_message_with_image( mock_get_caption, mock_convert_base64, mock_get_image_data, vision_capability ): @@ -105,10 +89,7 @@ def caption_func(image_url: str, image_data=None, lmm_client=None) -> str: return caption_func -@pytest.mark.skipif( - skip_test, - reason="do not run if dependency is not installed", -) +@skip_on_missing_imports(["PIL"], "unknown") class TestCustomCaptionFunc: def test_custom_caption_func_with_valid_url(self, custom_caption_func): """Test custom caption function with a valid image URL.""" diff --git a/test/agentchat/contrib/graph_rag/test_falkor_graph_rag.py b/test/agentchat/contrib/graph_rag/test_falkor_graph_rag.py index 1c7a17fc1a..14048ed4de 100644 --- a/test/agentchat/contrib/graph_rag/test_falkor_graph_rag.py +++ b/test/agentchat/contrib/graph_rag/test_falkor_graph_rag.py @@ -13,22 +13,21 @@ FalkorGraphQueryEngine, GraphStoreQueryResult, ) -from autogen.import_utils import optional_import_block +from autogen.import_utils import optional_import_block, skip_on_missing_imports with optional_import_block() as result: - import falkordb # noqa: F401 from graphrag_sdk import Attribute, AttributeType, Entity, Ontology, Relation -skip = not result.is_successful reason = "do not run on MacOS or windows OR dependency is not installed" @pytest.mark.openai @pytest.mark.skipif( - sys.platform in ["darwin", "win32"] or skip, + sys.platform in ["darwin", "win32"], reason=reason, ) +@skip_on_missing_imports(["falkordb", "graphrag_sdk"], "neo4j") def test_falkor_db_query_engine(): """Test FalkorDB Query Engine. 1. create a test FalkorDB Query Engine with a schema. 
diff --git a/test/agentchat/contrib/graph_rag/test_native_neo4j_graph_rag.py b/test/agentchat/contrib/graph_rag/test_native_neo4j_graph_rag.py
index a303578a26..74364584a8 100644
--- a/test/agentchat/contrib/graph_rag/test_native_neo4j_graph_rag.py
+++ b/test/agentchat/contrib/graph_rag/test_native_neo4j_graph_rag.py
@@ -12,18 +12,10 @@
     GraphStoreQueryResult,
     Neo4jNativeGraphQueryEngine,
 )
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import skip_on_missing_imports
 
 from ....conftest import reason
 
-with optional_import_block() as result:
-    from neo4j import GraphDatabase  # noqa: F401
-    from neo4j_graphrag.embeddings import Embedder  # noqa: F401
-
-
-skip = not result.is_successful
-
-
 # Configure the logging
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
@@ -115,9 +107,10 @@ def neo4j_native_query_engine_auto():
 
 @pytest.mark.openai
 @pytest.mark.skipif(
-    sys.platform in ["darwin", "win32"] or skip,
+    sys.platform in ["darwin", "win32"],
     reason=reason,
 )
+@skip_on_missing_imports(["neo4j", "neo4j_graphrag"], "neo4j")
 def test_neo4j_native_query_engine(neo4j_native_query_engine):
     """Test querying with initialized knowledge graph"""
     question = "Which company is the employer?"
@@ -129,9 +122,10 @@ def test_neo4j_native_query_engine(neo4j_native_query_engine):
 
 @pytest.mark.openai
 @pytest.mark.skipif(
-    sys.platform in ["darwin", "win32"] or skip,
+    sys.platform in ["darwin", "win32"],
     reason=reason,
 )
+@skip_on_missing_imports(["neo4j", "neo4j_graphrag"], "neo4j")
 def test_neo4j_native_query_auto(neo4j_native_query_engine_auto):
     """Test querying with auto-generated property graph"""
     question = "Which company is the employer?"
diff --git a/test/agentchat/contrib/graph_rag/test_neo4j_graph_rag.py b/test/agentchat/contrib/graph_rag/test_neo4j_graph_rag.py
index 79c6faae0e..56ed58589a 100644
--- a/test/agentchat/contrib/graph_rag/test_neo4j_graph_rag.py
+++ b/test/agentchat/contrib/graph_rag/test_neo4j_graph_rag.py
@@ -15,17 +15,10 @@
     GraphStoreQueryResult,
     Neo4jGraphQueryEngine,
 )
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import skip_on_missing_imports
 
 from ....conftest import reason
 
-with optional_import_block() as result:
-    from llama_index.core import PropertyGraphIndex  # noqa: F401
-
-
-skip = not result.is_successful
-
-
 # Configure the logging
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
@@ -35,6 +28,7 @@
 
 # Test fixture for creating and initializing a query engine with a JSON input file
 @pytest.fixture(scope="module")
+@skip_on_missing_imports(["llama_index"], "neo4j")
 def neo4j_query_engine_with_json():
     input_path = "./test/agentchat/contrib/graph_rag/layout_parser_paper_parsed_elements.json"
     input_documents = [Document(doctype=DocumentType.JSON, path_or_url=input_path)]
@@ -122,9 +116,10 @@ def neo4j_query_engine_auto():
 
 @pytest.mark.openai
 @pytest.mark.skipif(
-    sys.platform in ["darwin", "win32"] or skip,
+    sys.platform in ["darwin", "win32"],
     reason=reason,
 )
+@skip_on_missing_imports(["llama_index"], "neo4j")
 def test_neo4j_query_engine(neo4j_query_engine):
     """Test querying functionality of the Neo4j Query Engine."""
     question = "Which company is the employer?"
@@ -139,9 +134,10 @@ def test_neo4j_query_engine(neo4j_query_engine):
 
 @pytest.mark.openai
 @pytest.mark.skipif(
-    sys.platform in ["darwin", "win32"] or skip,
+    sys.platform in ["darwin", "win32"],
     reason=reason,
 )
+@skip_on_missing_imports(["llama_index"], "neo4j")
 def test_neo4j_add_records(neo4j_query_engine):
     """Test the add_records functionality of the Neo4j Query Engine."""
     input_path = "./test/agentchat/contrib/graph_rag/the_matrix.txt"
@@ -161,9 +157,10 @@ def test_neo4j_add_records(neo4j_query_engine):
 
 @pytest.mark.openai
 @pytest.mark.skipif(
-    sys.platform in ["darwin", "win32"] or skip,
+    sys.platform in ["darwin", "win32"],
     reason=reason,
 )
+@skip_on_missing_imports(["llama_index"], "neo4j")
 def test_neo4j_auto(neo4j_query_engine_auto):
     """Test querying with auto-generated property graph"""
     question = "Which company is the employer?"
@@ -175,9 +172,10 @@ def test_neo4j_auto(neo4j_query_engine_auto):
 
 @pytest.mark.openai
 @pytest.mark.skipif(
-    sys.platform in ["darwin", "win32"] or skip,
+    sys.platform in ["darwin", "win32"],
     reason=reason,
 )
+@skip_on_missing_imports(["llama_index"], "neo4j")
 def test_neo4j_json_auto(neo4j_query_engine_with_json):
     """Test querying with auto-generated property graph from a JSON file."""
     question = "What are current layout detection models in the LayoutParser model zoo?"
diff --git a/test/agentchat/contrib/retrievechat/test_pgvector_retrievechat.py b/test/agentchat/contrib/retrievechat/test_pgvector_retrievechat.py
index dadf394ab8..39c1a37bfb 100644
--- a/test/agentchat/contrib/retrievechat/test_pgvector_retrievechat.py
+++ b/test/agentchat/contrib/retrievechat/test_pgvector_retrievechat.py
@@ -9,32 +9,25 @@
 import os
 
 import pytest
-from sentence_transformers import SentenceTransformer
 
 from autogen import AssistantAgent
 from autogen.agentchat.contrib.retrieve_user_proxy_agent import (
     RetrieveUserProxyAgent,
 )
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import optional_import_block, skip_on_missing_imports
 
 from ....conftest import Credentials
 
 with optional_import_block() as result:
-    import chromadb  # noqa: F401
     import pgvector  # noqa: F401
-    from IPython import get_ipython  # noqa: F401
+    from sentence_transformers import SentenceTransformer
 
-skip = not result.is_successful
-
 test_dir = os.path.join(os.path.dirname(__file__), "../../..", "test_files")
 
 
 @pytest.mark.openai
-@pytest.mark.skipif(
-    skip,
-    reason="dependency is not installed OR requested to skip",
-)
+@skip_on_missing_imports(["chromadb", "pgvector", "IPython", "sentence_transformers"], "retrievechat-pgvector")
 def test_retrievechat(credentials_gpt_4o_mini: Credentials):
     conversations = {}
 
diff --git a/test/agentchat/contrib/retrievechat/test_qdrant_retrievechat.py b/test/agentchat/contrib/retrievechat/test_qdrant_retrievechat.py
index 67af468854..adc742d529 100755
--- a/test/agentchat/contrib/retrievechat/test_qdrant_retrievechat.py
+++ b/test/agentchat/contrib/retrievechat/test_qdrant_retrievechat.py
@@ -17,28 +17,20 @@
     create_qdrant_from_dir,
     query_qdrant,
 )
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import optional_import_block, skip_on_missing_imports
 
 from ....conftest import Credentials
 
 with optional_import_block() as result:
-    import fastembed  # noqa: F401
     from qdrant_client import QdrantClient
 
-QDRANT_INSTALLED = result.is_successful
-
-with optional_import_block() as result:
-    import openai  # noqa: F401
-
-skip = not result.is_successful
-
-
 @pytest.mark.openai
 @pytest.mark.skipif(
-    sys.platform in ["darwin", "win32"] or not QDRANT_INSTALLED or skip,
in ["darwin", "win32"] or not QDRANT_INSTALLED or skip, + sys.platform in ["darwin", "win32"], reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip", ) +@skip_on_missing_imports(["qdrant_client", "fastembed", "openai"], "retrievechat-qdrant") def test_retrievechat(credentials_gpt_4o_mini: Credentials): conversations = {} # ChatCompletion.start_logging(conversations) # deprecated in v0.2 @@ -73,7 +65,7 @@ def test_retrievechat(credentials_gpt_4o_mini: Credentials): @pytest.mark.openai -@pytest.mark.skipif(not QDRANT_INSTALLED, reason="qdrant_client is not installed") +@skip_on_missing_imports(["qdrant_client", "fastembed"], "retrievechat-qdrant") def test_qdrant_filter(): client = QdrantClient(":memory:") create_qdrant_from_dir(dir_path="./website/docs", client=client, collection_name="autogen-docs") @@ -89,7 +81,7 @@ def test_qdrant_filter(): @pytest.mark.openai -@pytest.mark.skipif(not QDRANT_INSTALLED, reason="qdrant_client is not installed") +@skip_on_missing_imports(["qdrant_client", "fastembed"], "retrievechat-qdrant") def test_qdrant_search(): test_dir = os.path.join(os.path.dirname(__file__), "../../..", "test_files") client = QdrantClient(":memory:") diff --git a/test/agentchat/contrib/retrievechat/test_retrievechat.py b/test/agentchat/contrib/retrievechat/test_retrievechat.py index e9eb32701c..9a811f09a4 100755 --- a/test/agentchat/contrib/retrievechat/test_retrievechat.py +++ b/test/agentchat/contrib/retrievechat/test_retrievechat.py @@ -14,27 +14,24 @@ from autogen.agentchat.contrib.retrieve_user_proxy_agent import ( RetrieveUserProxyAgent, ) -from autogen.import_utils import optional_import_block +from autogen.import_utils import optional_import_block, skip_on_missing_imports from ....conftest import Credentials, reason with optional_import_block() as result: import chromadb - import openai # noqa: F401 - from IPython import get_ipython # noqa: F401 from chromadb.utils import embedding_functions as ef -skip = not result.is_successful - reason = "do not run on MacOS or windows OR dependency is not installed OR " + reason @pytest.mark.openai @pytest.mark.skipif( - sys.platform in ["darwin", "win32"] or skip, + sys.platform in ["darwin", "win32"], reason=reason, ) +@skip_on_missing_imports(["chromadb", "IPython", "openai"], "retrievechat") def test_retrievechat(credentials_gpt_4o_mini: Credentials): conversations = {} # autogen.ChatCompletion.start_logging(conversations) # deprecated in v0.2 @@ -75,9 +72,10 @@ def test_retrievechat(credentials_gpt_4o_mini: Credentials): @pytest.mark.skipif( - sys.platform in ["darwin", "win32"] or skip, + sys.platform in ["darwin", "win32"], reason=reason, ) +@skip_on_missing_imports(["chromadb", "IPython", "openai"], "retrievechat") def test_retrieve_config(): # test warning message when no docs_path is provided ragproxyagent = RetrieveUserProxyAgent( diff --git a/test/agentchat/contrib/test_agent_builder.py b/test/agentchat/contrib/test_agent_builder.py index c816fb95a2..5d4860b5cb 100755 --- a/test/agentchat/contrib/test_agent_builder.py +++ b/test/agentchat/contrib/test_agent_builder.py @@ -12,7 +12,7 @@ import pytest from autogen.agentchat.contrib.captainagent.agent_builder import AgentBuilder -from autogen.import_utils import optional_import_block +from autogen.import_utils import optional_import_block, skip_on_missing_imports from ...conftest import KEY_LOC, OAI_CONFIG_LIST @@ -20,7 +20,6 @@ import chromadb # noqa: F401 import huggingface_hub # noqa: F401 -skip = not result.is_successful here = 
@@ -72,10 +71,7 @@ def test_build(builder: AgentBuilder):
 
 
 @pytest.mark.openai
-@pytest.mark.skipif(
-    skip,
-    reason="dependency not installed",
-)
+@skip_on_missing_imports(["chromadb", "huggingface_hub"], "autobuild")
 def test_build_from_library(builder: AgentBuilder):
     building_task = (
         "Find a paper on arxiv by programming, and analyze its application in some domain. "
diff --git a/test/agentchat/contrib/test_captainagent.py b/test/agentchat/contrib/test_captainagent.py
index 66ab74edd3..abc89c95a3 100644
--- a/test/agentchat/contrib/test_captainagent.py
+++ b/test/agentchat/contrib/test_captainagent.py
@@ -7,16 +7,14 @@
 
 from autogen import UserProxyAgent
 from autogen.agentchat.contrib.captainagent.captainagent import CaptainAgent
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import optional_import_block, skip_on_missing_imports
 
-from ...conftest import KEY_LOC, OAI_CONFIG_LIST, Credentials, reason
+from ...conftest import KEY_LOC, OAI_CONFIG_LIST, Credentials
 
 with optional_import_block() as result:
     import chromadb  # noqa: F401
     import huggingface_hub  # noqa: F401
 
-skip = not result.is_successful
-
 
 @pytest.mark.openai
 def test_captain_agent_from_scratch(credentials_all: Credentials):
@@ -58,10 +56,7 @@ def test_captain_agent_from_scratch(credentials_all: Credentials):
 
 
 @pytest.mark.openai
-@pytest.mark.skipif(
-    skip,
-    reason=reason,
-)
+@skip_on_missing_imports(["chromadb", "huggingface_hub"], "autobuild")
 def test_captain_agent_with_library(credentials_all: Credentials):
     config_list = credentials_all.config_list
     llm_config = {
diff --git a/test/agentchat/contrib/test_img_utils.py b/test/agentchat/contrib/test_img_utils.py
index 98add69b51..55aeeae048 100755
--- a/test/agentchat/contrib/test_img_utils.py
+++ b/test/agentchat/contrib/test_img_utils.py
@@ -12,7 +12,6 @@
 from unittest.mock import patch
 
 import numpy as np
-import pytest
 import requests
 
 from autogen.agentchat.contrib.img_utils import (
@@ -25,13 +24,14 @@
     message_formatter_pil_to_b64,
     num_tokens_from_gpt_image,
 )
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import optional_import_block, skip_on_missing_imports
 
 with optional_import_block() as result:
     from PIL import Image
 
-skip = not result.is_successful
+if result.is_successful:
+    raw_pil_image = Image.new("RGB", (10, 10), color="red")
 
 
 base64_encoded_image = (
@@ -45,13 +45,8 @@
     "//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg=="
 )
 
-if skip:
-    raw_pil_image = None
-else:
-    raw_pil_image = Image.new("RGB", (10, 10), color="red")
-
 
-@pytest.mark.skipif(skip, reason="dependency is not installed")
+@skip_on_missing_imports(["PIL"], "unknown")
 class TestGetPilImage(unittest.TestCase):
     def test_read_local_file(self):
         # Create a small red image for testing
@@ -73,7 +68,7 @@ def are_b64_images_equal(x: str, y: str):
         return (np.array(img1) == np.array(img2)).all()
 
 
-@pytest.mark.skipif(skip, reason="dependency is not installed")
+@skip_on_missing_imports(["PIL"], "unknown")
 class TestGetImageData(unittest.TestCase):
     def test_http_image(self):
         with patch("requests.get") as mock_get:
@@ -105,7 +100,7 @@ def test_local_image(self):
         os.remove(temp_file)
 
 
-@pytest.mark.skipif(skip, reason="dependency is not installed")
+@skip_on_missing_imports(["PIL"], "unknown")
 class TestLlavaFormater(unittest.TestCase):
     def test_no_images(self):
         """Test the llava_formatter function with a prompt containing no images."""
@@ -137,7 +132,7 @@ def test_with_ordered_images(self, mock_get_image_data):
         self.assertEqual(result, expected_output)
 
 
-@pytest.mark.skipif(skip, reason="dependency is not installed")
+@skip_on_missing_imports(["PIL"], "unknown")
 class TestGpt4vFormatter(unittest.TestCase):
     def test_no_images(self):
         """Test the gpt4v_formatter function with a prompt containing no images."""
@@ -207,7 +202,7 @@ def test_multiple_images(self, mock_get_image_data):
         self.assertEqual(result, expected_output)
 
 
-@pytest.mark.skipif(skip, reason="dependency is not installed")
+@skip_on_missing_imports(["PIL"], "unknown")
 class TestExtractImgPaths(unittest.TestCase):
     def test_no_images(self):
         """Test the extract_img_paths function with a paragraph containing no images."""
@@ -240,7 +235,7 @@ def test_local_paths(self):
         self.assertEqual(result, expected_output)
 
 
-@pytest.mark.skipif(skip, reason="dependency is not installed")
+@skip_on_missing_imports(["PIL"], "unknown")
 class MessageFormatterPILtoB64Test(unittest.TestCase):
     def test_formatting(self):
         messages = [
diff --git a/test/agentchat/contrib/test_llamaindex_conversable_agent.py b/test/agentchat/contrib/test_llamaindex_conversable_agent.py
index b20e9fe5d2..e4890f98de 100644
--- a/test/agentchat/contrib/test_llamaindex_conversable_agent.py
+++ b/test/agentchat/contrib/test_llamaindex_conversable_agent.py
@@ -8,12 +8,10 @@
 
 from unittest.mock import MagicMock, patch
 
-import pytest
-
 from autogen import GroupChat, GroupChatManager
 from autogen.agentchat.contrib.llamaindex_conversable_agent import LLamaIndexConversableAgent
 from autogen.agentchat.conversable_agent import ConversableAgent
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import optional_import_block, skip_on_missing_imports
 
 from ...conftest import MOCK_OPEN_AI_API_KEY
 
@@ -22,14 +20,11 @@
     from llama_index.core.chat_engine.types import AgentChatResponse
     from llama_index.llms.openai import OpenAI
 
-skip_for_dependencies = not result.is_successful
-skip_reason = "" if result.is_successful else "dependency not installed"
-
 
 openai_key = MOCK_OPEN_AI_API_KEY
 
 
-@pytest.mark.skipif(skip_for_dependencies, reason=skip_reason)
+@skip_on_missing_imports(["llama_index"], "neo4j")
 @patch("llama_index.core.agent.ReActAgent.chat")
 def test_group_chat_with_llama_index_conversable_agent(chat_mock: MagicMock) -> None:
     """Tests the group chat functionality with two MultimodalConversable Agents.
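
The graph-rag and retrievechat files in this patch keep the platform guard as a plain `pytest.mark.skipif` and stack the dependency guard beneath it, so either condition alone skips the test. A sketch of the combined pattern under that assumption (the test itself is hypothetical):

    import sys

    import pytest

    from autogen.import_utils import skip_on_missing_imports

    @pytest.mark.openai
    @pytest.mark.skipif(sys.platform in ["darwin", "win32"], reason="do not run on MacOS or windows")
    @skip_on_missing_imports(["neo4j", "neo4j_graphrag"], "neo4j")
    def test_platform_and_dependency_guards() -> None:
        # Reached only off MacOS/Windows with the neo4j extras importable;
        # otherwise pytest records a skip from whichever guard applies.
        assert sys.platform not in ["darwin", "win32"]
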
diff --git a/test/agentchat/contrib/test_llava.py b/test/agentchat/contrib/test_llava.py
index 37ae0b2994..b834d53640 100755
--- a/test/agentchat/contrib/test_llava.py
+++ b/test/agentchat/contrib/test_llava.py
@@ -9,20 +9,13 @@
 import unittest
 from unittest.mock import MagicMock, patch
 
-import pytest
-
 from autogen.agentchat.contrib.llava_agent import LLaVAAgent, _llava_call_binary_with_config, llava_call
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import skip_on_missing_imports
 
 from ...conftest import MOCK_OPEN_AI_API_KEY
 
-with optional_import_block() as result:
-    import replicate  # noqa: F401
-
-skip = not result.is_successful
-
 
-@pytest.mark.skipif(skip, reason="dependency is not installed")
+@skip_on_missing_imports(["replicate"], "lmm")
 class TestLLaVAAgent(unittest.TestCase):
     def setUp(self):
         self.agent = LLaVAAgent(
@@ -38,7 +31,7 @@ def test_init(self):
         self.assertIsInstance(self.agent, LLaVAAgent)
 
 
-@pytest.mark.skipif(skip, reason="dependency is not installed")
+@skip_on_missing_imports(["replicate"], "lmm")
 class TestLLavaCallBinaryWithConfig(unittest.TestCase):
     @patch("requests.post")
     def test_local_mode(self, mock_post):
@@ -96,7 +89,7 @@ def test_remote_mode(self, mock_run):
         )
 
 
-@pytest.mark.skipif(skip, reason="dependency is not installed")
+@skip_on_missing_imports(["replicate"], "lmm")
 class TestLLavaCall(unittest.TestCase):
     @patch("autogen.agentchat.contrib.llava_agent.llava_formatter")
     @patch("autogen.agentchat.contrib.llava_agent.llava_call_binary")
diff --git a/test/agentchat/contrib/test_lmm.py b/test/agentchat/contrib/test_lmm.py
index 7a87bd4699..866c73cbe4 100755
--- a/test/agentchat/contrib/test_lmm.py
+++ b/test/agentchat/contrib/test_lmm.py
@@ -9,36 +9,21 @@
 import unittest
 from unittest.mock import MagicMock
 
-import pytest
-
 import autogen
 from autogen.agentchat.contrib.img_utils import get_pil_image
 from autogen.agentchat.contrib.multimodal_conversable_agent import MultimodalConversableAgent
 from autogen.agentchat.conversable_agent import ConversableAgent
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import skip_on_missing_imports
 
 from ...conftest import MOCK_OPEN_AI_API_KEY
 
-with optional_import_block() as result:
-    from PIL import Image  # noqa: F401
-
-
-skip = not result.is_successful
-
-
 base64_encoded_image = (
     "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4"
    "//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg=="
 )
 
-if skip:
-    pil_image = None
-else:
-    pil_image = get_pil_image(base64_encoded_image)
-
 
-@pytest.mark.skipif(skip, reason="dependency is not installed")
+@skip_on_missing_imports(["PIL"], "unknown")
 class TestMultimodalConversableAgent(unittest.TestCase):
     def setUp(self):
         self.agent = MultimodalConversableAgent(
@@ -65,6 +50,8 @@ def test_system_message(self):
         # Test updating system message
         new_message = f"We will discuss <img {base64_encoded_image}> in this conversation."
         self.agent.update_system_message(new_message)
+
+        pil_image = get_pil_image(base64_encoded_image)
         self.assertEqual(
             self.agent.system_message,
             [
@@ -97,7 +84,7 @@ def test_print_received_message(self):
         self.agent._print_received_message.assert_called_with(message_str, sender)
 
 
-@pytest.mark.skipif(skip, reason="Dependency not installed")
+@skip_on_missing_imports(["PIL"], "unknown")
 def test_group_chat_with_lmm():
     """Tests the group chat functionality with two MultimodalConversable Agents.
     Verifies that the chat is correctly limited by the max_round parameter.
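
Where a module-level value depends on the optional import, as in test_img_utils.py and test_lmm.py above, the patch either guards the construction with `result.is_successful` or moves it into the decorated test so that collection never raises. A minimal sketch using only constructs the patch itself shows:

    from autogen.import_utils import optional_import_block, skip_on_missing_imports

    with optional_import_block() as result:
        from PIL import Image

    if result.is_successful:
        # Built only when PIL imported; the decorated test below is skipped otherwise.
        raw_pil_image = Image.new("RGB", (10, 10), color="red")

    @skip_on_missing_imports(["PIL"], "unknown")
    def test_image_size() -> None:
        assert raw_pil_image.size == (10, 10)
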
diff --git a/test/agentchat/contrib/test_reasoning_agent.py b/test/agentchat/contrib/test_reasoning_agent.py
index 3aeb76e6cf..596c99da2a 100644
--- a/test/agentchat/contrib/test_reasoning_agent.py
+++ b/test/agentchat/contrib/test_reasoning_agent.py
@@ -14,17 +14,11 @@
 import pytest
 
 from autogen.agentchat.contrib.reasoning_agent import ReasoningAgent, ThinkNode, visualize_tree
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import skip_on_missing_imports
 
 sys.path.append(os.path.join(os.path.dirname(__file__), "../.."))
 
-with optional_import_block() as result:
-    from graphviz import Digraph  # noqa: F401
-
-skip_for_dependencies = not result.is_successful
-skip_reason = "" if result.is_successful else "dependency not installed"
-
 here = os.path.abspath(os.path.dirname(__file__))
 
 # Test data
@@ -215,7 +209,7 @@ def mock_response(*args, **kwargs):
     assert max_depth_found <= agent._max_depth
 
 
-@pytest.mark.skipif(skip_for_dependencies, reason=skip_reason)
+@skip_on_missing_imports(["graphviz"], "unknown")
 @patch("graphviz.Digraph")
 def test_visualize_tree_successful_case(mock_digraph):
     """Test successful tree visualization"""
@@ -264,7 +258,7 @@ def test_visualize_tree_successful_case(mock_digraph):
     mock_graph.render.assert_called_once_with("tree_of_thoughts", view=False, format="png", cleanup=True)
 
 
-@pytest.mark.skipif(skip_for_dependencies, reason=skip_reason)
+@skip_on_missing_imports(["graphviz"], "unknown")
 @patch("graphviz.Digraph")
 def test_visualize_tree_render_failure(mock_digraph):
     """Test visualization when rendering fails"""
diff --git a/test/agentchat/contrib/test_web_surfer.py b/test/agentchat/contrib/test_web_surfer.py
index 65f467817b..617fcd91e9 100755
--- a/test/agentchat/contrib/test_web_surfer.py
+++ b/test/agentchat/contrib/test_web_surfer.py
@@ -13,7 +13,7 @@
 
 from autogen import UserProxyAgent
 from autogen.agentchat.contrib.web_surfer import WebSurferAgent
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import optional_import_block, skip_on_missing_imports
 
 from ...conftest import MOCK_OPEN_AI_API_KEY, Credentials
 
@@ -29,9 +29,6 @@
     from bs4 import BeautifulSoup  # noqa: F401
 
 
-skip_all = not result.is_successful
-
-
 try:
     BING_API_KEY = os.environ["BING_API_KEY"]
 except KeyError:
@@ -40,10 +37,7 @@
 skip_bing = False
 
 
-@pytest.mark.skipif(
-    skip_all,
-    reason="do not run if dependency is not installed",
-)
+@skip_on_missing_imports(["markdownify", "pathvalidate", "pdfminer", "requests", "bs4"], "websurfer")
 def test_web_surfer() -> None:
     with pytest.MonkeyPatch.context() as mp:
         # we mock the API key so we can register functions (llm_config must be present for this to work)
@@ -100,10 +94,7 @@ def test_web_surfer() -> None:
 
 
 @pytest.mark.openai
-@pytest.mark.skipif(
-    skip_all,
-    reason="dependency is not installed",
-)
+@skip_on_missing_imports(["markdownify", "pathvalidate", "pdfminer", "requests", "bs4"], "websurfer")
 def test_web_surfer_oai(credentials_gpt_4o_mini: Credentials, credentials_gpt_4o: Credentials) -> None:
     llm_config = {"config_list": credentials_gpt_4o.config_list, "timeout": 180, "cache_seed": 42}
diff --git a/test/agentchat/contrib/vectordb/test_chromadb.py b/test/agentchat/contrib/vectordb/test_chromadb.py
index c45f7f1175..48c67a7931 100644
--- a/test/agentchat/contrib/vectordb/test_chromadb.py
+++ b/test/agentchat/contrib/vectordb/test_chromadb.py
@@ -10,7 +10,7 @@
 import pytest
 
 from autogen.agentchat.contrib.vectordb.chromadb import ChromaVectorDB
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import optional_import_block, skip_on_missing_imports
 
 sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
 
@@ -20,10 +20,7 @@
     import sentence_transformers  # noqa: F401
 
 
-skip = not result.is_successful
-
-
-@pytest.mark.skipif(skip, reason="dependency is not installed")
+@skip_on_missing_imports(["chromadb", "sentence_transformers"], "retrievechat")
 def test_chromadb():
     # test create collection
     db = ChromaVectorDB(path=".db")
diff --git a/test/agentchat/contrib/vectordb/test_mongodb.py b/test/agentchat/contrib/vectordb/test_mongodb.py
index 76877e6109..9464392a19 100644
--- a/test/agentchat/contrib/vectordb/test_mongodb.py
+++ b/test/agentchat/contrib/vectordb/test_mongodb.py
@@ -13,22 +13,12 @@
 
 from autogen.agentchat.contrib.vectordb.base import Document
 from autogen.agentchat.contrib.vectordb.mongodb import MongoDBAtlasVectorDB
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import optional_import_block, skip_on_missing_imports
 
 with optional_import_block() as result:
-    import pymongo  # noqa: F401
-    import sentence_transformers  # noqa: F401
-
-
-if not result.is_successful:
-    # To display warning in pyproject.toml [tool.pytest.ini_options] set log_cli = true
-    logger = logging.getLogger(__name__)
-    logger.warning(f"skipping {__name__}. It requires one to pip install pymongo or the extra [retrievechat-mongodb]")
-    pytest.skip(allow_module_level=True)
-
-from pymongo import MongoClient
-from pymongo.collection import Collection
-from pymongo.errors import OperationFailure
+    from pymongo import MongoClient
+    from pymongo.collection import Collection
+    from pymongo.errors import OperationFailure
 
 logger = logging.getLogger(__name__)
 
@@ -61,7 +51,7 @@ def _wait_for_predicate(predicate, err, timeout=TIMEOUT, interval=DELAY):
         sleep(DELAY)
 
 
-def _delete_search_indexes(collection: Collection, wait=True):
+def _delete_search_indexes(collection: "Collection", wait=True):
     """Deletes all indexes in a collection
 
     Args:
@@ -143,6 +133,7 @@ def collection_name():
     return f"{MONGODB_COLLECTION}_{collection_id}"
 
 
+@skip_on_missing_imports(["pymongo", "sentence_transformers"], "retrievechat-mongodb")
 def test_create_collection(db, collection_name):
     """Def create_collection(collection_name: str, overwrite: bool = False) -> Collection
 
@@ -172,6 +163,7 @@
         db.create_collection(collection_name=collection_name, overwrite=False, get_or_create=False)
 
 
+@skip_on_missing_imports(["pymongo", "sentence_transformers"], "retrievechat-mongodb")
 def test_get_collection(db, collection_name):
     with pytest.raises(ValueError):
         db.get_collection()
@@ -185,6 +177,7 @@
     assert collection_got.name == db.active_collection.name
 
 
+@skip_on_missing_imports(["pymongo", "sentence_transformers"], "retrievechat-mongodb")
 def test_delete_collection(db, collection_name):
     assert collection_name not in db.list_collections()
     collection = db.create_collection(collection_name)
@@ -193,6 +186,7 @@
     assert collection_name not in db.list_collections()
 
 
+@skip_on_missing_imports(["pymongo", "sentence_transformers"], "retrievechat-mongodb")
 def test_insert_docs(db, collection_name, example_documents):
     # Test that there's an active collection
     with pytest.raises(ValueError) as exc:
@@ -218,6 +212,7 @@
     assert len(found[0]["embedding"]) == 384
+@skip_on_missing_imports(["pymongo", "sentence_transformers"], "retrievechat-mongodb") def test_update_docs(db_with_indexed_clxn, example_documents): db, collection = db_with_indexed_clxn # Use update_docs to insert new documents @@ -253,6 +248,7 @@ def test_update_docs(db_with_indexed_clxn, example_documents): assert collection.find_one({"_id": new_id}) is None +@skip_on_missing_imports(["pymongo", "sentence_transformers"], "retrievechat-mongodb") def test_delete_docs(db_with_indexed_clxn, example_documents): db, clxn = db_with_indexed_clxn # Insert example documents @@ -263,6 +259,7 @@ def test_delete_docs(db_with_indexed_clxn, example_documents): assert {2, "2"} == {doc["_id"] for doc in clxn.find({})} +@skip_on_missing_imports(["pymongo", "sentence_transformers"], "retrievechat-mongodb") def test_get_docs_by_ids(db_with_indexed_clxn, example_documents): db, clxn = db_with_indexed_clxn # Insert example documents @@ -288,11 +285,13 @@ def test_get_docs_by_ids(db_with_indexed_clxn, example_documents): assert len(docs) == 4 +@skip_on_missing_imports(["pymongo", "sentence_transformers"], "retrievechat-mongodb") def test_retrieve_docs_empty(db_with_indexed_clxn): db, clxn = db_with_indexed_clxn assert db.retrieve_docs(queries=["Cats"], collection_name=clxn.name, n_results=2) == [] +@skip_on_missing_imports(["pymongo", "sentence_transformers"], "retrievechat-mongodb") def test_retrieve_docs_populated_db_empty_query(db_with_indexed_clxn, example_documents): db, clxn = db_with_indexed_clxn db.insert_docs(example_documents, collection_name=clxn.name) @@ -301,6 +300,7 @@ def test_retrieve_docs_populated_db_empty_query(db_with_indexed_clxn, example_do assert results == [] +@skip_on_missing_imports(["pymongo", "sentence_transformers"], "retrievechat-mongodb") def test_retrieve_docs(db_with_indexed_clxn, example_documents): """Begin testing Atlas Vector Search NOTE: Indexing may take some time, so we must be patient on the first query. @@ -324,6 +324,7 @@ def results_ready(): assert all(["embedding" not in doc[0] for doc in results[0]]) +@skip_on_missing_imports(["pymongo", "sentence_transformers"], "retrievechat-mongodb") def test_retrieve_docs_with_embedding(db_with_indexed_clxn, example_documents): """Begin testing Atlas Vector Search NOTE: Indexing may take some time, so we must be patient on the first query. 
@@ -347,6 +348,7 @@ def results_ready():
     assert all(["embedding" in doc[0] for doc in results[0]])
 
 
+@skip_on_missing_imports(["pymongo", "sentence_transformers"], "retrievechat-mongodb")
 def test_retrieve_docs_multiple_queries(db_with_indexed_clxn, example_documents):
     db, clxn = db_with_indexed_clxn
     # Insert example documents
@@ -369,6 +371,7 @@ def results_ready():
     assert {doc[0]["id"] for doc in results[1]} == {"1", "2"}
 
 
+@skip_on_missing_imports(["pymongo", "sentence_transformers"], "retrievechat-mongodb")
 def test_retrieve_docs_with_threshold(db_with_indexed_clxn, example_documents):
     db, clxn = db_with_indexed_clxn
     # Insert example documents
@@ -390,6 +393,7 @@ def results_ready():
     assert all([doc[1] >= 0.7 for doc in results[0]])
 
 
+@skip_on_missing_imports(["pymongo", "sentence_transformers"], "retrievechat-mongodb")
 def test_wait_until_document_ready(collection_name, example_documents):
     database = MongoClient(MONGODB_URI)[MONGODB_DATABASE]
     _empty_collections_and_delete_indexes(database, [collection_name], wait=True)
diff --git a/test/agentchat/contrib/vectordb/test_pgvectordb.py b/test/agentchat/contrib/vectordb/test_pgvectordb.py
index 47b6e6e50f..efff6359fd 100644
--- a/test/agentchat/contrib/vectordb/test_pgvectordb.py
+++ b/test/agentchat/contrib/vectordb/test_pgvectordb.py
@@ -11,25 +11,22 @@
 import pytest
 
 from autogen.agentchat.contrib.vectordb.pgvectordb import PGVectorDB
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import optional_import_block, skip_on_missing_imports
 
 from ....conftest import reason
 
 with optional_import_block() as result:
-    import pgvector  # noqa: F401
     import psycopg
-    import sentence_transformers  # noqa: F401
 
-skip = not result.is_successful
-
 reason = "do not run on MacOS or windows OR dependency is not installed OR " + reason
 
 
 @pytest.mark.skipif(
-    sys.platform in ["darwin", "win32"] or skip,
+    sys.platform in ["darwin", "win32"],
     reason=reason,
 )
+@skip_on_missing_imports(["pgvector", "psycopg", "sentence_transformers"], "retrievechat-pgvector")
 def test_pgvector():
     # test db config
     db_config = {
diff --git a/test/agentchat/contrib/vectordb/test_qdrant.py b/test/agentchat/contrib/vectordb/test_qdrant.py
index 20870edddc..5e0cb5dec8 100644
--- a/test/agentchat/contrib/vectordb/test_qdrant.py
+++ b/test/agentchat/contrib/vectordb/test_qdrant.py
@@ -8,22 +8,16 @@
 import sys
 import uuid
 
-import pytest
-
 from autogen.agentchat.contrib.vectordb.qdrant import QdrantVectorDB
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import optional_import_block, skip_on_missing_imports
 
 sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
 
 with optional_import_block() as result:
-    from fastembed import TextEmbedding  # noqa: F401
     from qdrant_client import QdrantClient
 
-skip = not result.is_successful
-
-
-@pytest.mark.skipif(skip, reason="dependency is not installed")
+@skip_on_missing_imports(["fastembed", "qdrant_client"], "retrievechat-qdrant")
 def test_qdrant():
     # test create collection
     client = QdrantClient(location=":memory:")
diff --git a/test/agentchat/test_cache_agent.py b/test/agentchat/test_cache_agent.py
index 6349d08f23..390f3ed6e5 100644
--- a/test/agentchat/test_cache_agent.py
+++ b/test/agentchat/test_cache_agent.py
@@ -13,23 +13,13 @@
 import autogen
 from autogen.agentchat import AssistantAgent, UserProxyAgent
 from autogen.cache import Cache
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import skip_on_missing_imports
 
 from ..conftest import Credentials
 
-with optional_import_block() as result:
-    from openai import OpenAI  # noqa: F401
-
-skip_tests = not result.is_successful
-
-with optional_import_block() as result:
-    import redis  # noqa: F401
-
-skip_redis_tests = not result.is_successful
-
 
 @pytest.mark.openai
-@pytest.mark.skipif(skip_tests, reason="openai not installed")
+@skip_on_missing_imports(["openai"])
 def test_legacy_disk_cache(credentials_gpt_4o_mini: Credentials):
     random_cache_seed = int.from_bytes(os.urandom(2), "big")
     start_time = time.time()
@@ -83,7 +73,7 @@ def _test_redis_cache(credentials: Credentials):
 
 @pytest.mark.openai
 @pytest.mark.redis
-@pytest.mark.skipif(skip_tests or skip_redis_tests, reason="redis not installed OR openai not installed")
+@skip_on_missing_imports(["openai", "redis"], "redis")
 def test_redis_cache(credentials_gpt_4o_mini: Credentials):
     _test_redis_cache(credentials_gpt_4o_mini)
 
@@ -91,7 +81,7 @@ def test_redis_cache(credentials_gpt_4o_mini: Credentials):
 @pytest.mark.skip(reason="Currently not working")
 @pytest.mark.gemini
 @pytest.mark.redis
-@pytest.mark.skipif(skip_tests or skip_redis_tests, reason="redis not installed OR openai not installed")
+@skip_on_missing_imports(["openai", "redis"], "redis")
 def test_redis_cache_gemini(credentials_gemini_pro: Credentials):
     _test_redis_cache(credentials_gemini_pro)
 
@@ -99,13 +89,13 @@ def test_redis_cache_gemini(credentials_gemini_pro: Credentials):
 @pytest.mark.skip(reason="Currently not working")
 @pytest.mark.anthropic
 @pytest.mark.redis
-@pytest.mark.skipif(skip_tests or skip_redis_tests, reason="redis not installed OR openai not installed")
+@skip_on_missing_imports(["openai", "redis"], "redis")
 def test_redis_cache_anthropic(credentials_anthropic_claude_sonnet: Credentials):
     _test_redis_cache(credentials_anthropic_claude_sonnet)
 
 
 @pytest.mark.openai
-@pytest.mark.skipif(skip_tests, reason="openai not installed")
+@skip_on_missing_imports(["openai"])
 def test_disk_cache(credentials_gpt_4o_mini: Credentials):
     random_cache_seed = int.from_bytes(os.urandom(2), "big")
     start_time = time.time()
diff --git a/test/agentchat/test_function_call.py b/test/agentchat/test_function_call.py
index 10ecccd47b..7c3b704000 100755
--- a/test/agentchat/test_function_call.py
+++ b/test/agentchat/test_function_call.py
@@ -13,19 +13,14 @@
 import pytest
 
 import autogen
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import skip_on_missing_imports
 from autogen.math_utils import eval_math_responses
 
 from ..conftest import Credentials, reason
 
-with optional_import_block() as result:
-    from openai import OpenAI  # noqa: F401
-
-skip = not result.is_successful
-
 
 @pytest.mark.openai
-@pytest.mark.skipif(skip, reason=reason)
+@skip_on_missing_imports(["openai"])
 def test_eval_math_responses(credentials_gpt_4o_mini: Credentials):
     functions = [
         {
@@ -220,9 +215,10 @@ def get_number():
 
 @pytest.mark.openai
 @pytest.mark.skipif(
-    skip or not sys.version.startswith("3.10"),
+    not sys.version.startswith("3.10"),
     reason=reason,
 )
+@skip_on_missing_imports(["openai"])
 def test_update_function(credentials_gpt_4o_mini: Credentials):
     llm_config = {
         "config_list": credentials_gpt_4o_mini.config_list,
diff --git a/test/agentchat/test_math_user_proxy_agent.py b/test/agentchat/test_math_user_proxy_agent.py
index a5781bc94a..2e9949909b 100755
--- a/test/agentchat/test_math_user_proxy_agent.py
+++ b/test/agentchat/test_math_user_proxy_agent.py
@@ -15,17 +15,13 @@
     _add_print_to_last_line,
     _remove_print,
 )
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import skip_on_missing_imports
 
 from ..conftest import Credentials
 
-with optional_import_block() as result:
-    from openai import OpenAI  # noqa: F401
-
-skip = not result.is_successful
-
 
 @pytest.mark.openai
+@skip_on_missing_imports(["openai"])
 def test_math_user_proxy_agent(
     credentials_gpt_4o_mini: Credentials,
 ):
diff --git a/test/agentchat/test_tool_calls.py b/test/agentchat/test_tool_calls.py
index 9054754812..f769927aae 100755
--- a/test/agentchat/test_tool_calls.py
+++ b/test/agentchat/test_tool_calls.py
@@ -13,20 +13,16 @@
 import pytest
 
 import autogen
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import skip_on_missing_imports
 from autogen.math_utils import eval_math_responses
 from autogen.oai.client import TOOL_ENABLED
 
 from ..conftest import Credentials
 
-with optional_import_block() as result:
-    from openai import OpenAI  # noqa: F401
-
-skip = not result.is_successful
-
 
 @pytest.mark.openai
-@pytest.mark.skipif(skip or not TOOL_ENABLED, reason="openai>=1.1.0 not installed or requested to skip")
+@pytest.mark.skipif(not TOOL_ENABLED, reason="openai>=1.1.0 not installed or requested to skip")
+@skip_on_missing_imports(["openai"])
 def test_eval_math_responses(credentials_gpt_4o_mini: Credentials):
     config_list = credentials_gpt_4o_mini.config_list
     tools = [
@@ -80,7 +76,8 @@ def test_eval_math_responses(credentials_gpt_4o_mini: Credentials):
 
 
 @pytest.mark.openai
-@pytest.mark.skipif(skip or not TOOL_ENABLED, reason="openai>=1.1.0 not installed or requested to skip")
+@pytest.mark.skipif(not TOOL_ENABLED, reason="openai>=1.1.0 not installed or requested to skip")
+@skip_on_missing_imports(["openai"])
 def test_eval_math_responses_api_style_function(credentials_gpt_4o_mini: Credentials):
     config_list = credentials_gpt_4o_mini.config_list
     functions = [
@@ -131,9 +128,10 @@ def test_eval_math_responses_api_style_function(credentials_gpt_4o_mini: Credent
 
 @pytest.mark.openai
 @pytest.mark.skipif(
-    skip or not TOOL_ENABLED or not sys.version.startswith("3.10"),
+    not TOOL_ENABLED or not sys.version.startswith("3.10"),
     reason="do not run if openai is <1.1.0 or py!=3.10 or requested to skip",
 )
+@skip_on_missing_imports(["openai"])
 def test_update_tool(credentials_gpt_4o: Credentials):
     llm_config = {
         "config_list": credentials_gpt_4o.config_list,
diff --git a/test/cache/test_cache.py b/test/cache/test_cache.py
index 762e7755b0..936eeb1373 100755
--- a/test/cache/test_cache.py
+++ b/test/cache/test_cache.py
@@ -10,13 +10,11 @@
 from unittest.mock import ANY, MagicMock, patch
 
 from autogen.cache.cache import Cache
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import optional_import_block, skip_on_missing_imports
 
 with optional_import_block() as result:
     from azure.cosmos import CosmosClient
 
-skip_azure_cosmos = not result.is_successful
-
 
 class TestCache(unittest.TestCase):
     def setUp(self):
@@ -31,7 +29,7 @@ def setUp(self):
                 "database_id": "autogen_cache",
                 "container_id": "TestContainer",
                 "cache_seed": "42",
-                "client": MagicMock(spec=CosmosClient) if not skip_azure_cosmos else MagicMock(),
+                "client": MagicMock(spec=CosmosClient) if result.is_successful else MagicMock(),
             }
         }
 
@@ -42,7 +40,7 @@ def test_redis_cache_initialization(self, mock_cache_factory):
         mock_cache_factory.assert_called()
 
     @patch("autogen.cache.cache_factory.CacheFactory.cache_factory", return_value=MagicMock())
-    @unittest.skipIf(skip_azure_cosmos, "requires azure.cosmos")
+    @skip_on_missing_imports(["azure.cosmos"], "cosmosdb")
"cosmosdb") def test_cosmosdb_cache_initialization(self, mock_cache_factory): cache = Cache(self.cosmos_config) self.assertIsInstance(cache.cache, MagicMock) @@ -71,7 +69,7 @@ def context_manager_common(self, config): def test_redis_context_manager(self): self.context_manager_common(self.redis_config) - @unittest.skipIf(skip_azure_cosmos, "requires azure.cosmos") + @skip_on_missing_imports(["azure.cosmos"], "cosmosdb") def test_cosmos_context_manager(self): self.context_manager_common(self.cosmos_config) @@ -90,7 +88,7 @@ def get_set_common(self, config): def test_redis_get_set(self): self.get_set_common(self.redis_config) - @unittest.skipIf(skip_azure_cosmos, "requires azure.cosmos") + @skip_on_missing_imports(["azure.cosmos"], "cosmosdb") def test_cosmos_get_set(self): self.get_set_common(self.cosmos_config) @@ -104,7 +102,7 @@ def close_common(self, config): def test_redis_close(self): self.close_common(self.redis_config) - @unittest.skipIf(skip_azure_cosmos, "requires azure.cosmos") + @skip_on_missing_imports(["azure.cosmos"], "cosmosdb") def test_cosmos_close(self): self.close_common(self.cosmos_config) diff --git a/test/cache/test_cosmos_db_cache.py b/test/cache/test_cosmos_db_cache.py index 056cebc114..0adde1344f 100644 --- a/test/cache/test_cosmos_db_cache.py +++ b/test/cache/test_cosmos_db_cache.py @@ -11,26 +11,20 @@ from unittest.mock import MagicMock, patch from autogen.cache.cosmos_db_cache import CosmosDBCache -from autogen.import_utils import optional_import_block +from autogen.import_utils import optional_import_block, skip_on_missing_imports with optional_import_block() as result: - from azure.cosmos import CosmosClient # noqa: F401 from azure.cosmos.exceptions import CosmosResourceNotFoundError -skip_test = not result.is_successful - - +@skip_on_missing_imports(["azure.cosmos"], "cosmosdb") class TestCosmosDBCache(unittest.TestCase): def setUp(self): - if skip_test: - self.skipTest("requires azure.cosmos") - else: - self.seed = "42" - self.connection_string = "AccountEndpoint=https://example.documents.azure.com:443/;" - self.database_id = "autogen_cache" - self.container_id = "TestContainer" - self.client = MagicMock() + self.seed = "42" + self.connection_string = "AccountEndpoint=https://example.documents.azure.com:443/;" + self.database_id = "autogen_cache" + self.container_id = "TestContainer" + self.client = MagicMock() @patch("autogen.cache.cosmos_db_cache.CosmosClient.from_connection_string", return_value=MagicMock()) def test_init(self, mock_from_connection_string): diff --git a/test/cache/test_redis_cache.py b/test/cache/test_redis_cache.py index 16a1efb099..06d6693718 100755 --- a/test/cache/test_redis_cache.py +++ b/test/cache/test_redis_cache.py @@ -13,17 +13,11 @@ import pytest from autogen.cache.redis_cache import RedisCache -from autogen.import_utils import optional_import_block - -with optional_import_block() as result: - import redis # noqa: F401 - - -skip_redis_tests = not result.is_successful +from autogen.import_utils import skip_on_missing_imports @pytest.mark.redis -@pytest.mark.skipif(skip_redis_tests, reason="redis not installed") +@skip_on_missing_imports(["redis"], "redis") class TestRedisCache(unittest.TestCase): def setUp(self): self.seed = "test_seed" diff --git a/test/coding/test_user_defined_functions.py b/test/coding/test_user_defined_functions.py index ec4134f6f0..570972cc3b 100644 --- a/test/coding/test_user_defined_functions.py +++ b/test/coding/test_user_defined_functions.py @@ -11,13 +11,11 @@ from autogen.coding.base import CodeBlock 
 from autogen.coding.func_with_reqs import FunctionWithRequirements, with_requirements
 from autogen.coding.local_commandline_code_executor import LocalCommandLineCodeExecutor
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import optional_import_block, skip_on_missing_imports
 
 with optional_import_block() as result:
     import pandas
 
-skip = not result.is_successful
-
 
 classes_to_test = [LocalCommandLineCodeExecutor]
 
@@ -57,7 +55,7 @@ def function_missing_reqs() -> "pandas.DataFrame":
 
 
 @pytest.mark.parametrize("cls", classes_to_test)
-@pytest.mark.skipif(skip, reason="pandas not installed")
+@skip_on_missing_imports(["pandas"], "test")
 def test_can_load_function_with_reqs(cls) -> None:
     with tempfile.TemporaryDirectory() as temp_dir:
         executor = cls(work_dir=temp_dir, functions=[load_data])
@@ -77,7 +75,7 @@ def test_can_load_function_with_reqs(cls) -> None:
 
 
 @pytest.mark.parametrize("cls", classes_to_test)
-@pytest.mark.skipif(skip, reason="pandas not installed")
+@skip_on_missing_imports(["pandas"], "test")
 def test_can_load_function(cls) -> None:
     with tempfile.TemporaryDirectory() as temp_dir:
         executor = cls(work_dir=temp_dir, functions=[add_two_numbers])
@@ -96,7 +94,7 @@ def test_can_load_function(cls) -> None:
 # TODO - only run this test for containerized executors, as the environment is not guaranteed to have pandas installed
 # It is common for the local environment to have pandas installed, so this test will not work as expected
 # @pytest.mark.parametrize("cls", classes_to_test)
-# @pytest.mark.skipif(skip, reason="pandas not installed")
+# @skip_on_missing_imports(["pandas"], "test")
 # def test_fails_for_missing_reqs(cls) -> None:
 #     with tempfile.TemporaryDirectory() as temp_dir:
 #         executor = cls(work_dir=temp_dir, functions=[function_missing_reqs])
@@ -112,7 +110,7 @@ def test_can_load_function(cls) -> None:
 
 
 @pytest.mark.parametrize("cls", classes_to_test)
-@pytest.mark.skipif(skip, reason="pandas not installed")
+@skip_on_missing_imports(["pandas"], "test")
 def test_fails_for_function_incorrect_import(cls) -> None:
     with tempfile.TemporaryDirectory() as temp_dir:
         executor = cls(work_dir=temp_dir, functions=[function_incorrect_import])
@@ -128,7 +126,7 @@ def test_fails_for_function_incorrect_import(cls) -> None:
 
 
 @pytest.mark.parametrize("cls", classes_to_test)
-@pytest.mark.skipif(skip, reason="pandas not installed")
+@skip_on_missing_imports(["pandas"], "test")
 def test_fails_for_function_incorrect_dep(cls) -> None:
     with tempfile.TemporaryDirectory() as temp_dir:
         executor = cls(work_dir=temp_dir, functions=[function_incorrect_dep])
diff --git a/test/interop/test_interoperability.py b/test/interop/test_interoperability.py
index 210bcf3a75..3cf654305e 100644
--- a/test/interop/test_interoperability.py
+++ b/test/interop/test_interoperability.py
@@ -15,9 +15,6 @@
 with optional_import_block():
     from crewai_tools import FileReadTool
 
-with optional_import_block():
-    pass  # type: ignore[import]
-
 
 class TestInteroperability:
     @skip_on_missing_imports(["crewai_tools", "langchain", "pydantic_ai"], "interop")
diff --git a/test/io/test_websockets.py b/test/io/test_websockets.py
index 774fb306ab..1fc46cafc9 100644
--- a/test/io/test_websockets.py
+++ b/test/io/test_websockets.py
@@ -15,7 +15,7 @@
 
 import autogen
 from autogen.cache.cache import Cache
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import optional_import_block, skip_on_missing_imports
 from autogen.io import IOWebsockets
 from autogen.messages.base_message import BaseMessage, wrap_message
@@ -26,8 +26,6 @@
     from websockets.exceptions import ConnectionClosed
     from websockets.sync.client import connect as ws_connect
 
-skip_test = not result.is_successful
-
 
 @wrap_message
 class TestTextMessage(BaseMessage):
@@ -42,7 +40,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None:
             f(self.text)
 
 
-@pytest.mark.skipif(skip_test, reason="websockets module is not available")
+@skip_on_missing_imports(["websockets"], "websockets")
 class TestConsoleIOWithWebsockets:
     def test_input_print(self) -> None:
         print()
diff --git a/test/oai/test_anthropic.py b/test/oai/test_anthropic.py
index fafe908fbc..3340d8b7ca 100644
--- a/test/oai/test_anthropic.py
+++ b/test/oai/test_anthropic.py
@@ -10,22 +10,18 @@
 
 import pytest
 
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import optional_import_block, skip_on_missing_imports
 from autogen.oai.anthropic import AnthropicClient, _calculate_cost
 
 with optional_import_block() as result:
     from anthropic.types import Message, TextBlock
 
-skip = not result.is_successful
-
 from typing import List
 
 from pydantic import BaseModel
 from typing_extensions import Literal
 
-reason = "Anthropic dependency not installed!"
-
 
 @pytest.fixture
 def mock_completion():
@@ -56,7 +52,7 @@ def anthropic_client():
     return AnthropicClient(api_key="dummy_api_key")
 
 
-@pytest.mark.skipif(skip, reason=reason)
+@skip_on_missing_imports(["anthropic"], "anthropic")
 def test_initialization_missing_api_key():
     os.environ.pop("ANTHROPIC_API_KEY", None)
     os.environ.pop("AWS_ACCESS_KEY", None)
@@ -88,12 +84,12 @@ def anthropic_client_with_vertexai_credentials():
     )
 
 
-@pytest.mark.skipif(skip, reason=reason)
+@skip_on_missing_imports(["anthropic"], "anthropic")
 def test_intialization(anthropic_client):
     assert anthropic_client.api_key == "dummy_api_key", "`api_key` should be correctly set in the config"
 
 
-@pytest.mark.skipif(skip, reason=reason)
+@skip_on_missing_imports(["anthropic"], "anthropic")
 def test_intialization_with_aws_credentials(anthropic_client_with_aws_credentials):
     assert anthropic_client_with_aws_credentials.aws_access_key == "dummy_access_key", (
         "`aws_access_key` should be correctly set in the config"
@@ -109,7 +105,7 @@ def test_intialization_with_aws_credentials(anthropic_client_with_aws_credential
     )
 
 
-@pytest.mark.skipif(skip, reason=reason)
+@skip_on_missing_imports(["anthropic"], "anthropic")
 def test_initialization_with_vertexai_credentials(anthropic_client_with_vertexai_credentials):
     assert anthropic_client_with_vertexai_credentials.gcp_project_id == "dummy_project_id", (
         "`gcp_project_id` should be correctly set in the config"
     )
@@ -123,7 +119,7 @@
 
 
 # Test cost calculation
-@pytest.mark.skipif(skip, reason=reason)
+@skip_on_missing_imports(["anthropic"], "anthropic")
 def test_cost_calculation(mock_completion):
     completion = mock_completion(
         completion="Hi! My name is Claude.",
My name is Claude.", @@ -136,7 +132,7 @@ def test_cost_calculation(mock_completion): ), "Cost should be $0.002025" -@pytest.mark.skipif(skip, reason=reason) +@skip_on_missing_imports(["anthropic"], "anthropic") def test_load_config(anthropic_client): params = { "model": "claude-3-sonnet-20240229", @@ -158,7 +154,7 @@ def test_load_config(anthropic_client): assert result == expected_params, "Config should be correctly loaded" -@pytest.mark.skipif(skip, reason=reason) +@skip_on_missing_imports(["anthropic"], "anthropic") def test_extract_json_response(anthropic_client): # Define test Pydantic model class Step(BaseModel): diff --git a/test/oai/test_bedrock.py b/test/oai/test_bedrock.py index cb4eb2986e..7563c634fa 100644 --- a/test/oai/test_bedrock.py +++ b/test/oai/test_bedrock.py @@ -8,15 +8,9 @@ import pytest -from autogen.import_utils import optional_import_block +from autogen.import_utils import skip_on_missing_imports from autogen.oai.bedrock import BedrockClient, oai_messages_to_bedrock_messages -with optional_import_block() as result: - import boto3 # noqa: F401 - from botocore.config import Config # noqa: F401 - -skip = not result.is_successful - # Fixtures for mock data @pytest.fixture @@ -42,18 +36,15 @@ def bedrock_client(): return client -skip_reason = "Amazon Bedrock dependency is not installed" - - # Test initialization and configuration -@pytest.mark.skipif(skip, reason=skip_reason) +@skip_on_missing_imports(["boto3", "botocore"], "bedrock") def test_initialization(): # Creation works without an api_key as it's handled in the parameter parsing BedrockClient() # Test parameters -@pytest.mark.skipif(skip, reason=skip_reason) +@skip_on_missing_imports(["boto3", "botocore"], "bedrock") def test_parsing_params(bedrock_client): # All parameters (with default values) params = { @@ -124,7 +115,7 @@ def test_parsing_params(bedrock_client): # Test text generation -@pytest.mark.skipif(skip, reason=skip_reason) +@skip_on_missing_imports(["boto3", "botocore"], "bedrock") @patch("autogen.oai.bedrock.BedrockClient.create") def test_create_response(mock_chat, bedrock_client): # Mock BedrockClient.chat response @@ -160,7 +151,7 @@ def test_create_response(mock_chat, bedrock_client): # Test functions/tools -@pytest.mark.skipif(skip, reason=skip_reason) +@skip_on_missing_imports(["boto3", "botocore"], "bedrock") @patch("autogen.oai.bedrock.BedrockClient.create") def test_create_response_with_tool_call(mock_chat, bedrock_client): # Mock BedrockClient.chat response @@ -224,7 +215,7 @@ def test_create_response_with_tool_call(mock_chat, bedrock_client): # Test message conversion from OpenAI to Bedrock format -@pytest.mark.skipif(skip, reason=skip_reason) +@skip_on_missing_imports(["boto3", "botocore"], "bedrock") def test_oai_messages_to_bedrock_messages(bedrock_client): # Test that the "name" key is removed and system messages converted to user message test_messages = [ diff --git a/test/oai/test_cerebras.py b/test/oai/test_cerebras.py index 7b134f573c..f92923c7f7 100644 --- a/test/oai/test_cerebras.py +++ b/test/oai/test_cerebras.py @@ -8,14 +8,9 @@ import pytest -from autogen.import_utils import optional_import_block +from autogen.import_utils import skip_on_missing_imports from autogen.oai.cerebras import CerebrasClient, calculate_cerebras_cost -with optional_import_block() as result: - from cerebras.cloud.sdk import Cerebras, Stream # noqa: F401 - -skip = not result.is_successful - # Fixtures for mock data @pytest.fixture @@ -36,11 +31,8 @@ def cerebras_client(): return 
CerebrasClient(api_key="fake_api_key") -skip_reason = "Cerebras dependency is not installed" - - # Test initialization and configuration -@pytest.mark.skipif(skip, reason=skip_reason) +@skip_on_missing_imports(["cerebras"], "cerebras") def test_initialization(): # Missing any api_key with pytest.raises(AssertionError) as assertinfo: @@ -56,13 +48,13 @@ def test_initialization(): # Test standard initialization -@pytest.mark.skipif(skip, reason=skip_reason) +@skip_on_missing_imports(["cerebras"], "cerebras") def test_valid_initialization(cerebras_client): assert cerebras_client.api_key == "fake_api_key", "Config api_key should be correctly set" # Test parameters -@pytest.mark.skipif(skip, reason=skip_reason) +@skip_on_missing_imports(["cerebras"], "cerebras") def test_parsing_params(cerebras_client): # All parameters params = { @@ -133,7 +125,7 @@ def test_parsing_params(cerebras_client): # Test cost calculation -@pytest.mark.skipif(skip, reason=skip_reason) +@skip_on_missing_imports(["cerebras"], "cerebras") def test_cost_calculation(mock_response): response = mock_response( text="Example response", @@ -155,7 +147,7 @@ def test_cost_calculation(mock_response): # Test text generation -@pytest.mark.skipif(skip, reason=skip_reason) +@skip_on_missing_imports(["cerebras"], "cerebras") @patch("autogen.oai.cerebras.CerebrasClient.create") def test_create_response(mock_chat, cerebras_client): # Mock CerebrasClient.chat response @@ -189,7 +181,7 @@ def test_create_response(mock_chat, cerebras_client): # Test functions/tools -@pytest.mark.skipif(skip, reason=skip_reason) +@skip_on_missing_imports(["cerebras"], "cerebras") @patch("autogen.oai.cerebras.CerebrasClient.create") def test_create_response_with_tool_call(mock_chat, cerebras_client): # Mock `cerebras_response = client.chat(**cerebras_params)` diff --git a/test/oai/test_client.py b/test/oai/test_client.py index 3f62cee7ca..5a0def3845 100755 --- a/test/oai/test_client.py +++ b/test/oai/test_client.py @@ -17,7 +17,7 @@ from autogen import OpenAIWrapper from autogen.cache.cache import Cache -from autogen.import_utils import optional_import_block +from autogen.import_utils import optional_import_block, skip_on_missing_imports from autogen.oai.client import LEGACY_CACHE_DIR, LEGACY_DEFAULT_CACHE_SEED, OpenAIClient from ..conftest import Credentials @@ -31,11 +31,9 @@ if openai.__version__ >= "1.1.0": TOOL_ENABLED = True -skip = not result.is_successful - @pytest.mark.openai -@pytest.mark.skipif(skip, reason="openai>=1 not installed") +@skip_on_missing_imports(["openai"]) def test_aoai_chat_completion(credentials_azure_gpt_35_turbo: Credentials): config_list = credentials_azure_gpt_35_turbo.config_list client = OpenAIWrapper(config_list=config_list) @@ -54,7 +52,8 @@ def test_aoai_chat_completion(credentials_azure_gpt_35_turbo: Credentials): @pytest.mark.openai -@pytest.mark.skipif(skip or not TOOL_ENABLED, reason="openai>=1.1.0 not installed") +@pytest.mark.skipif(not TOOL_ENABLED, reason="openai>=1.1.0 not installed") +@skip_on_missing_imports(["openai"]) def test_oai_tool_calling_extraction(credentials_gpt_4o_mini: Credentials): client = OpenAIWrapper(config_list=credentials_gpt_4o_mini.config_list) response = client.create( @@ -87,7 +86,7 @@ def test_oai_tool_calling_extraction(credentials_gpt_4o_mini: Credentials): @pytest.mark.openai -@pytest.mark.skipif(skip, reason="openai>=1 not installed") +@skip_on_missing_imports(["openai"]) def test_chat_completion(credentials_gpt_4o_mini: Credentials): client = 
OpenAIWrapper(config_list=credentials_gpt_4o_mini.config_list) response = client.create(messages=[{"role": "user", "content": "1+1="}]) @@ -96,7 +95,7 @@ def test_chat_completion(credentials_gpt_4o_mini: Credentials): @pytest.mark.openai -@pytest.mark.skipif(skip, reason="openai>=1 not installed") +@skip_on_missing_imports(["openai"]) def test_completion(credentials_azure_gpt_35_turbo_instruct: Credentials): client = OpenAIWrapper(config_list=credentials_azure_gpt_35_turbo_instruct.config_list) response = client.create(prompt="1+1=") @@ -105,7 +104,7 @@ def test_completion(credentials_azure_gpt_35_turbo_instruct: Credentials): @pytest.mark.openai -@pytest.mark.skipif(skip, reason="openai>=1 not installed") +@skip_on_missing_imports(["openai"]) @pytest.mark.parametrize( "cache_seed", [ @@ -120,7 +119,7 @@ def test_cost(credentials_azure_gpt_35_turbo_instruct: Credentials, cache_seed): @pytest.mark.openai -@pytest.mark.skipif(skip, reason="openai>=1 not installed") +@skip_on_missing_imports(["openai"]) def test_customized_cost(credentials_azure_gpt_35_turbo_instruct: Credentials): config_list = credentials_azure_gpt_35_turbo_instruct.config_list for config in config_list: @@ -133,7 +132,7 @@ def test_customized_cost(credentials_azure_gpt_35_turbo_instruct: Credentials): @pytest.mark.openai -@pytest.mark.skipif(skip, reason="openai>=1 not installed") +@skip_on_missing_imports(["openai"]) def test_usage_summary(credentials_azure_gpt_35_turbo_instruct: Credentials): client = OpenAIWrapper(config_list=credentials_azure_gpt_35_turbo_instruct.config_list) response = client.create(prompt="1+3=", cache_seed=None) @@ -165,7 +164,7 @@ def test_usage_summary(credentials_azure_gpt_35_turbo_instruct: Credentials): @pytest.mark.openai -@pytest.mark.skipif(skip, reason="openai>=1 not installed") +@skip_on_missing_imports(["openai"]) def test_legacy_cache(credentials_gpt_4o_mini: Credentials): # Prompt to use for testing. prompt = "Write a 100 word summary on the topic of the history of human civilization." @@ -229,7 +228,7 @@ def test_legacy_cache(credentials_gpt_4o_mini: Credentials): @pytest.mark.openai -@pytest.mark.skipif(skip, reason="openai>=1 not installed") +@skip_on_missing_imports(["openai"]) def test_cache(credentials_gpt_4o_mini: Credentials): # Prompt to use for testing. prompt = "Write a 100 word summary on the topic of the history of artificial intelligence." 
diff --git a/test/oai/test_client_stream.py b/test/oai/test_client_stream.py
index 9150703ce5..50591c9514 100755
--- a/test/oai/test_client_stream.py
+++ b/test/oai/test_client_stream.py
@@ -12,13 +12,11 @@
 import pytest

 from autogen import OpenAIWrapper
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import optional_import_block, skip_on_missing_imports

-from ..conftest import Credentials, reason
+from ..conftest import Credentials

 with optional_import_block() as result:
-    from openai import OpenAI  # noqa: F401
-
     # raises exception if openai>=1 is installed and something is wrong with imports
     # otherwise the test will be skipped
     from openai.types.chat.chat_completion import ChatCompletionMessage  # type: ignore [attr-defined]
@@ -28,11 +26,9 @@
         ChoiceDeltaToolCallFunction,
     )

-skip = not result.is_successful
-

 @pytest.mark.openai
-@pytest.mark.skipif(skip, reason=reason)
+@skip_on_missing_imports(["openai"])
 def test_aoai_chat_completion_stream(credentials_gpt_4o_mini: Credentials) -> None:
     client = OpenAIWrapper(config_list=credentials_gpt_4o_mini.config_list)
     response = client.create(messages=[{"role": "user", "content": "2+2="}], stream=True)
@@ -41,7 +37,7 @@ def test_aoai_chat_completion_stream(credentials_gpt_4o_mini: Credentials) -> No


 @pytest.mark.openai
-@pytest.mark.skipif(skip, reason=reason)
+@skip_on_missing_imports(["openai"])
 def test_chat_completion_stream(credentials_gpt_4o_mini: Credentials) -> None:
     client = OpenAIWrapper(config_list=credentials_gpt_4o_mini.config_list)
     response = client.create(messages=[{"role": "user", "content": "1+1="}], stream=True)
@@ -84,7 +80,7 @@ def test__update_dict_from_chunk() -> None:


 @pytest.mark.openai
-@pytest.mark.skipif(skip, reason=reason)
+@skip_on_missing_imports(["openai"])
 def test__update_function_call_from_chunk() -> None:
     function_call_chunks = [
         ChoiceDeltaFunctionCall(arguments=None, name="get_current_weather"),
@@ -117,7 +113,7 @@


 @pytest.mark.openai
-@pytest.mark.skipif(skip, reason=reason)
+@skip_on_missing_imports(["openai"])
 def test__update_tool_calls_from_chunk() -> None:
     tool_calls_chunks = [
         ChoiceDeltaToolCall(
@@ -193,7 +189,7 @@ def test__update_tool_calls_from_chunk() -> None:


 @pytest.mark.openai
-@pytest.mark.skipif(skip, reason=reason)
+@skip_on_missing_imports(["openai"])
 def test_chat_functions_stream(credentials_gpt_4o_mini: Credentials) -> None:
     functions = [
         {
@@ -225,7 +221,7 @@ def test_chat_functions_stream(credentials_gpt_4o_mini: Credentials) -> None:


 @pytest.mark.openai
-@pytest.mark.skipif(skip, reason=reason)
+@skip_on_missing_imports(["openai"])
 def test_chat_tools_stream(credentials_gpt_4o_mini: Credentials) -> None:
     tools = [
         {
@@ -267,7 +263,7 @@ def test_chat_tools_stream(credentials_gpt_4o_mini: Credentials) -> None:


 @pytest.mark.openai
-@pytest.mark.skipif(skip, reason=reason)
+@skip_on_missing_imports(["openai"])
 def test_completion_stream(credentials_azure_gpt_35_turbo_instruct: Credentials) -> None:
     client = OpenAIWrapper(config_list=credentials_azure_gpt_35_turbo_instruct.config_list)
     response = client.create(prompt="1+1=", stream=True)
diff --git a/test/oai/test_cohere.py b/test/oai/test_cohere.py
index e4db3692e0..2e1742623d 100644
--- a/test/oai/test_cohere.py
+++ b/test/oai/test_cohere.py
@@ -10,24 +10,16 @@
 import pytest

-from autogen.import_utils import optional_import_block
+from autogen.import_utils import skip_on_missing_imports
 from autogen.oai.cohere import CohereClient, calculate_cohere_cost

-with optional_import_block() as result:
-    from cohere import Client as Cohere  # noqa: F401
-
-skip = not result.is_successful
-
-
-reason = "Cohere dependency not installed!"
-

 @pytest.fixture
 def cohere_client():
     return CohereClient(api_key="dummy_api_key")


-@pytest.mark.skipif(skip, reason=reason)
+@skip_on_missing_imports(["cohere"], "cohere")
 def test_initialization_missing_api_key():
     os.environ.pop("COHERE_API_KEY", None)
     with pytest.raises(
@@ -39,12 +31,12 @@
     CohereClient(api_key="dummy_api_key")


-@pytest.mark.skipif(skip, reason=reason)
+@skip_on_missing_imports(["cohere"], "cohere")
 def test_intialization(cohere_client):
     assert cohere_client.api_key == "dummy_api_key", "`api_key` should be correctly set in the config"


-@pytest.mark.skipif(skip, reason=reason)
+@skip_on_missing_imports(["cohere"], "cohere")
 def test_calculate_cohere_cost():
     assert calculate_cohere_cost(0, 0, model="command-r") == 0.0, (
         "Cost should be 0 for 0 input_tokens and 0 output_tokens"
@@ -52,7 +44,7 @@
     assert calculate_cohere_cost(100, 200, model="command-r-plus") == 0.0033


-@pytest.mark.skipif(skip, reason=reason)
+@skip_on_missing_imports(["cohere"], "cohere")
 def test_load_config(cohere_client):
     params = {
         "model": "command-r-plus",
diff --git a/test/oai/test_custom_client.py b/test/oai/test_custom_client.py
index 63c5be5433..772ca17e2a 100644
--- a/test/oai/test_custom_client.py
+++ b/test/oai/test_custom_client.py
@@ -8,13 +8,7 @@
 import pytest

 from autogen import OpenAIWrapper
-from autogen.import_utils import optional_import_block
-
-with optional_import_block() as result:
-    from openai import OpenAI  # noqa: F401
-
-skip = not result.is_successful
-
+from autogen.import_utils import skip_on_missing_imports

 TEST_COST = 20000000
 TEST_CUSTOM_RESPONSE = "This is a custom response."
@@ -24,6 +18,7 @@
 TEST_MAX_LENGTH = 1000


+@skip_on_missing_imports(["openai"])
 def test_custom_model_client():
     class CustomModel:
         def __init__(self, config: dict, test_hook):
@@ -92,6 +87,7 @@ def get_usage(response) -> dict:
     assert test_hook["max_length"] == TEST_MAX_LENGTH


+@skip_on_missing_imports(["openai"])
 def test_registering_with_wrong_class_name_raises_error():
     class CustomModel:
         def __init__(self, config: dict):
@@ -122,6 +118,7 @@ def get_usage(response) -> dict:
         client.register_model_client(model_client_cls=CustomModel)


+@skip_on_missing_imports(["openai"])
 def test_not_all_clients_registered_raises_error():
     class CustomModel:
         def __init__(self, config: dict):
@@ -169,6 +166,7 @@ def get_usage(response) -> dict:
         client.create(messages=[{"role": "user", "content": "2+2="}], cache_seed=None)


+@skip_on_missing_imports(["openai"])
 def test_registering_with_extra_config_args():
     class CustomModel:
         def __init__(self, config: dict, test_hook):
diff --git a/test/oai/test_gemini.py b/test/oai/test_gemini.py
index 71a7cca346..be193af771 100644
--- a/test/oai/test_gemini.py
+++ b/test/oai/test_gemini.py
@@ -12,14 +12,10 @@
 import pytest
 from pydantic import BaseModel

-from autogen.import_utils import optional_import_block
+from autogen.import_utils import optional_import_block, skip_on_missing_imports
 from autogen.oai.gemini import GeminiClient

 with optional_import_block() as result:
-    import google.ai  # noqa: F401
-    import google.auth  # noqa: F401
-    import vertexai  # noqa: F401
-    from PIL import Image  # noqa: F401
     from google.api_core.exceptions import InternalServerError
     from google.auth.credentials import Credentials
     from google.cloud.aiplatform.initializer import global_config as vertexai_global_config
@@ -29,8 +25,6 @@
     from vertexai.generative_models import HarmCategory as VertexAIHarmCategory
     from vertexai.generative_models import SafetySetting as VertexAISafetySetting

-skip = not result.is_successful
-

 # Fixtures for mock data
 @pytest.fixture
@@ -69,7 +63,9 @@ def gemini_client_with_credentials():


 # Test compute location initialization and configuration
-@pytest.mark.skipif(skip, reason="Google GenAI dependency is not installed")
+@skip_on_missing_imports(
+    ["vertexai", "PIL", "google.ai", "google.auth", "google.api", "google.cloud", "google.generativeai"], "gemini"
+)
 def test_compute_location_initialization():
     with pytest.raises(AssertionError):
         GeminiClient(
@@ -78,7 +74,9 @@


 # Test project initialization and configuration
-@pytest.mark.skipif(skip, reason="Google GenAI dependency is not installed")
+@skip_on_missing_imports(
+    ["vertexai", "PIL", "google.ai", "google.auth", "google.api", "google.cloud", "google.generativeai"], "gemini"
+)
 def test_project_initialization():
     with pytest.raises(AssertionError):
         GeminiClient(
@@ -86,12 +84,16 @@
         )  # Should raise an AssertionError due to specifying API key and compute location


-@pytest.mark.skipif(skip, reason="Google GenAI dependency is not installed")
+@skip_on_missing_imports(
+    ["vertexai", "PIL", "google.ai", "google.auth", "google.api", "google.cloud", "google.generativeai"], "gemini"
+)
 def test_valid_initialization(gemini_client):
     assert gemini_client.api_key == "fake_api_key", "API Key should be correctly set"


-@pytest.mark.skipif(skip, reason="Google GenAI dependency is not installed")
+@skip_on_missing_imports(
+    ["vertexai", "PIL", "google.ai", "google.auth", "google.api", "google.cloud", "google.generativeai"], "gemini"
+)
 def test_google_application_credentials_initialization():
     GeminiClient(google_application_credentials="credentials.json", project_id="fake-project-id")
     assert os.environ["GOOGLE_APPLICATION_CREDENTIALS"] == "credentials.json", (
@@ -99,7 +101,9 @@
         "Incorrect Google application credentials initialization"
     )


-@pytest.mark.skipif(skip, reason="Google GenAI dependency is not installed")
+@skip_on_missing_imports(
+    ["vertexai", "PIL", "google.ai", "google.auth", "google.api", "google.cloud", "google.generativeai"], "gemini"
+)
 def test_vertexai_initialization():
     mock_credentials = MagicMock(Credentials)
     GeminiClient(credentials=mock_credentials, project_id="fake-project-id", location="us-west1")
@@ -108,7 +112,9 @@
     assert vertexai_global_config.credentials == mock_credentials, "Incorrect VertexAI credentials initialization"


-@pytest.mark.skipif(skip, reason="Google GenAI dependency is not installed")
+@skip_on_missing_imports(
+    ["vertexai", "PIL", "google.ai", "google.auth", "google.api", "google.cloud", "google.generativeai"], "gemini"
+)
 def test_gemini_message_handling(gemini_client):
     messages = [
         {"role": "system", "content": "You are my personal assistant."},
@@ -145,7 +151,9 @@
             assert converted_messages[i].parts[j].text == part, "Incorrect mapped message text"


-@pytest.mark.skipif(skip, reason="Google GenAI dependency is not installed")
+@skip_on_missing_imports(
+    ["vertexai", "PIL", "google.ai", "google.auth", "google.api", "google.cloud", "google.generativeai"], "gemini"
+)
 def test_gemini_empty_message_handling(gemini_client):
     messages = [
         {"role": "system", "content": "You are my personal assistant."},
@@ -163,7 +171,9 @@
     assert converted_messages[-1].parts[0].text == "empty", "Empty message is not converted to 'empty' correctly"


-@pytest.mark.skipif(skip, reason="Google GenAI dependency is not installed")
+@skip_on_missing_imports(
+    ["vertexai", "PIL", "google.ai", "google.auth", "google.api", "google.cloud", "google.generativeai"], "gemini"
+)
 def test_vertexai_safety_setting_conversion(gemini_client):
     safety_settings = [
         {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_ONLY_HIGH"},
@@ -195,7 +205,9 @@ def compare_safety_settings(converted_safety_settings, expected_safety_settings)
     assert all(settings_comparison), "Converted safety settings are incorrect"


-@pytest.mark.skipif(skip, reason="Google GenAI dependency is not installed")
+@skip_on_missing_imports(
+    ["vertexai", "PIL", "google.ai", "google.auth", "google.api", "google.cloud", "google.generativeai"], "gemini"
+)
 def test_vertexai_default_safety_settings_dict(gemini_client):
     safety_settings = {
         VertexAIHarmCategory.HARM_CATEGORY_HARASSMENT: VertexAIHarmBlockThreshold.BLOCK_ONLY_HIGH,
@@ -220,7 +232,9 @@ def compare_safety_settings(converted_safety_settings, expected_safety_settings)
     assert all(settings_comparison), "Converted safety settings are incorrect"


-@pytest.mark.skipif(skip, reason="Google GenAI dependency is not installed")
+@skip_on_missing_imports(
+    ["vertexai", "PIL", "google.ai", "google.auth", "google.api", "google.cloud", "google.generativeai"], "gemini"
+)
 def test_vertexai_safety_setting_list(gemini_client):
     harm_categories = [
         VertexAIHarmCategory.HARM_CATEGORY_HARASSMENT,
@@ -252,7 +266,9 @@ def compare_safety_settings(converted_safety_settings, expected_safety_settings)


 # Test error handling
 @patch("autogen.oai.gemini.genai")
-@pytest.mark.skipif(skip, reason="Google GenAI dependency is not installed")
+@skip_on_missing_imports(
+    ["vertexai", "PIL", "google.ai", "google.auth", "google.api", "google.cloud", "google.generativeai"], "gemini"
+)
 def test_internal_server_error_retry(mock_genai, gemini_client):
     mock_genai.GenerativeModel.side_effect = [InternalServerError("Test Error"), None]  # First call fails
     # Mock successful response
@@ -266,7 +282,9 @@


 # Test cost calculation
-@pytest.mark.skipif(skip, reason="Google GenAI dependency is not installed")
+@skip_on_missing_imports(
+    ["vertexai", "PIL", "google.ai", "google.auth", "google.api", "google.cloud", "google.generativeai"], "gemini"
+)
 def test_cost_calculation(gemini_client, mock_response):
     response = mock_response(
         text="Example response",
@@ -278,7 +296,9 @@
     assert gemini_client.cost(response) > 0, "Cost should be correctly calculated as zero"


-@pytest.mark.skipif(skip, reason="Google GenAI dependency is not installed")
+@skip_on_missing_imports(
+    ["vertexai", "PIL", "google.ai", "google.auth", "google.api", "google.cloud", "google.generativeai"], "gemini"
+)
 @patch("autogen.oai.gemini.genai.GenerativeModel")
 # @patch("autogen.oai.gemini.genai.configure")
 @patch("autogen.oai.gemini.calculate_gemini_cost")
@@ -329,7 +349,9 @@ def test_create_response_with_text(mock_calculate_cost, mock_generative_model, g
     mock_calculate_cost.assert_called_once_with(False, 100, 50, "gemini-pro")


-@pytest.mark.skipif(skip, reason="Google GenAI dependency is not installed")
+@skip_on_missing_imports(
+    ["vertexai", "PIL", "google.ai", "google.auth", "google.api", "google.cloud", "google.generativeai"], "gemini"
+)
 @patch("autogen.oai.gemini.GenerativeModel")
 @patch("autogen.oai.gemini.vertexai.init")
 @patch("autogen.oai.gemini.calculate_gemini_cost")
@@ -381,7 +403,9 @@ def test_vertexai_create_response(
     mock_calculate_cost.assert_called_once_with(True, 100, 50, "gemini-pro")


-@pytest.mark.skipif(skip, reason="Google GenAI dependency is not installed")
+@skip_on_missing_imports(
+    ["vertexai", "PIL", "google.ai", "google.auth", "google.api", "google.cloud", "google.generativeai"], "gemini"
+)
 def test_extract_json_response(gemini_client):
     # Define test Pydantic model
     class Step(BaseModel):
diff --git a/test/oai/test_groq.py b/test/oai/test_groq.py
index 2519cee0e7..824f4dc22a 100644
--- a/test/oai/test_groq.py
+++ b/test/oai/test_groq.py
@@ -8,14 +8,9 @@
 import pytest

-from autogen.import_utils import optional_import_block
+from autogen.import_utils import skip_on_missing_imports
 from autogen.oai.groq import GroqClient, calculate_groq_cost

-with optional_import_block() as result:
-    from groq import Groq  # noqa: F401
-
-skip = not result.is_successful
-

 # Fixtures for mock data
 @pytest.fixture
@@ -36,11 +31,8 @@ def groq_client():
     return GroqClient(api_key="fake_api_key")


-skip_reason = "Groq dependency is not installed"
-
-
 # Test initialization and configuration
-@pytest.mark.skipif(skip, reason=skip_reason)
+@skip_on_missing_imports(["groq"], "groq")
 def test_initialization():
     # Missing any api_key
     with pytest.raises(AssertionError) as assertinfo:
@@ -55,13 +47,13 @@ def test_initialization():


 # Test standard initialization
-@pytest.mark.skipif(skip, reason=skip_reason)
+@skip_on_missing_imports(["groq"], "groq")
 def test_valid_initialization(groq_client):
     assert groq_client.api_key == "fake_api_key", "Config api_key should be correctly set"


 # Test parameters
-@pytest.mark.skipif(skip, reason=skip_reason)
+@skip_on_missing_imports(["groq"], "groq")
 def test_parsing_params(groq_client):
     # All parameters
     params = {
@@ -142,7 +134,7 @@ def test_parsing_params(groq_client):


 # Test cost calculation
-@pytest.mark.skipif(skip, reason=skip_reason)
+@skip_on_missing_imports(["groq"], "groq")
 def test_cost_calculation(mock_response):
     response = mock_response(
         text="Example response",
@@ -158,7 +150,7 @@ def test_cost_calculation(mock_response):


 # Test text generation
-@pytest.mark.skipif(skip, reason=skip_reason)
+@skip_on_missing_imports(["groq"], "groq")
 @patch("autogen.oai.groq.GroqClient.create")
 def test_create_response(mock_chat, groq_client):
     # Mock GroqClient.chat response
@@ -192,7 +184,7 @@ def test_create_response(mock_chat, groq_client):


 # Test functions/tools
-@pytest.mark.skipif(skip, reason=skip_reason)
+@skip_on_missing_imports(["groq"], "groq")
 @patch("autogen.oai.groq.GroqClient.create")
 def test_create_response_with_tool_call(mock_chat, groq_client):
     # Mock `groq_response = client.chat(**groq_params)`
diff --git a/test/oai/test_mistral.py b/test/oai/test_mistral.py
index c8428d42ce..08dbde3c04 100644
--- a/test/oai/test_mistral.py
+++ b/test/oai/test_mistral.py
@@ -8,24 +8,9 @@
 import pytest

-from autogen.import_utils import optional_import_block
+from autogen.import_utils import skip_on_missing_imports
 from autogen.oai.mistral import MistralAIClient, calculate_mistral_cost

-with optional_import_block() as result:
-    from mistralai import (
-        AssistantMessage,  # noqa: F401
-        Function,  # noqa: F401
-        FunctionCall,  # noqa: F401
-        Mistral,  # noqa: F401
-        SystemMessage,  # noqa: F401
-        ToolCall,  # noqa: F401
-        ToolMessage,  # noqa: F401
-        UserMessage,  # noqa: F401
-    )
-
-
-skip = not result.is_successful
-

 # Fixtures for mock data
 @pytest.fixture
@@ -47,7 +32,7 @@ def mistral_client():


 # Test initialization and configuration
-@pytest.mark.skipif(skip, reason="Mistral.AI dependency is not installed")
+@skip_on_missing_imports(["mistralai"], "mistral")
 def test_initialization():
     # Missing any api_key
     with pytest.raises(AssertionError) as assertinfo:
@@ -63,13 +48,13 @@ def test_initialization():


 # Test standard initialization
-@pytest.mark.skipif(skip, reason="Mistral.AI dependency is not installed")
+@skip_on_missing_imports(["mistralai"], "mistral")
 def test_valid_initialization(mistral_client):
     assert mistral_client.api_key == "fake_api_key", "Config api_key should be correctly set"


 # Test cost calculation
-@pytest.mark.skipif(skip, reason="Mistral.AI dependency is not installed")
+@skip_on_missing_imports(["mistralai"], "mistral")
 def test_cost_calculation(mock_response):
     response = mock_response(
         text="Example response",
@@ -84,7 +69,7 @@ def test_cost_calculation(mock_response):


 # Test text generation
-@pytest.mark.skipif(skip, reason="Mistral.AI dependency is not installed")
+@skip_on_missing_imports(["mistralai"], "mistral")
 @patch("autogen.oai.mistral.MistralAIClient.create")
 def test_create_response(mock_chat, mistral_client):
     # Mock `mistral_response = client.chat.complete(**mistral_params)`
@@ -118,7 +103,7 @@ def test_create_response(mock_chat, mistral_client):


 # Test functions/tools
-@pytest.mark.skipif(skip, reason="Mistral.AI dependency is not installed")
+@skip_on_missing_imports(["mistralai"], "mistral")
 @patch("autogen.oai.mistral.MistralAIClient.create")
 def test_create_response_with_tool_call(mock_chat, mistral_client):
     # Mock `mistral_response = client.chat.complete(**mistral_params)`
diff --git a/test/oai/test_ollama.py b/test/oai/test_ollama.py
index 41f7882bcd..e662989534 100644
--- a/test/oai/test_ollama.py
+++ b/test/oai/test_ollama.py
@@ -10,16 +10,9 @@
 import pytest
 from pydantic import BaseModel

-from autogen.import_utils import optional_import_block
+from autogen.import_utils import skip_on_missing_imports
 from autogen.oai.ollama import OllamaClient, response_to_tool_call

-with optional_import_block() as result:
-    import ollama  # noqa: F401
-    from fix_busted_json import repair_json  # noqa: F401
-    from ollama import Client  # noqa: F401
-
-skip = not result.is_successful
-

 # Fixtures for mock data
 @pytest.fixture
@@ -46,18 +39,15 @@ def ollama_client():
     return client


-skip_reason = "Ollama dependency is not installed"
-
-
 # Test initialization and configuration
-@pytest.mark.skipif(skip, reason=skip_reason)
+@skip_on_missing_imports(["ollama", "fix_busted_json"], "ollama")
 def test_initialization():
     # Creation works without an api_key
     OllamaClient()


 # Test parameters
-@pytest.mark.skipif(skip, reason=skip_reason)
+@skip_on_missing_imports(["ollama", "fix_busted_json"], "ollama")
 def test_parsing_params(ollama_client):
     # All parameters (with default values)
     params = {
@@ -121,7 +111,7 @@ def test_parsing_params(ollama_client):


 # Test text generation
-@pytest.mark.skipif(skip, reason=skip_reason)
+@skip_on_missing_imports(["ollama", "fix_busted_json"], "ollama")
 @patch("autogen.oai.ollama.OllamaClient.create")
 def test_create_response(mock_chat, ollama_client):
     # Mock OllamaClient.chat response
@@ -155,7 +145,7 @@ def test_create_response(mock_chat, ollama_client):


 # Test functions/tools
-@pytest.mark.skipif(skip, reason=skip_reason)
+@skip_on_missing_imports(["ollama", "fix_busted_json"], "ollama")
 @patch("autogen.oai.ollama.OllamaClient.create")
 def test_create_response_with_tool_call(mock_chat, ollama_client):
     # Mock OllamaClient.chat response
@@ -217,7 +207,7 @@ def test_create_response_with_tool_call(mock_chat, ollama_client):


 # Test function parsing with manual tool calling
-@pytest.mark.skipif(skip, reason=skip_reason)
+@skip_on_missing_imports(["ollama", "fix_busted_json"], "ollama")
 def test_manual_tool_calling_parsing(ollama_client):
     # Test the parsing of a tool call within the response content (fully correct)
     response_content = """[{"name": "weather_forecast", "arguments":{"location": "New York"}},{"name": "currency_calculator", "arguments":{"base_amount": 123.45, "quote_currency": "EUR", "base_currency": "USD"}}]"""
@@ -265,7 +255,7 @@ def test_manual_tool_calling_parsing(ollama_client):


 # Test message conversion from OpenAI to Ollama format
-@pytest.mark.skipif(skip, reason=skip_reason)
+@skip_on_missing_imports(["ollama", "fix_busted_json"], "ollama")
 def test_oai_messages_to_ollama_messages(ollama_client):
     # Test that the "name" key is removed
     test_messages = [
@@ -310,7 +300,7 @@ def test_oai_messages_to_ollama_messages(ollama_client):


 # Test message conversion from OpenAI to Ollama format
-@pytest.mark.skipif(skip, reason=skip_reason)
+@skip_on_missing_imports(["ollama", "fix_busted_json"], "ollama")
 def test_extract_json_response(ollama_client):
     # Define test Pydantic model
     class Step(BaseModel):
diff --git a/test/test_browser_utils.py b/test/test_browser_utils.py
index f37ba1b076..57a2a82c54 100755
--- a/test/test_browser_utils.py
+++ b/test/test_browser_utils.py
@@ -16,7 +16,7 @@
 import requests

 from autogen.browser_utils import SimpleTextBrowser
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import optional_import_block, skip_on_missing_imports

 BLOG_POST_URL = "https://docs.ag2.ai/blog/2023-04-21-LLM-tuning-math"
 BLOG_POST_TITLE = "Does Model and Inference Parameter Matter in LLM Applications? - A Case Study for MATH - AG2"
@@ -38,13 +38,7 @@

 with optional_import_block() as result:
-    import markdownify  # noqa: F401
-    import pathvalidate  # noqa: F401
     import requests
-    from bs4 import BeautifulSoup  # noqa: F401
-
-
-skip_all = not result.is_successful


 try:
@@ -70,10 +64,7 @@ def downloads_folder():
     yield downloads_folder


-@pytest.mark.skipif(
-    skip_all,
-    reason="do not run if dependency is not installed",
-)
+@skip_on_missing_imports(["markdownify", "pathvalidate", "requests", "bs4"], "websurfer")
 def test_simple_text_browser(downloads_folder: str):
     # Instantiate the browser
     user_agent = "python-requests/" + requests.__version__
@@ -159,9 +150,10 @@ def test_simple_text_browser(downloads_folder: str):


 @pytest.mark.skipif(
-    skip_all or skip_bing,
+    skip_bing,
     reason="do not run bing tests if key is missing",
 )
+@skip_on_missing_imports(["markdownify", "pathvalidate", "requests", "bs4"], "websurfer")
 def test_bing_search():
     # Instantiate the browser
     user_agent = "python-requests/" + requests.__version__
diff --git a/test/test_code_utils.py b/test/test_code_utils.py
index 25fd9c1446..15b70fb9f2 100755
--- a/test/test_code_utils.py
+++ b/test/test_code_utils.py
@@ -30,7 +30,7 @@
     infer_lang,
     is_docker_running,
 )
-from autogen.import_utils import optional_import_block
+from autogen.import_utils import skip_on_missing_imports

 from .conftest import Credentials
@@ -390,11 +390,8 @@ def test_create_virtual_env_with_extra_args():
     assert venv_context.env_name == os.path.split(temp_dir)[1]


+@skip_on_missing_imports(["openai"])
 def _test_improve(credentials_all: Credentials):
-    with optional_import_block() as result:
-        import openai  # noqa: F401
-    if not result.is_successful:
-        return
     config_list = credentials_all.config_list
     improved, _ = improve_function(
         "autogen/math_utils.py",
diff --git a/test/test_notebook.py b/test/test_notebook.py
index adad2a7f1b..1713dd691d 100755
--- a/test/test_notebook.py
+++ b/test/test_notebook.py
@@ -11,13 +11,7 @@
 import pytest

-from autogen.import_utils import optional_import_block
-
-with optional_import_block() as result:
-    import openai  # noqa: F401
-
-skip = not result.is_successful
-
+from autogen.import_utils import skip_on_missing_imports

 here = os.path.abspath(os.path.dirname(__file__))
@@ -52,90 +46,100 @@ def run_notebook(input_nb, output_nb="executed_openai_notebook.ipynb", save=Fals


 @pytest.mark.openai
 @pytest.mark.skipif(
-    skip or not sys.version.startswith("3.13"),
-    reason="do not run if openai is not installed or py!=3.13",
+    not sys.version.startswith("3.13"),
+    reason="do not run if py!=3.13",
 )
+@skip_on_missing_imports(["openai"])
 def test_agentchat_auto_feedback_from_code(save=False):
     run_notebook("agentchat_auto_feedback_from_code_execution.ipynb", save=save)


 @pytest.mark.openai
 @pytest.mark.skipif(
-    skip or not sys.version.startswith("3.11"),
-    reason="do not run if openai is not installed or py!=3.11",
+    not sys.version.startswith("3.11"),
+    reason="do not run if py!=3.11",
 )
+@skip_on_missing_imports(["openai"])
 def _test_oai_completion(save=False):
     run_notebook("oai_completion.ipynb", save=save)


 @pytest.mark.openai
 @pytest.mark.skipif(
-    skip or not sys.version.startswith("3.12"),
-    reason="do not run if openai is not installed or py!=3.12",
+    not sys.version.startswith("3.12"),
+    reason="do not run if py!=3.12",
 )
+@skip_on_missing_imports(["openai"])
 def test_agentchat_function_call(save=False):
     run_notebook("agentchat_function_call.ipynb", save=save)


 @pytest.mark.openai
 @pytest.mark.skipif(
-    skip or not sys.version.startswith("3.10"),
-    reason="do not run if openai is not installed or py!=3.10",
+    not sys.version.startswith("3.10"),
+    reason="do not run if py!=3.10",
 )
+@skip_on_missing_imports(["openai"])
 def test_agentchat_function_call_currency_calculator(save=False):
     run_notebook("agentchat_function_call_currency_calculator.ipynb", save=save)


 @pytest.mark.openai
 @pytest.mark.skipif(
-    skip or not sys.version.startswith("3.13"),
-    reason="do not run if openai is not installed or py!=3.13",
+    not sys.version.startswith("3.13"),
+    reason="do not run if py!=3.13",
 )
+@skip_on_missing_imports(["openai"])
 def test_agentchat_function_call_async(save=False):
     run_notebook("agentchat_function_call_async.ipynb", save=save)


 @pytest.mark.openai
 @pytest.mark.skipif(
-    skip or not sys.version.startswith("3.12"),
-    reason="do not run if openai is not installed or py!=3.12",
+    not sys.version.startswith("3.12"),
+    reason="do not run if py!=3.12",
 )
+@skip_on_missing_imports(["openai"])
 def _test_agentchat_MathChat(save=False):  # noqa: N802
     run_notebook("agentchat_MathChat.ipynb", save=save)


 @pytest.mark.openai
 @pytest.mark.skipif(
-    skip or not sys.version.startswith("3.10"),
-    reason="do not run if openai is not installed or py!=3.10",
+    not sys.version.startswith("3.10"),
+    reason="do not run if py!=3.10",
 )
+@skip_on_missing_imports(["openai"])
 def _test_oai_chatgpt_gpt4(save=False):
     run_notebook("oai_chatgpt_gpt4.ipynb", save=save)


 @pytest.mark.openai
 @pytest.mark.skipif(
-    skip or not sys.version.startswith("3.12"),
-    reason="do not run if openai is not installed or py!=3.12",
+    not sys.version.startswith("3.12"),
+    reason="do not run if py!=3.12",
 )
+@skip_on_missing_imports(["openai"])
 def test_agentchat_groupchat_finite_state_machine(save=False):
     run_notebook("agentchat_groupchat_finite_state_machine.ipynb", save=save)


 @pytest.mark.openai
 @pytest.mark.skipif(
-    skip or not sys.version.startswith("3.11"),
-    reason="do not run if openai is not installed or py!=3.11",
+    not sys.version.startswith("3.11"),
+    reason="do not run if py!=3.11",
 )
+@skip_on_missing_imports(["openai"])
 def test_agentchat_cost_token_tracking(save=False):
     run_notebook("agentchat_cost_token_tracking.ipynb", save=save)


 @pytest.mark.openai
 @pytest.mark.skipif(
-    skip or not sys.version.startswith("3.11"),
-    reason="do not run if openai is not installed or py!=3.11",
+    not sys.version.startswith("3.11"),
+    reason="do not run if py!=3.11",
 )
+@skip_on_missing_imports(["openai"])
 def test_agentchat_groupchat_stateflow(save=False):
     run_notebook("agentchat_groupchat_stateflow.ipynb", save=save)
diff --git a/test/test_retrieve_utils.py b/test/test_retrieve_utils.py
index 2c957f7e7c..1f82538809 100755
--- a/test/test_retrieve_utils.py
+++ b/test/test_retrieve_utils.py
@@ -12,7 +12,7 @@
 import pytest

-from autogen.import_utils import optional_import_block
+from autogen.import_utils import optional_import_block, skip_on_missing_imports
 from autogen.retrieve_utils import (
     create_vector_db_from_dir,
     extract_text_from_pdf,
@@ -25,19 +25,8 @@
 )
 from autogen.token_count_utils import count_token

-with optional_import_block() as result:
-    import bs4  # noqa: F401
+with optional_import_block():
     import chromadb
-    import markdownify  # noqa: F401
-    import pypdf  # noqa: F401
-
-
-skip = not result.is_successful
-
-with optional_import_block() as result:
-    from unstructured.partition.auto import partition  # noqa: F401
-
-HAS_UNSTRUCTURED = result.is_successful

 test_dir = os.path.join(os.path.dirname(__file__), "test_files")
 expected_text = """AutoGen is an advanced tool designed to assist developers in harnessing the capabilities
@@ -46,7 +35,7 @@
 integration, testing, and deployment."""


-@pytest.mark.skipif(skip, reason="dependency is not installed")
+@skip_on_missing_imports(["bs4", "chromadb", "markdownify", "pypdf"], "retrievechat")
 class TestRetrieveUtils:
     def test_split_text_to_chunks(self):
         long_text = "A" * 10000
@@ -137,6 +126,7 @@ def test_query_vector_db(self):
         results = query_vector_db(["autogen"], client=client)
         assert isinstance(results, dict) and any("autogen" in res[0].lower() for res in results.get("documents", []))

+    @skip_on_missing_imports(["lancedb"], "unknown")
     def test_custom_vector_db(self):
         with optional_import_block() as result:
             import lancedb
@@ -242,10 +232,7 @@ def test_retrieve_utils(self):
         print(results["ids"][0])
         assert len(results["ids"][0]) == 4

-    @pytest.mark.skipif(
-        not HAS_UNSTRUCTURED,
-        reason="do not run if unstructured is not installed",
-    )
+    @skip_on_missing_imports(["unstructured"], "unknown")
     def test_unstructured(self):
         pdf_file_path = os.path.join(test_dir, "example.pdf")
         txt_file_path = os.path.join(test_dir, "example.txt")
diff --git a/test/test_token_count.py b/test/test_token_count.py
index 8461be8fb0..760df2cc5c 100755
--- a/test/test_token_count.py
+++ b/test/test_token_count.py
@@ -8,14 +8,7 @@
 import pytest

-from autogen.import_utils import optional_import_block
-
-with optional_import_block() as result:
-    from PIL import Image  # noqa: F401
-
-img_util_imported = result.is_successful
-
-
+from autogen.import_utils import skip_on_missing_imports
 from autogen.token_count_utils import (
     _num_token_from_messages,
     count_token,
@@ -96,7 +89,7 @@ def test_num_token_from_messages(model: str, expected_count: int) -> None:
     assert _num_token_from_messages(messages=messages, model=model) == expected_count


-@pytest.mark.skipif(not img_util_imported, reason="img_utils not imported")
+@skip_on_missing_imports("PIL", "unknown")
 def test_num_tokens_from_gpt_image():
     # mock num_tokens_from_gpt_image function
     base64_encoded_image = (