Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
270 changes: 70 additions & 200 deletions README.md

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion openadapt/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
pip install openadapt[all] # Everything
"""

__version__ = "1.0.0"
__version__ = "1.0.6"

# Lazy imports to avoid pulling in heavy dependencies unless needed

Expand Down
2 changes: 1 addition & 1 deletion openadapt/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@


@click.group()
@click.version_option(version="1.0.0", prog_name="openadapt")
@click.version_option(version="1.0.6", prog_name="openadapt")
def main():
"""OpenAdapt - GUI automation with ML.

Expand Down
2 changes: 1 addition & 1 deletion openadapt/version.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
"""Version information for the OpenAdapt meta-package."""

__version__ = "1.0.0"
__version__ = "1.0.6"
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ classifiers = [
# Minimal base - just the CLI
dependencies = [
"click>=8.0.0",
"pydantic-settings>=2.0.0",
]

[project.optional-dependencies]
Expand Down
1 change: 1 addition & 0 deletions tests/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
"""Tests package for OpenAdapt."""
99 changes: 99 additions & 0 deletions tests/test_cli.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
"""Tests for the openadapt CLI module."""

import pytest
from click.testing import CliRunner

from openadapt.cli import main, version, doctor


class TestCLI:
    """Test cases for the CLI functionality.

    All tests run the CLI in-process via click's CliRunner, so no
    subprocesses are spawned and no optional dependencies are needed.
    """

    def test_main_help(self):
        """Test that the main command shows help."""
        runner = CliRunner()
        result = runner.invoke(main, ["--help"])
        assert result.exit_code == 0
        assert "OpenAdapt - GUI automation with ML" in result.output

    def test_version_option(self):
        """Test the version option works."""
        runner = CliRunner()
        result = runner.invoke(main, ["--version"])
        assert result.exit_code == 0
        assert "1.0.6" in result.output

    def test_version_command(self):
        """Test the version command."""
        runner = CliRunner()
        result = runner.invoke(version)
        assert result.exit_code == 0
        assert "OpenAdapt Ecosystem Versions" in result.output
        assert "openadapt:" in result.output

    def test_doctor_command(self):
        """Test the doctor command runs."""
        runner = CliRunner()
        result = runner.invoke(doctor)
        assert result.exit_code == 0
        assert "OpenAdapt System Check" in result.output
        assert "Python:" in result.output

    def test_capture_help(self):
        """Test that capture commands show help."""
        runner = CliRunner()
        result = runner.invoke(main, ["capture", "--help"])
        assert result.exit_code == 0
        assert "Record GUI demonstrations" in result.output

    def test_train_help(self):
        """Test that train commands show help."""
        runner = CliRunner()
        result = runner.invoke(main, ["train", "--help"])
        assert result.exit_code == 0
        assert "Train ML models" in result.output

    def test_eval_help(self):
        """Test that eval commands show help."""
        runner = CliRunner()
        result = runner.invoke(main, ["eval", "--help"])
        assert result.exit_code == 0
        assert "Evaluate models" in result.output

    def test_serve_help(self):
        """Test that serve command shows help."""
        runner = CliRunner()
        result = runner.invoke(main, ["serve", "--help"])
        assert result.exit_code == 0
        assert "Serve the training dashboard" in result.output

    def test_capture_commands_require_optional_deps(self):
        """Test that capture commands fail gracefully without dependencies."""
        runner = CliRunner()
        # This should exit with error code 1 due to missing openadapt-capture
        result = runner.invoke(main, ["capture", "list"])
        assert result.exit_code == 1
        assert "openadapt-capture not installed" in result.output

    def test_train_status_works_without_optional_deps(self):
        """Test that `train status` succeeds even without openadapt-ml installed."""
        runner = CliRunner()
        # Unlike other train subcommands, `status` only inspects local files
        # and does not import openadapt-ml, so it should succeed.
        result = runner.invoke(main, ["train", "status"])
        assert result.exit_code == 0

    def test_eval_commands_require_optional_deps(self):
        """Test that eval commands fail gracefully without dependencies."""
        runner = CliRunner()
        # This should exit with error code 1 due to missing openadapt-evals
        result = runner.invoke(main, ["eval", "mock", "--tasks", "1"])
        assert result.exit_code == 1
        assert "openadapt-evals not installed" in result.output

    def test_serve_command_requires_optional_deps(self):
        """Test that serve command fails gracefully without dependencies."""
        runner = CliRunner()
        # This should exit with error code 1 due to missing openadapt-ml
        result = runner.invoke(main, ["serve", "--port", "8081", "--no-open"])
        assert result.exit_code == 1
        assert "openadapt-ml not installed" in result.output
129 changes: 129 additions & 0 deletions tests/test_config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,129 @@
"""Tests for the openadapt config module."""

import os
import tempfile
from pathlib import Path
from unittest import mock

import pytest

from openadapt.config import OpenAdaptSettings, reload_settings, settings


class TestConfig:
    """Exercises the OpenAdapt settings object and its helpers."""

    def test_settings_instance_exists(self):
        """The module-level singleton should be a usable settings object."""
        assert settings is not None
        assert isinstance(settings, OpenAdaptSettings)

    def test_default_values(self):
        """Out-of-the-box defaults should match the documented values."""
        assert settings.default_model == "Qwen/Qwen3-VL-2B-Instruct"
        assert settings.default_device == "auto"
        assert settings.default_batch_size == 1
        assert settings.eval_max_steps == 15
        assert settings.server_port == 8080
        assert settings.capture_audio is True
        assert settings.capture_transcribe is False

    def test_path_types(self):
        """Every directory setting should be exposed as a pathlib.Path."""
        for attr in (
            "capture_dir",
            "training_output_dir",
            "benchmark_results_dir",
            "model_cache_dir",
            "embedding_cache_dir",
        ):
            assert isinstance(getattr(settings, attr), Path)

    def test_default_paths(self):
        """Default data directories should live under the user's home."""
        home = str(Path.home())
        for attr in (
            "capture_dir",
            "training_output_dir",
            "benchmark_results_dir",
            "model_cache_dir",
        ):
            assert str(getattr(settings, attr)).startswith(home)

    def test_ensure_directories(self):
        """ensure_directories() should create every configured directory."""
        with tempfile.TemporaryDirectory() as scratch:
            root = Path(scratch)
            dirs = {
                "capture_dir": root / "captures",
                "training_output_dir": root / "training",
                "benchmark_results_dir": root / "benchmarks",
                "model_cache_dir": root / "models",
                "embedding_cache_dir": root / "embeddings",
            }
            cfg = OpenAdaptSettings(**dirs)

            # Nothing should exist before the call.
            assert not dirs["capture_dir"].exists()
            assert not dirs["training_output_dir"].exists()

            cfg.ensure_directories()

            # Every configured directory should now be present.
            for path in dirs.values():
                assert path.exists()

    def test_get_device_cpu_fallback(self):
        """get_device() should fall back to cpu when torch is unavailable."""
        cfg = OpenAdaptSettings(default_device="auto")

        # Make any `import torch` inside get_device() raise ImportError.
        with mock.patch.dict('sys.modules', {'torch': None}):
            with mock.patch('builtins.__import__', side_effect=ImportError):
                assert cfg.get_device() == "cpu"

    def test_get_device_manual_override(self):
        """An explicit device setting should be returned verbatim."""
        cfg = OpenAdaptSettings(default_device="cuda")
        assert cfg.get_device() == "cuda"

    def test_env_prefix(self):
        """OPENADAPT_-prefixed environment variables should override defaults."""
        with mock.patch.dict(os.environ, {'OPENADAPT_SERVER_PORT': '9090'}):
            cfg = OpenAdaptSettings()
            assert cfg.server_port == 9090

    def test_api_key_settings(self):
        """All provider API keys should default to None."""
        assert settings.anthropic_api_key is None
        assert settings.openai_api_key is None
        assert settings.google_api_key is None
        assert settings.lambda_api_key is None

    def test_reload_settings(self):
        """reload_settings() should yield a fresh instance with equal values."""
        before = settings
        after = reload_settings()

        assert isinstance(after, OpenAdaptSettings)
        # The reloaded instance carries the same configuration values.
        assert after.default_model == before.default_model
        assert after.server_port == before.server_port

    def test_azure_settings(self):
        """Azure-related settings should exist and default to None."""
        assert settings.azure_subscription_id is None
        assert settings.azure_ml_resource_group is None
        assert settings.azure_ml_workspace_name is None
        assert settings.azure_docker_image is None

    def test_grounding_settings(self):
        """Grounding service URLs should default to None."""
        assert settings.omniparser_url is None
        assert settings.uitars_url is None

    def test_retrieval_settings(self):
        """Retrieval settings should carry sensible defaults."""
        assert settings.retrieval_embedding_dim == 512
        assert settings.retrieval_top_k == 5
101 changes: 101 additions & 0 deletions tests/test_openadapt.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
"""Tests for the main openadapt module and its lazy import functionality."""

import pytest

import openadapt


class TestOpenAdapt:
    """Exercises the top-level openadapt package and its lazy imports."""

    def test_version_attribute(self):
        """The package should expose a string __version__."""
        assert hasattr(openadapt, '__version__')
        assert isinstance(openadapt.__version__, str)

    def test_version_matches_expected(self):
        """The package version should match the value in __init__.py."""
        assert openadapt.__version__ == "1.0.6"

    def test_all_exports(self):
        """__all__ should advertise every expected public name."""
        expected_exports = [
            "__version__",
            # From capture
            "Capture",
            "CaptureSession",
            "Recorder",
            "Action",
            "EventType",
            "MouseButton",
            # From evals
            "BenchmarkAdapter",
            "BenchmarkTask",
            "ApiAgent",
            "evaluate_agent_on_benchmark",
            # From viewer
            "PageBuilder",
            "HTMLBuilder",
            # From ml
            "QwenVLAdapter",
            "Trainer",
            # From grounding (optional)
            "Grounder",
            "OmniGrounder",
            "GeminiGrounder",
            # From retrieval (optional)
            "DemoRetriever",
            "DemoLibrary",
        ]

        assert hasattr(openadapt, '__all__')
        missing = [name for name in expected_exports if name not in openadapt.__all__]
        assert not missing

    def test_lazy_import_error_handling(self):
        """Lazy attributes should raise ImportError naming the absent package."""
        # Each attribute maps to the distribution that must appear in the error.
        cases = [
            ("Capture", "openadapt_capture"),
            ("BenchmarkAdapter", "openadapt_evals"),
            ("PageBuilder", "openadapt_viewer"),
            ("QwenVLAdapter", "openadapt_ml"),
        ]
        for attr, package in cases:
            with pytest.raises(ImportError) as exc_info:
                getattr(openadapt, attr)
            assert package in str(exc_info.value)

    def test_grounding_import_error_message(self):
        """Grounding attributes should point users at the install extra."""
        with pytest.raises(ImportError) as exc_info:
            getattr(openadapt, "Grounder")
        message = str(exc_info.value)
        assert "requires openadapt-grounding" in message
        assert "pip install openadapt[grounding]" in message

    def test_retrieval_import_error_message(self):
        """Retrieval attributes should point users at the install extra."""
        with pytest.raises(ImportError) as exc_info:
            getattr(openadapt, "DemoRetriever")
        message = str(exc_info.value)
        assert "requires openadapt-retrieval" in message
        assert "pip install openadapt[retrieval]" in message

    def test_invalid_attribute_error(self):
        """Unknown attributes should raise the standard AttributeError."""
        with pytest.raises(AttributeError) as exc_info:
            getattr(openadapt, "NonExistentClass")
        assert "module 'openadapt' has no attribute 'NonExistentClass'" in str(exc_info.value)

    def test_docstring(self):
        """The module docstring should describe the package and its extras."""
        assert openadapt.__doc__ is not None
        assert "OpenAdapt - GUI automation with ML" in openadapt.__doc__
        assert "pip install openadapt[" in openadapt.__doc__
28 changes: 28 additions & 0 deletions tests/test_version.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
"""Tests for the openadapt version module."""

import pytest

from openadapt import version


class TestVersion:
    """Exercises the openadapt.version module."""

    def test_version_exists(self):
        """The version module should define __version__."""
        assert hasattr(version, '__version__')

    def test_version_format(self):
        """__version__ should look like semantic versioning (major.minor[...])."""
        ver = version.__version__
        # Accepts forms such as "1.0.0" or "1.0.0-dev".
        parts = ver.split('.')

        assert len(parts) >= 2, f"Version {ver} should have at least major.minor"
        # Only the leading two components are required to be numeric.
        assert parts[0].isdigit(), f"Major version should be numeric: {parts[0]}"
        assert parts[1].isdigit(), f"Minor version should be numeric: {parts[1]}"

    def test_version_is_string(self):
        """__version__ should be a plain string."""
        assert isinstance(version.__version__, str)
Loading