Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 24 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -12,3 +12,27 @@ password.encrypt
session.token
device.json
qq.txt

# Testing related
.pytest_cache/
.coverage
htmlcov/
coverage.xml

# Claude settings
.claude/*

# Build artifacts
build/
dist/
*.egg-info/

# Virtual environments
venv/
env/
.env

# IDE files
.idea/
*.swp
*.swo
2,134 changes: 2,134 additions & 0 deletions poetry.lock

Large diffs are not rendered by default.

85 changes: 85 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,85 @@
[tool.poetry]
name = "eloise"
version = "0.1.0"
description = "A Python project with RWKV model support"
authors = ["Your Name <your.email@example.com>"]
readme = "README.md"
packages = [{include = "model"}]

[tool.poetry.dependencies]
python = "^3.8"
torch = "^2.0.0"
flask = "^2.0.0"
requests = "^2.25.0"
langid = "^1.1.6"
translate = "^3.6.1"
imgkit = "^1.2.2"
markdown = "^3.3.0"
pygments = "^2.9.0"

[tool.poetry.group.dev.dependencies]
pytest = "^7.4.0"
pytest-cov = "^4.1.0"
pytest-mock = "^3.11.1"

[tool.poetry.scripts]
test = "pytest:main"
tests = "pytest:main"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py", "*_test.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
addopts = [
"--strict-markers",
"--strict-config",
"--verbose",
"--cov=model",
"--cov=.",
"--cov-report=term-missing",
"--cov-report=html:htmlcov",
"--cov-report=xml:coverage.xml",
"--cov-fail-under=80"
]
markers = [
"unit: marks tests as unit tests",
"integration: marks tests as integration tests",
"slow: marks tests as slow (may skip in CI)"
]

[tool.coverage.run]
source = [".", "model"]
omit = [
"tests/*",
"*/tests/*",
"*/__pycache__/*",
"*/venv/*",
"*/env/*",
"setup.py",
"conftest.py"
]

[tool.coverage.report]
exclude_lines = [
"pragma: no cover",
"def __repr__",
"if self.debug:",
"if settings.DEBUG",
"raise AssertionError",
"raise NotImplementedError",
"if 0:",
"if __name__ == .__main__.:"
]
precision = 2
show_missing = true

[tool.coverage.html]
directory = "htmlcov"

[tool.coverage.xml]
output = "coverage.xml"
Empty file added tests/__init__.py
Empty file.
118 changes: 118 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,118 @@
"""
Shared pytest fixtures for the test suite.
"""
import os
import tempfile
import pytest
from unittest.mock import MagicMock, patch
from pathlib import Path


@pytest.fixture
def temp_dir():
    """Yield a Path to a fresh temporary directory, removed after the test."""
    with tempfile.TemporaryDirectory() as created:
        yield Path(created)


@pytest.fixture
def temp_file():
    """Yield the path of a closed temporary file; delete it after the test.

    The handle is closed before yielding so the test body can freely reopen,
    rewrite, or delete the file — on Windows an open NamedTemporaryFile
    cannot be reopened by a second handle. Cleanup is wrapped in ``finally``
    so the file is removed even if an exception is thrown into the fixture.
    """
    tmp = tempfile.NamedTemporaryFile(mode='w+', delete=False)
    tmp.close()
    try:
        yield tmp.name
    finally:
        # Always remove the file, even on test/fixture error.
        if os.path.exists(tmp.name):
            os.unlink(tmp.name)


@pytest.fixture
def mock_config():
    """Provide a canned configuration dict with 'model' and 'server' sections."""
    model_section = {
        "path": "/mock/model/path",
        "device": "cpu",
        "precision": "fp32",
    }
    server_section = {
        "host": "127.0.0.1",
        "port": 5000,
        "debug": False,
    }
    return {"model": model_section, "server": server_section}


@pytest.fixture
def mock_torch_device():
    """Patch torch so CUDA looks unavailable; yield the torch.device mock."""
    with patch('torch.cuda.is_available', return_value=False), \
            patch('torch.device', return_value=MagicMock()) as fake_device:
        yield fake_device


@pytest.fixture
def mock_model():
    """Stand-in for an RWKV model with stubbed forward() and eval()."""
    stub = MagicMock()
    stub.forward.return_value = MagicMock()
    stub.eval = MagicMock()
    return stub


@pytest.fixture
def mock_tokenizer():
    """Stand-in tokenizer returning fixed encode/decode results."""
    stub = MagicMock()
    stub.encode.return_value = [1, 2, 3, 4, 5]
    stub.decode.return_value = "test output"
    return stub


@pytest.fixture
def sample_text():
    """Fixed sentence used when exercising text-processing helpers."""
    fixed_sentence = "This is a sample text for testing purposes."
    return fixed_sentence


@pytest.fixture
def mock_flask_app():
    """Patch flask.Flask and yield the mocked application instance."""
    with patch('flask.Flask') as flask_cls:
        fake_app = MagicMock()
        flask_cls.return_value = fake_app
        yield fake_app


@pytest.fixture
def mock_requests():
    """Patch requests.get/post to return a shared 200 response; yield the mocks."""
    with patch('requests.get') as fake_get, \
            patch('requests.post') as fake_post:

        # One canned response object shared by both verbs.
        canned = MagicMock()
        canned.status_code = 200
        canned.json.return_value = {"status": "success"}
        canned.text = "mock response"

        fake_get.return_value = canned
        fake_post.return_value = canned

        yield {"get": fake_get, "post": fake_post, "response": canned}


@pytest.fixture(autouse=True)
def reset_environment():
    """Snapshot os.environ before each test and restore it afterwards."""
    snapshot = os.environ.copy()
    yield
    # Wipe any variables the test added, then restore the snapshot.
    os.environ.clear()
    os.environ.update(snapshot)


@pytest.fixture
def capture_logs(caplog):
    """Yield caplog with its capture threshold lowered to DEBUG."""
    capture_level = "DEBUG"
    with caplog.at_level(capture_level):
        yield caplog
Empty file added tests/integration/__init__.py
Empty file.
100 changes: 100 additions & 0 deletions tests/test_setup_validation.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,100 @@
"""
Validation tests to verify the testing infrastructure setup works correctly.
"""
import pytest
import os
import sys
from pathlib import Path


class TestSetupValidation:
    """Validate that the testing infrastructure is wired up correctly."""

    def test_pytest_is_working(self):
        """Smoke-check that pytest collects and runs this test."""
        assert True, "pytest is working"

    def test_fixtures_are_available(self, temp_dir, mock_config):
        """Shared fixtures from conftest.py resolve and behave sanely."""
        # temp_dir must be an existing directory wrapped in a Path.
        assert isinstance(temp_dir, Path)
        assert temp_dir.exists()
        assert temp_dir.is_dir()

        # mock_config must expose the expected top-level sections.
        assert isinstance(mock_config, dict)
        for section in ("model", "server"):
            assert section in mock_config

    def test_project_structure(self):
        """The repository layout contains the expected files and directories."""
        root = Path(__file__).parent.parent

        # Top-level project files.
        for name in ("pyproject.toml", "requirements.txt", "README.md"):
            assert (root / name).exists()

        # The model package directory.
        model_dir = root / "model"
        assert model_dir.exists()
        assert model_dir.is_dir()

        # The tests tree itself.
        tests_dir = root / "tests"
        assert tests_dir.exists()
        assert (tests_dir / "__init__.py").exists()
        assert (tests_dir / "conftest.py").exists()
        assert (tests_dir / "unit").exists()
        assert (tests_dir / "integration").exists()

    def test_python_path(self):
        """Project modules are importable from the test environment."""
        root = str(Path(__file__).parent.parent)
        if root not in sys.path:
            sys.path.insert(0, root)

        try:
            import model
        except ImportError:
            pytest.skip("Model module not available for import")
        else:
            assert hasattr(model, '__path__') or hasattr(model, '__file__')

    @pytest.mark.unit
    def test_unit_marker(self):
        """The custom 'unit' marker is registered and usable."""
        assert True, "Unit marker is working"

    @pytest.mark.integration
    def test_integration_marker(self):
        """The custom 'integration' marker is registered and usable."""
        assert True, "Integration marker is working"

    def test_mock_functionality(self, mock_requests):
        """The mock_requests fixture returns the canned HTTP response."""
        reply = mock_requests["get"]("http://example.com")
        assert reply.status_code == 200
        assert reply.json()["status"] == "success"

    def test_environment_isolation(self):
        """Environment changes made here are undone by reset_environment."""
        os.environ["TEST_VAR"] = "test_value"
        assert os.environ.get("TEST_VAR") == "test_value"
        # The autouse reset_environment fixture restores os.environ afterwards.


@pytest.mark.slow
class TestPerformanceValidation:
    """Slow-marked tests that validate the 'slow' marker wiring."""

    def test_slow_operation(self):
        """Exercise the slow marker with a brief simulated delay."""
        import time
        time.sleep(0.1)  # Stand-in for genuinely slow work.
        assert True, "Slow test completed"


def test_module_level_function():
    """Module-level (non-class) test functions are collected by pytest."""
    discovered = True
    assert discovered, "Module-level function test works"
Empty file added tests/unit/__init__.py
Empty file.