Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
70 changes: 70 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg

# Testing
.pytest_cache/
.coverage
htmlcov/
coverage.xml
*.cover
.hypothesis/
.tox/

# Virtual environments
venv/
env/
ENV/
.venv/
.env

# IDE
.vscode/
.idea/
*.swp
*.swo
*~

# OS
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db

# Claude Code settings
.claude/*

# Streamlit
.streamlit/

# Logs
*.log

# Temporary files
*.tmp
*.temp

# Build artifacts
*.whl
*.tar.gz
3,420 changes: 3,420 additions & 0 deletions poetry.lock

Large diffs are not rendered by default.

82 changes: 82 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
[tool.poetry]
name = "rephraise-email-generator"
version = "0.1.0"
description = "Professional email generator powered by OpenAI GPT-3"
authors = ["stefanrmmr"]
readme = "README.md"
packages = [{include = "src"}]

[tool.poetry.dependencies]
python = "^3.8"
streamlit = "^1.0.0"
openai = "^1.0.0"

[tool.poetry.group.test.dependencies]
pytest = "^7.0.0"
pytest-cov = "^4.0.0"
pytest-mock = "^3.10.0"

# Note: Use 'poetry run pytest' directly instead of scripts

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py", "*_test.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
addopts = [
"--strict-markers",
"--strict-config",
"--cov=streamlit_app",
"--cov-report=html:htmlcov",
"--cov-report=xml:coverage.xml",
"--cov-report=term-missing",
"--cov-fail-under=30",
"-v"
]
markers = [
"unit: Unit tests",
"integration: Integration tests",
"slow: Slow running tests"
]
filterwarnings = [
"ignore::DeprecationWarning"
]

[tool.coverage.run]
source = ["streamlit_app.py"]
omit = [
"*/tests/*",
"*/test_*.py",
"*/*_test.py",
"*/conftest.py",
"*/__init__.py",
"*/venv/*",
"*/.venv/*"
]

[tool.coverage.report]
exclude_lines = [
"pragma: no cover",
"def __repr__",
"if self.debug:",
"if settings.DEBUG",
"raise AssertionError",
"raise NotImplementedError",
"if 0:",
"if __name__ == .__main__.:",
"class .*\\bProtocol\\):",
"@(abc\\.)?abstractmethod"
]
show_missing = true
skip_covered = false
precision = 2

[tool.coverage.html]
directory = "htmlcov"

[tool.coverage.xml]
output = "coverage.xml"
1 change: 1 addition & 0 deletions tests/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Tests package initialization
148 changes: 148 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,148 @@
"""
Shared pytest fixtures and configuration for the test suite.
"""

import os
import tempfile
import pytest
from unittest.mock import Mock, patch
from pathlib import Path


@pytest.fixture
def temp_dir():
    """Yield a pathlib.Path to a scratch directory, removed after the test."""
    scratch = tempfile.TemporaryDirectory()
    try:
        yield Path(scratch.name)
    finally:
        scratch.cleanup()


@pytest.fixture
def mock_openai_api():
    """Patch the legacy completion endpoint and yield the patched callable.

    NOTE(review): this patches ``openai.Completion.create`` (the pre-1.0
    client API), but pyproject pins ``openai = "^1.0.0"`` where that
    attribute no longer exists -- confirm which client version
    ``streamlit_app`` actually targets.
    """
    canned = Mock()
    canned.get.return_value = [{'text': 'Mocked response text'}]
    with patch('openai.Completion.create', return_value=canned) as patched:
        yield patched


@pytest.fixture
def mock_streamlit():
    """Stub out the streamlit widgets the app calls during a render pass."""
    widget_stubs = {
        'text_input': Mock(return_value='test input'),
        'selectbox': Mock(return_value='formal'),
        'button': Mock(return_value=False),
        'write': Mock(),
        'markdown': Mock(),
        'image': Mock(),
        'subheader': Mock(),
        'expander': Mock(),
        # App layout appears to use a five-column row.
        'columns': Mock(return_value=[Mock() for _ in range(5)]),
        'spinner': Mock(),
        'set_page_config': Mock(),
    }
    with patch.multiple('streamlit', **widget_stubs):
        yield


@pytest.fixture
def mock_env_vars():
    """Temporarily inject a fake OpenAI key into os.environ and yield it."""
    fake_environment = {'OPENAI_API_KEY': 'test-api-key'}
    with patch.dict(os.environ, fake_environment):
        yield fake_environment


@pytest.fixture
def sample_email_data():
    """Return a canonical email-generation payload used across tests."""
    return dict(
        sender='John Doe',
        recipient='Jane Smith',
        style='formal',
        contents=['Test content 1', 'Test content 2'],
    )


@pytest.fixture
def mock_openai_completion_response():
    """Return a dict shaped like a legacy OpenAI text-completion response."""
    choice = {
        "text": "This is a professional and elaborate rewrite of your content.",
        "index": 0,
        "logprobs": None,
        "finish_reason": "stop",
    }
    usage = {
        "completion_tokens": 20,
        "prompt_tokens": 10,
        "total_tokens": 30,
    }
    return {
        "choices": [choice],
        "created": 1234567890,
        "id": "cmpl-test123",
        "model": "text-davinci-002",
        "object": "text_completion",
        "usage": usage,
    }


@pytest.fixture(autouse=True)
def reset_openai_api_key():
    """Restore OPENAI_API_KEY to its pre-test state after every test.

    Fix: the previous version used a truthiness check (``if original_key:``),
    which would DELETE a key that was originally set to the empty string
    instead of restoring it. ``is not None`` distinguishes "unset" from
    "set to a falsy value".
    """
    saved = os.environ.get('OPENAI_API_KEY')
    yield
    if saved is not None:
        os.environ['OPENAI_API_KEY'] = saved
    else:
        # Key was absent before the test; remove it if a test added one.
        os.environ.pop('OPENAI_API_KEY', None)


@pytest.fixture
def capture_stdout():
    """Redirect sys.stdout into a StringIO buffer for the duration of a test.

    Fix: restoration now happens in ``finally``, so sys.stdout is put back
    even if the generator is closed without being resumed (e.g. an
    interrupted teardown throws GeneratorExit at the yield point); the
    original version would leave sys.stdout pointing at the dead buffer
    in that case.
    """
    import io
    import sys

    previous_stdout = sys.stdout
    sys.stdout = captured_output = io.StringIO()
    try:
        yield captured_output
    finally:
        sys.stdout = previous_stdout


@pytest.fixture
def mock_file_operations():
"""Mock file system operations."""
with patch('pathlib.Path.exists', return_value=True), \
patch('pathlib.Path.read_text', return_value='test content'), \
patch('pathlib.Path.write_text') as mock_write:
yield mock_write


# Custom markers (unit / integration / slow) are registered in
# pytest_configure() below and in pyproject.toml's
# [tool.pytest.ini_options]; pytest.mark generates them on attribute
# access, so the former self-assignments (pytest.mark.unit =
# pytest.mark.unit) were no-ops and have been removed.


# Pytest configuration hooks
def pytest_configure(config):
    """Register the project's custom markers with pytest at startup."""
    marker_specs = (
        "unit: Unit tests",
        "integration: Integration tests",
        "slow: Slow running tests",
    )
    for spec in marker_specs:
        config.addinivalue_line("markers", spec)


def pytest_collection_modifyitems(config, items):
    """Auto-tag collected tests with unit/integration markers by path."""
    for test_item in items:
        location = str(test_item.fspath)
        # Directory name decides the marker; "unit" wins if both match.
        if "unit" in location:
            test_item.add_marker(pytest.mark.unit)
        elif "integration" in location:
            test_item.add_marker(pytest.mark.integration)
1 change: 1 addition & 0 deletions tests/integration/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Integration tests package initialization
57 changes: 57 additions & 0 deletions tests/test_setup_validation.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
"""
Validation tests to ensure the testing infrastructure is properly configured.
"""

import pytest
import sys
from pathlib import Path

# Add the project root to the path to import the main module
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

import streamlit_app


class TestSetupValidation:
    """Sanity checks that the test infrastructure itself is wired up."""

    def test_pytest_is_working(self):
        """Pytest can collect and execute a trivial test."""
        assert True

    def test_imports_work(self):
        """The main app module exposes the expected entry points."""
        for attr in ("gen_mail_contents", "gen_mail_format", "main_gpt3emailgen"):
            assert hasattr(streamlit_app, attr)

    def test_fixtures_are_available(self, temp_dir, sample_email_data):
        """Custom fixtures from conftest.py resolve and behave sanely."""
        assert temp_dir.exists()
        assert isinstance(sample_email_data, dict)
        assert 'sender' in sample_email_data

    def test_mock_fixtures_work(self, mock_env_vars):
        """The environment-variable fixture injects the fake API key."""
        import os
        assert os.environ.get('OPENAI_API_KEY') == 'test-api-key'

    @pytest.mark.unit
    def test_unit_marker_works(self):
        """The custom ``unit`` marker can be applied without error."""
        assert True

    def test_coverage_integration(self):
        """Placeholder confirming the suite runs under pytest-cov."""
        # The actual coverage report is produced by pytest-cov itself.
        assert True

    def test_mock_functionality(self, mock_openai_api):
        """The OpenAI mock intercepts the call made by gen_mail_contents."""
        import streamlit_app
        output = streamlit_app.gen_mail_contents(['test content'])
        mock_openai_api.assert_called()
        assert isinstance(output, list)
1 change: 1 addition & 0 deletions tests/unit/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Unit tests package initialization