# jira-webhook-llm/tests/test_core.py

import pytest
from fastapi import HTTPException
from jira_webhook_llm import app
from llm.models import JiraWebhookPayload
from unittest.mock import patch, MagicMock
import redis
from datetime import datetime, timedelta
import time
import asyncio


@pytest.fixture
def mock_jira_payload():
    return {
        "issueKey": "TEST-123",
        "summary": "Test issue",
        "description": "Test description",
        "comment": "Test comment",
        "labels": ["bug", "urgent"],
        "status": "Open",
        "assignee": "testuser",
        "updated": "2025-07-14T00:00:00Z"
    }
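

# The `test_client` fixture used throughout this module is assumed to be provided
# by tests/conftest.py. A minimal sketch of what such a fixture could look like
# (hypothetical, shown here only for reference; the real fixture may differ):
#
#     from fastapi.testclient import TestClient
#
#     @pytest.fixture
#     def test_client():
#         with TestClient(app) as client:
#             yield client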


def test_error_handling_middleware(test_client, mock_jira_payload):
    # Test 404 error handling
    response = test_client.post("/nonexistent-endpoint", json={})
    assert response.status_code == 404
    assert "error_id" in response.json()

    # Test validation error handling
    invalid_payload = mock_jira_payload.copy()
    invalid_payload.pop("issueKey")
    response = test_client.post("/jira-webhook", json=invalid_payload)
    assert response.status_code == 422
    assert "details" in response.json()


def test_label_conversion(test_client):
    # Test string label conversion
    payload = {
        "issueKey": "TEST-123",
        "summary": "Test issue",
        "labels": "single_label"
    }
    response = test_client.post("/jira-webhook", json=payload)
    assert response.status_code == 200

    # Test list label handling
    payload["labels"] = ["label1", "label2"]
    response = test_client.post("/jira-webhook", json=payload)
    assert response.status_code == 200


def test_snake_case_field_handling(test_client):
    # snake_case field names should be accepted alongside the camelCase aliases
    payload = {
        "issue_key": "TEST-123",
        "summary": "Test issue",
        "description": "Test description"
    }
    response = test_client.post("/jira-webhook", json=payload)
    assert response.status_code == 200


def test_optional_fields(test_client):
    # Test with only required fields
    payload = {
        "issueKey": "TEST-123",
        "summary": "Test issue"
    }
    response = test_client.post("/jira-webhook", json=payload)
    assert response.status_code == 200

    # Test with all optional fields
    payload.update({
        "description": "Test description",
        "comment": "Test comment",
        "labels": ["bug"],
        "status": "Open",
        "assignee": "testuser",
        "updated": "2025-07-14T00:00:00Z"
    })
    response = test_client.post("/jira-webhook", json=payload)
    assert response.status_code == 200


def test_webhook_handler(test_client, mock_jira_payload):
    # Test successful webhook handling
    response = test_client.post("/jira-webhook", json=mock_jira_payload)
    assert response.status_code == 200
    assert "response" in response.json()


def test_llm_test_endpoint(test_client):
    # Test LLM test endpoint
    response = test_client.post("/test-llm")
    assert response.status_code == 200
    assert "response" in response.json()


def test_retry_decorator():
    # NOTE: assumes the application object exposes a `retry` decorator with a
    # `max_retries` argument, as used in the original test.
    @app.retry(max_retries=3)
    async def failing_function():
        raise Exception("Test error")

    # The decorated function is a coroutine, so it must actually be run;
    # calling it without awaiting it would never raise.
    with pytest.raises(Exception):
        asyncio.run(failing_function())


def test_rate_limiting(test_client, mock_jira_payload):
    """Test rate limiting functionality"""
    with patch('redis.Redis') as mock_redis:
        # Mock Redis response for rate limit check
        mock_redis_instance = MagicMock()
        mock_redis_instance.zcard.return_value = 100  # Exceed limit
        mock_redis.from_url.return_value = mock_redis_instance

        response = test_client.post("/jira-webhook", json=mock_jira_payload)
        assert response.status_code == 429
        assert "Too many requests" in response.json()["detail"]


def test_langfuse_integration(test_client, mock_jira_payload):
    """Test Langfuse tracing integration"""
    with patch('langfuse.Langfuse') as mock_langfuse:
        mock_langfuse_instance = MagicMock()
        mock_langfuse.return_value = mock_langfuse_instance

        response = test_client.post("/jira-webhook", json=mock_jira_payload)
        assert response.status_code == 200
        mock_langfuse_instance.start_span.assert_called_once()


def test_redis_connection_error(test_client, mock_jira_payload):
    """Test Redis connection error handling"""
    with patch('redis.Redis') as mock_redis:
        mock_redis.side_effect = redis.ConnectionError("Connection failed")

        response = test_client.post("/jira-webhook", json=mock_jira_payload)
        assert response.status_code == 200  # Should continue without rate limiting


def test_metrics_tracking(test_client, mock_jira_payload):
    """Test metrics collection functionality"""
    with patch('redis.Redis') as mock_redis:
        mock_redis_instance = MagicMock()
        mock_redis.from_url.return_value = mock_redis_instance

        # Make multiple requests to test metrics
        for _ in range(3):
            test_client.post("/jira-webhook", json=mock_jira_payload)

        # Verify metrics were updated. This assumes the test setup registers the
        # webhook-handler override under the 'get_webhook_handler' key.
        handler = app.dependency_overrides.get('get_webhook_handler')()
        assert handler.metrics['total_requests'] >= 3


def test_error_scenarios(test_client, mock_jira_payload):
    """Test various error scenarios"""
    # Test invalid payload
    invalid_payload = mock_jira_payload.copy()
    invalid_payload.pop('issueKey')
    response = test_client.post("/jira-webhook", json=invalid_payload)
    assert response.status_code == 422

    # Test LLM processing failure
    with patch('llm.chains.analysis_chain.ainvoke') as mock_llm:
        mock_llm.side_effect = Exception("LLM failed")
        response = test_client.post("/jira-webhook", json=mock_jira_payload)
        assert response.status_code == 200
        assert "error" in response.json()


def test_llm_mode_configuration(test_client, mock_jira_payload):
    """Test behavior with different LLM modes"""
    # Test OpenAI mode
    with patch.dict('os.environ', {'LLM_MODE': 'openai'}):
        response = test_client.post("/jira-webhook", json=mock_jira_payload)
        assert response.status_code == 200

    # Test Ollama mode
    with patch.dict('os.environ', {'LLM_MODE': 'ollama'}):
        response = test_client.post("/jira-webhook", json=mock_jira_payload)
        assert response.status_code == 200


def test_langfuse_configuration(test_client, mock_jira_payload):
    """Test Langfuse enabled/disabled scenarios"""
    # Test with Langfuse enabled
    with patch.dict('os.environ', {'LANGFUSE_ENABLED': 'true'}):
        response = test_client.post("/jira-webhook", json=mock_jira_payload)
        assert response.status_code == 200

    # Test with Langfuse disabled
    with patch.dict('os.environ', {'LANGFUSE_ENABLED': 'false'}):
        response = test_client.post("/jira-webhook", json=mock_jira_payload)
        assert response.status_code == 200


def test_redis_configuration(test_client, mock_jira_payload):
    """Test Redis enabled/disabled scenarios"""
    # Test with Redis enabled
    with patch.dict('os.environ', {'REDIS_ENABLED': 'true'}):
        response = test_client.post("/jira-webhook", json=mock_jira_payload)
        assert response.status_code == 200

    # Test with Redis disabled
    with patch.dict('os.environ', {'REDIS_ENABLED': 'false'}):
        response = test_client.post("/jira-webhook", json=mock_jira_payload)
        assert response.status_code == 200


def test_validation_error_handling(test_client):
    # Test missing required field
    payload = {"summary": "Test issue"}  # Missing issueKey
    response = test_client.post("/jira-webhook", json=payload)
    assert response.status_code == 422
    assert "details" in response.json()
    assert "issueKey" in response.json()["details"][0]["loc"]


def test_rate_limit_error_handling(test_client, mock_jira_payload):
    with patch('redis.Redis') as mock_redis:
        mock_redis_instance = MagicMock()
        mock_redis_instance.zcard.return_value = 100  # Exceed limit
        mock_redis.from_url.return_value = mock_redis_instance

        response = test_client.post("/jira-webhook", json=mock_jira_payload)
        assert response.status_code == 429
        assert "Too many requests" in response.json()["detail"]


def test_llm_error_handling(test_client, mock_jira_payload):
    with patch('llm.chains.analysis_chain.ainvoke') as mock_llm:
        mock_llm.side_effect = Exception("LLM processing failed")

        response = test_client.post("/jira-webhook", json=mock_jira_payload)
        assert response.status_code == 200
        assert "error" in response.json()
        assert "LLM processing failed" in response.json()["error"]


def test_database_error_handling(test_client, mock_jira_payload):
    with patch('redis.Redis') as mock_redis:
        mock_redis.side_effect = redis.ConnectionError("Database connection failed")

        response = test_client.post("/jira-webhook", json=mock_jira_payload)
        assert response.status_code == 200
        assert "Database connection failed" in response.json()["error"]


def test_unexpected_error_handling(test_client, mock_jira_payload):
    with patch('webhooks.handlers.JiraWebhookHandler.handle_webhook') as mock_handler:
        mock_handler.side_effect = Exception("Unexpected error")

        response = test_client.post("/jira-webhook", json=mock_jira_payload)
        assert response.status_code == 500
        assert "Unexpected error" in response.json()["detail"]


def test_model_configuration(test_client, mock_jira_payload):
    """Test different model configurations"""
    # Test OpenAI model
    with patch.dict('os.environ', {
        'LLM_MODE': 'openai',
        'OPENAI_MODEL': 'gpt-4'
    }):
        response = test_client.post("/jira-webhook", json=mock_jira_payload)
        assert response.status_code == 200

    # Test Ollama model
    with patch.dict('os.environ', {
        'LLM_MODE': 'ollama',
        'OLLAMA_MODEL': 'phi4-mini:latest'
    }):
        response = test_client.post("/jira-webhook", json=mock_jira_payload)
        assert response.status_code == 200
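

# To run just this module (assuming pytest and the project's test dependencies
# are installed), a typical invocation would be:
#
#     pytest tests/test_core.py -v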