# jira-webhook-llm/tests/test_core.py
from unittest.mock import AsyncMock, MagicMock

import pytest
from fastapi import HTTPException

from jira_webhook_llm import app
from llm.models import JiraWebhookPayload
from database.crud import create_analysis_record, get_analysis_by_id
from database.models import JiraAnalysis
from database.database import get_db
def test_error_handling_middleware(test_client, mock_jira_payload):
    """Verify the error-handling middleware for unknown routes and bad payloads.

    Args:
        test_client: FastAPI TestClient fixture wrapping the app.
        mock_jira_payload: dict fixture with a minimal valid webhook payload.
    """
    # Unknown route: the middleware should attach an error_id to the 404 body.
    response = test_client.post("/nonexistent-endpoint", json={})
    assert response.status_code == 404
    assert "error_id" in response.json()

    # Payload missing the required issueKey field: FastAPI validation should
    # reject it with 422 and the error body should carry a "details" entry.
    invalid_payload = mock_jira_payload.copy()
    invalid_payload.pop("issueKey")
    response = test_client.post("/api/jira-webhook", json=invalid_payload)
    assert response.status_code == 422
    assert "details" in response.json()
def test_webhook_handler(setup_db, test_client, mock_full_jira_payload, monkeypatch):
    """End-to-end webhook test: mocked LLM analysis plus database persistence.

    Args:
        setup_db: fixture that prepares a clean test database.
        test_client: FastAPI TestClient fixture wrapping the app.
        mock_full_jira_payload: dict fixture with a complete webhook payload.
        monkeypatch: pytest fixture used to swap in the mocked analysis chain.
    """
    # The handler awaits analysis_chain.ainvoke (see webhooks/handlers.py), so
    # ainvoke must be an AsyncMock — a plain MagicMock's return value would not
    # be awaitable and the handler would fail with a TypeError.
    mock_chain = MagicMock()
    mock_chain.ainvoke = AsyncMock(return_value={
        "hasMultipleEscalations": False,
        "customerSentiment": "neutral",
        "analysisSummary": "Mock analysis summary.",
        "actionableItems": ["Mock action item 1", "Mock action item 2"],
        "analysisFlags": ["mock_flag"],
    })
    monkeypatch.setattr("llm.chains.analysis_chain", mock_chain)

    # Successful webhook handling with a full payload.
    response = test_client.post("/api/jira-webhook", json=mock_full_jira_payload)
    assert response.status_code == 200
    response_data = response.json()
    assert "status" in response_data
    assert response_data["status"] in ["success", "skipped"]

    if response_data["status"] == "success":
        assert "analysis_flags" in response_data

        # Validate database storage. JiraAnalysis and get_db are already
        # imported at module level; no need to re-import here.
        with get_db() as db:
            record = db.query(JiraAnalysis).filter_by(
                issue_key=mock_full_jira_payload["issueKey"]
            ).first()
            assert record is not None
            assert record.issue_summary == mock_full_jira_payload["summary"]
            assert record.request_payload == mock_full_jira_payload
def test_llm_test_endpoint(test_client):
    """Verify the LLM smoke-test endpoint returns 200 with a 'response' field.

    Args:
        test_client: FastAPI TestClient fixture wrapping the app.
    """
    response = test_client.post("/api/test-llm")
    assert response.status_code == 200
    assert "response" in response.json()
def test_create_analysis_record_endpoint(setup_db, test_client, mock_full_jira_payload):
    """Verify POST /api/request creates an analysis record and persists it.

    Args:
        setup_db: fixture that prepares a clean test database.
        test_client: FastAPI TestClient fixture wrapping the app.
        mock_full_jira_payload: dict fixture with a complete webhook payload.
    """
    # Successful creation of a new analysis record via the API.
    response = test_client.post("/api/request", json=mock_full_jira_payload)
    assert response.status_code == 201
    response_data = response.json()
    assert "message" in response_data
    assert response_data["message"] == "Record created successfully"
    assert "record_id" in response_data

    # Verify the record actually exists in the database.
    with get_db() as db:
        record = get_analysis_by_id(db, response_data["record_id"])
        assert record is not None
        assert record.issue_key == mock_full_jira_payload["issueKey"]
        assert record.issue_summary == mock_full_jira_payload["summary"]
        assert record.request_payload == mock_full_jira_payload
@pytest.mark.asyncio
async def test_retry_decorator():
    """Verify the retry decorator re-raises once retries are exhausted.

    The decorated coroutine always fails, so after max_retries attempts the
    original exception should propagate to the caller.
    """
    # Imported inside the test to avoid import-time side effects at collection.
    from jira_webhook_llm import retry

    calls = {"count": 0}

    @retry(max_retries=3)
    async def failing_function():
        calls["count"] += 1
        raise Exception("Test error")

    # match pins the exception we raised, so an unrelated error (e.g. a bug in
    # the decorator itself) cannot make this test pass by accident.
    with pytest.raises(Exception, match="Test error"):
        await failing_function()

    # Sanity check: the wrapped coroutine actually executed at least once.
    assert calls["count"] >= 1