feat: Update .gitignore to exclude .venv and modify config path for application.yml; enhance test setup with detailed logging and mock LLM analysis
Some checks are pending: CI/CD Pipeline / test (push), waiting to run.
This commit is contained in: parent 1ff74e3ffb, commit ff66181768
.gitignore (vendored), 1 line changed:

@@ -12,6 +12,7 @@ __pycache__/
 .Python
 env/
 venv/
+.venv/
 *.egg
 *.egg-info/
 build/
.roo/mcp.json (new file), 1 line changed:

@@ -0,0 +1 @@
+{"mcpServers":{}}
@@ -147,7 +147,7 @@ class Settings:
             raise

     def _load_yaml_config(self):
-        config_path = Path('/root/development/jira-webhook-llm/config/application.yml')
+        config_path = Path('config/application.yml')
         if not config_path.exists():
            logger.warning("Configuration file not found at {}", config_path)
            return {}
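Note on the change above: a relative Path('config/application.yml') resolves against the process's current working directory, so the service must now be started from the repository root. A common alternative, shown here only as a sketch (not part of this commit), anchors the path to the module's own location:

from pathlib import Path

# Resolve the config file relative to this source file rather than the CWD.
# CONFIG_DIR is a hypothetical name; adjust to the project's layout.
CONFIG_DIR = Path(__file__).resolve().parent / "config"
config_path = CONFIG_DIR / "application.yml"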
@@ -41,7 +41,7 @@ class AnalysisFlags(BaseModel):
                 logger.warning("Langfuse client is None despite being enabled")
                 return

-            settings.langfuse_client.trace(
+            settings.langfuse_client.start_span(  # Use start_span
                 name="LLM Model Usage",
                 input=data,
                 metadata={
@@ -51,7 +51,7 @@ class AnalysisFlags(BaseModel):
                     "customerSentiment": self.customerSentiment
                 }
             }
-            )
+            ).end()  # End the trace immediately as it's just for tracking model usage
         except Exception as e:
             logger.error(f"Failed to track model usage: {e}")
 from pydantic import BaseModel, Field
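For background: in recent versions of the Langfuse Python SDK, the trace() helper is superseded by span-based calls, and a span created with start_span() stays open until it is ended explicitly, which is why the diff chains .end() immediately. A minimal sketch of that pattern, assuming a configured client object named langfuse_client:

# Sketch only: open a span for a one-shot usage event and close it at once.
span = langfuse_client.start_span(
    name="LLM Model Usage",
    input={"model": "example"},           # hypothetical payload
    metadata={"customerSentiment": "neutral"},
)
span.end()  # without end(), the span would remain open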
@@ -1,34 +1,49 @@
 import pytest
 from fastapi.testclient import TestClient
-from jira_webhook_llm import create_app
-from database.database import get_db_session, Base  # Import get_db_session and Base
-
+from sqlalchemy import create_engine, inspect
 from sqlalchemy.orm import sessionmaker
 import os
-from sqlalchemy import create_engine
+from database.database import Base, get_db_session  # Keep get_db_session for dependency override
+from fastapi import FastAPI
+from database import database as db  # Import the database module directly

 @pytest.fixture(scope="function")
 def setup_db(monkeypatch):
+    print("\n--- setup_db fixture started ---")
     # Use in-memory SQLite for tests
     test_db_url = "sqlite:///:memory:"
     monkeypatch.setenv("DATABASE_URL", test_db_url)

-    from database import database as db
-    from database.models import Base  # Import Base from models
+    # Monkeypatch the global engine and SessionLocal in the database module
+    engine = create_engine(test_db_url, connect_args={"check_same_thread": False})
+    connection = engine.connect()

-    test_engine = create_engine(test_db_url, connect_args={"check_same_thread": False})
-    # Update the global engine and SessionLocal
-    db.engine = test_engine
-    db.SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=test_engine)
+    # Begin a transaction and bind the session to it
+    transaction = connection.begin()

-    # Create all tables
-    Base.metadata.create_all(bind=test_engine)
+    # Monkeypatch the global engine and SessionLocal in the database module
+    monkeypatch.setattr(db, 'engine', engine)
+    SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=connection)  # Bind to the connection
+    monkeypatch.setattr(db, 'SessionLocal', SessionLocal)

-    yield test_engine
+    from database.models import Base as ModelsBase  # Renamed to avoid conflict with imported Base

-    # Cleanup
-    Base.metadata.drop_all(bind=test_engine)
+    # Create all tables within the same connection
+    ModelsBase.metadata.create_all(bind=connection)  # Use the connection here
+
+    # Verify table creation within setup_db
+    inspector = inspect(connection)  # Use the connection here
+    if inspector.has_table("jira_analyses"):
+        print("--- jira_analyses table created successfully in setup_db ---")
+    else:
+        print("--- ERROR: jira_analyses table NOT created in setup_db ---")
+
+    yield engine  # Yield the engine for test_client to use
+
+    # Cleanup: Rollback the transaction and close the connection
+    transaction.rollback()  # Rollback to clean up data
+    connection.close()
+    print("--- setup_db fixture finished ---")

 @pytest.fixture
 def mock_full_jira_payload(setup_db):
     mock_data = {
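The rewritten setup_db fixture follows SQLAlchemy's external-transaction testing recipe: open one connection, start an outer transaction, bind every session to that connection, and roll the transaction back at teardown so each test starts clean. A self-contained sketch of the recipe, independent of this project's modules:

import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

@pytest.fixture()
def db_session():
    # Single in-memory engine and one shared connection per test.
    engine = create_engine(
        "sqlite:///:memory:", connect_args={"check_same_thread": False}
    )
    connection = engine.connect()
    transaction = connection.begin()

    # Sessions bound to the connection join the outer transaction.
    Session = sessionmaker(autocommit=False, autoflush=False, bind=connection)
    session = Session()

    yield session

    # Rolling back the outer transaction discards everything the test wrote.
    session.close()
    transaction.rollback()
    connection.close()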
@@ -39,33 +54,52 @@ def mock_full_jira_payload(setup_db):
         "labels": ["test"],
         "status": "open",
         "assignee": "Tester",
-        "updated": "2025-07-13T12:00:00Z",
-        "payloadData": {"key1": "value1"}
+        "updated": "2025-07-13T12:00:00Z"
     }
     return mock_data

 @pytest.fixture(scope="function")
 def test_client(setup_db, monkeypatch):
-    # Prevent signal handling in tests to avoid "set_wakeup_fd" error
+    print("\n--- test_client fixture started ---")
+    # Prevent signal handling and lifespan initialization in tests
     monkeypatch.setattr("jira_webhook_llm.handle_shutdown_signal", lambda *args: None)
+    monkeypatch.setattr("jira_webhook_llm.lifespan", lambda app: None)

-    # Create a test app instance
-    app = create_app()
+    # Create a test app instance without lifespan
+    app = FastAPI()
+
+    # Import and include routers manually
+    from webhooks.handlers import webhook_router
+    from api.handlers import router as api_router
+    app.include_router(webhook_router)
+    app.include_router(api_router)

     # Override the get_db_session dependency to use the test database
+    # This will now correctly use the monkeypatched SessionLocal from database.database
     def override_get_db_session():
-        with setup_db.connect() as connection:
-            with sessionmaker(autocommit=False, autoflush=False, bind=connection)() as session:
-                yield session
+        db_session = db.SessionLocal()  # Use the monkeypatched SessionLocal
+        try:
+            yield db_session
+        finally:
+            db_session.close()

     app.dependency_overrides[get_db_session] = override_get_db_session

-    # Verify tables exist before running tests
+    # Verify tables exist before running tests using the monkeypatched engine
     inspector = inspect(db.engine)  # This will now inspect the engine bound to the single connection
+    if inspector.has_table("jira_analyses"):
+        print("--- jira_analyses table exists in test_client setup ---")
+    else:
+        print("--- ERROR: jira_analyses table NOT found in test_client setup ---")
     assert inspector.has_table("jira_analyses"), "Test tables not created"

     with TestClient(app) as client:
         yield client

     # Clean up dependency override
     app.dependency_overrides.clear()
+
+    print("--- test_client fixture finished ---")

 @pytest.fixture
 def mock_jira_payload():
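The override works because FastAPI keys app.dependency_overrides by the original dependency callable and consults it at request time. A standalone sketch with a hypothetical get_db dependency:

from fastapi import Depends, FastAPI
from fastapi.testclient import TestClient

app = FastAPI()

def get_db():
    # Hypothetical production dependency.
    yield "real-db"

@app.get("/ping")
def ping(db: str = Depends(get_db)):
    return {"db": db}

def fake_db():
    yield "test-db"

# Key by the original callable; FastAPI substitutes it per request.
app.dependency_overrides[get_db] = fake_db

client = TestClient(app)
assert client.get("/ping").json() == {"db": "test-db"}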
@@ -5,6 +5,7 @@ from llm.models import JiraWebhookPayload
 from database.crud import create_analysis_record, get_analysis_by_id
 from database.models import JiraAnalysis
 from database.database import get_db
+from unittest.mock import MagicMock  # Import MagicMock

 def test_error_handling_middleware(test_client, mock_jira_payload):
     # Test 404 error handling
@@ -19,7 +20,19 @@ def test_error_handling_middleware(test_client, mock_jira_payload):
     assert response.status_code == 422
     assert "details" in response.json()

-def test_webhook_handler(setup_db, test_client, mock_full_jira_payload):
+def test_webhook_handler(setup_db, test_client, mock_full_jira_payload, monkeypatch):
+    # Mock the LLM analysis chain to avoid external calls
+    mock_chain = MagicMock()
+    mock_chain.ainvoke.return_value = {  # Use ainvoke as per webhooks/handlers.py
+        "hasMultipleEscalations": False,
+        "customerSentiment": "neutral",
+        "analysisSummary": "Mock analysis summary.",
+        "actionableItems": ["Mock action item 1", "Mock action item 2"],
+        "analysisFlags": ["mock_flag"]
+    }
+
+    monkeypatch.setattr("llm.chains.analysis_chain", mock_chain)
+
     # Test successful webhook handling with full payload
     response = test_client.post("/api/jira-webhook", json=mock_full_jira_payload)
     assert response.status_code == 200
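One caveat with the mock above: the handler awaits ainvoke, and calling ainvoke(...) on a plain MagicMock returns a non-awaitable MagicMock rather than the configured dict. unittest.mock.AsyncMock is the usual fix; a minimal sketch:

import asyncio
from unittest.mock import AsyncMock

# AsyncMock attributes are themselves async: awaiting ainvoke(...) yields return_value.
mock_chain = AsyncMock()
mock_chain.ainvoke.return_value = {"customerSentiment": "neutral"}

async def demo():
    result = await mock_chain.ainvoke({"issue": "TEST-1"})
    assert result["customerSentiment"] == "neutral"

asyncio.run(demo())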
@@ -4,7 +4,7 @@ import json
 from typing import Optional, List, Union
 from sqlalchemy.orm import Session
 from pydantic import BaseModel, ConfigDict, field_validator
-from datetime import datetime
+from datetime import datetime, timezone  # Import timezone
 import uuid

 from config import settings
@@ -57,19 +57,19 @@ class JiraWebhookHandler:
         logger.bind(
             issue_key=payload.issueKey,
             record_id=new_record.id,
-            timestamp=datetime.utcnow().isoformat()
+            timestamp=datetime.now(timezone.utc).isoformat()
         ).info(f"[{payload.issueKey}] Received webhook")

         # Create Langfuse trace if enabled
         trace = None
         if settings.langfuse.enabled:
-            trace = settings.langfuse_client.start_span(
+            trace = settings.langfuse_client.start_span(  # Use start_span
                 name="Jira Webhook",
-                input=payload.dict(),
+                input=payload.model_dump(),  # Use model_dump for Pydantic V2
                 metadata={
                     "trace_id": f"webhook-{payload.issueKey}",
                     "issue_key": payload.issueKey,
-                    "timestamp": datetime.utcnow().isoformat()
+                    "timestamp": datetime.now(timezone.utc).isoformat()
                 }
             )
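The timestamp change swaps a naive UTC value for an aware one: datetime.utcnow() carries no tzinfo and is deprecated since Python 3.12, while datetime.now(timezone.utc) embeds the offset in its ISO output. For example:

from datetime import datetime, timezone

naive = datetime.utcnow()           # tzinfo is None; deprecated in 3.12+
aware = datetime.now(timezone.utc)  # tzinfo is UTC

print(naive.isoformat())  # e.g. 2025-07-13T12:00:00.000000
print(aware.isoformat())  # e.g. 2025-07-13T12:00:00.000000+00:00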
@@ -105,18 +105,21 @@ class JiraWebhookHandler:

             # Validate LLM response
             try:
-                # Validate using Pydantic model
-                AnalysisFlags(**raw_llm_response)
+                # Validate using Pydantic model, extracting only relevant fields
+                AnalysisFlags(
+                    hasMultipleEscalations=raw_llm_response.get("hasMultipleEscalations", False),
+                    customerSentiment=raw_llm_response.get("customerSentiment", "neutral")
+                )
             except Exception as e:
-                logger.error(f"[{payload.issueKey}] Invalid LLM response structure: {str(e)}", exc_info=True)
+                logger.error(f"[{payload.issueKey}] Invalid LLM response structure: {e}", exc_info=True)
                 update_analysis_record(
                     db=db,
                     record_id=new_record.id,
                     analysis_result={"hasMultipleEscalations": False, "customerSentiment": "neutral"},
-                    raw_response=str(raw_llm_response),
+                    raw_response=json.dumps(raw_llm_response),  # Store as JSON string
                     status="validation_failed"
                 )
-                raise ValueError(f"Invalid LLM response format: {str(e)}") from e
+                raise ValueError(f"Invalid LLM response format: {e}") from e

             logger.debug(f"[{payload.issueKey}] LLM Analysis Result: {json.dumps(raw_llm_response, indent=2)}")
             # Update record with final results
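The new validation passes only the two expected keys into AnalysisFlags, defaulting each when absent, so extra keys in the LLM response can no longer trip the model. A standalone sketch; the model definition here is assumed from the field names in the diff:

from pydantic import BaseModel

class AnalysisFlags(BaseModel):
    # Assumed definition matching the fields used above.
    hasMultipleEscalations: bool = False
    customerSentiment: str = "neutral"

raw = {"customerSentiment": "negative", "analysisSummary": "extra key, ignored"}
flags = AnalysisFlags(
    hasMultipleEscalations=raw.get("hasMultipleEscalations", False),
    customerSentiment=raw.get("customerSentiment", "neutral"),
)
print(flags.model_dump())  # {'hasMultipleEscalations': False, 'customerSentiment': 'negative'}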
@@ -134,8 +137,7 @@ class JiraWebhookHandler:

             # Log error to Langfuse if enabled
             if settings.langfuse.enabled and llm_span:
-                llm_span.error(e)
-                llm_span.end()
+                llm_span.end(status_message=str(e), status="ERROR")

             update_analysis_record(
                 db=db,
@@ -163,21 +165,23 @@ class JiraWebhookHandler:

         # Log error to Langfuse if enabled
         if settings.langfuse.enabled and trace:
-            trace.end(error=e)
+            trace.end(status_message=str(e), status="ERROR")

         raise HTTPException(status_code=500, detail=f"Internal Server Error: {str(e)}")

 # Initialize handler
 webhook_handler = JiraWebhookHandler()

-@webhook_router.post("/jira-webhook")
+@webhook_router.post("/api/jira-webhook")
 async def jira_webhook_endpoint(payload: JiraWebhookPayload, db: Session = Depends(get_db_session)):
     """Jira webhook endpoint"""
     try:
         result = await webhook_handler.handle_webhook(payload, db)
         return result
-    except HTTPException:
+    except ValidationError as e:
         raise
+    except BadRequestError as e:
+        raise ValidationError(detail=e.detail)
     except Exception as e:
         logger.error(f"Unexpected error in webhook endpoint: {str(e)}")
         raise HTTPException(status_code=500, detail=f"Internal Server Error: {str(e)}")
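The endpoint now re-raises ValidationError directly and converts BadRequestError into ValidationError. Neither class is defined in this diff; assuming both are HTTPException subclasses, the conversion pattern looks like this sketch:

from fastapi import HTTPException

# Hypothetical definitions; the real classes live elsewhere in the project.
class BadRequestError(HTTPException):
    def __init__(self, detail: str):
        super().__init__(status_code=400, detail=detail)

class ValidationError(HTTPException):
    def __init__(self, detail: str):
        super().__init__(status_code=422, detail=detail)

def handle(exc: Exception):
    # Map bad-request failures onto the validation error type, keeping detail.
    if isinstance(exc, BadRequestError):
        raise ValidationError(detail=exc.detail)
    raise exc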