jira-webhook-llm/jira-webhook-llm.py
Ireneusz Bachanowicz 2763b40b60
Refactor Jira Webhook LLM integration
- Simplified the FastAPI application structure and improved error handling with middleware.
- Introduced a retry decorator for asynchronous functions to enhance reliability.
- Modularized the LLM initialization and prompt loading into separate functions for better maintainability.
- Updated Pydantic models for Jira webhook payload and analysis flags to ensure proper validation and structure.
- Implemented a structured logging configuration for better traceability and debugging.
- Added comprehensive unit tests for prompt loading, response validation, and webhook handling (a test sketch follows this list).
- Established a CI/CD pipeline with GitHub Actions for automated testing and coverage reporting.
- Enhanced the prompt template for LLM analysis to include specific instructions for handling escalations.
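A minimal sketch of one such webhook-handling test, assuming pytest and FastAPI's TestClient; the `main` import path mirrors the uvicorn command at the bottom of the file, and the test name is illustrative:

from fastapi.testclient import TestClient

from main import app  # assumed module path, matching the run command below

client = TestClient(app)

def test_test_llm_returns_ok():
    # /test-llm builds its own JiraWebhookPayload, so no request body is needed.
    response = client.post("/test-llm")
    assert response.status_code == 200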
2025-07-13 13:19:10 +02:00


from fastapi import FastAPI, Request, HTTPException
from pydantic import BaseModel
from fastapi.responses import JSONResponse
from loguru import logger
import uuid
import sys
from typing import Optional
from datetime import datetime
import asyncio
from functools import wraps

from config import settings
from webhooks.handlers import JiraWebhookHandler
from llm.models import JiraWebhookPayload
from logging_config import configure_logging

try:
    app = FastAPI()
    logger.info("FastAPI application initialized")
except Exception as e:
    logger.error(f"Error initializing FastAPI: {str(e)}")
    raise

# Initialize logging
configure_logging(log_level=settings.log.level)

def retry(max_retries: int = 3, delay: float = 1.0):
    """Retry an async function up to max_retries times with linear backoff."""
    def decorator(func):
        @wraps(func)
        async def wrapper(*args, **kwargs):
            last_error = None
            for attempt in range(max_retries):
                try:
                    return await func(*args, **kwargs)
                except Exception as e:
                    last_error = e
                    logger.warning(f"Attempt {attempt + 1} failed: {str(e)}")
                    if attempt < max_retries - 1:
                        # Back off a little longer after each failed attempt.
                        await asyncio.sleep(delay * (attempt + 1))
            raise last_error
        return wrapper
    return decorator
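
# Illustrative use of the retry decorator (hypothetical wrapper, not part of
# the original module): the call is retried up to 3 times, sleeping 2s, then
# 4s, between attempts, and the last error is re-raised if every attempt fails.
#
# @retry(max_retries=3, delay=2.0)
# async def analyze_with_retries(payload: JiraWebhookPayload):
#     return await webhook_handler.handle_webhook(payload)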

class ErrorResponse(BaseModel):
    """Uniform error body returned by the error-handling middleware."""
    error_id: str
    timestamp: str
    status_code: int
    message: str
    details: Optional[str] = None
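
# Example body produced by the middleware below (values are illustrative):
# {
#     "error_id": "8f14e45f-ea3c-4f2b-9c1d-0242ac120002",
#     "timestamp": "2025-07-13T11:19:10.123456",
#     "status_code": 500,
#     "message": "Internal Server Error",
#     "details": "division by zero"
# }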

@app.middleware("http")
async def error_handling_middleware(request: Request, call_next):
    # Tag every request with a unique id so log lines and error responses
    # can be correlated.
    request_id = str(uuid.uuid4())
    logger.bind(request_id=request_id).info(f"Request started: {request.method} {request.url}")
    try:
        return await call_next(request)
    except HTTPException as e:
        logger.error(f"HTTP Error: {e.status_code} - {e.detail}")
        error_response = ErrorResponse(
            error_id=request_id,
            timestamp=datetime.utcnow().isoformat(),
            status_code=e.status_code,
            message=e.detail,
            details=str(e),
        )
        return JSONResponse(status_code=e.status_code, content=error_response.model_dump())
    except Exception as e:
        logger.error(f"Unexpected error: {str(e)}")
        error_response = ErrorResponse(
            error_id=request_id,
            timestamp=datetime.utcnow().isoformat(),
            status_code=500,
            message="Internal Server Error",
            details=str(e),
        )
        return JSONResponse(status_code=500, content=error_response.model_dump())

webhook_handler = JiraWebhookHandler()

@app.post("/jira-webhook")
async def jira_webhook_handler(payload: JiraWebhookPayload):
    return await webhook_handler.handle_webhook(payload)
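
# Example request (field names mirror the test payload below; values are
# illustrative):
#   curl -X POST http://localhost:8000/jira-webhook \
#     -H "Content-Type: application/json" \
#     -d '{"issueKey": "PROJ-1", "summary": "Example issue", "status": "Open"}'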
@app.post("/test-llm")
async def test_llm():
"""Test endpoint for LLM integration"""
test_payload = JiraWebhookPayload(
issueKey="TEST-123",
summary="Test issue",
description="This is a test issue for LLM integration",
comment="Testing OpenAI integration with Langfuse",
labels=["test"],
status="Open",
assignee="Tester",
updated="2025-07-04T21:40:00Z"
)
return await webhook_handler.handle_webhook(test_payload)

# To run this:
#   uvicorn main:app --host 0.0.0.0 --port 8000 --reload
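# Smoke-test the LLM integration without a real Jira webhook:
#   curl -X POST http://localhost:8000/test-llm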