"""FastAPI service that receives Jira webhook events and routes them through an LLM-backed handler."""
from dotenv import load_dotenv

# Load environment variables before importing modules that may read them at import time.
load_dotenv()

import asyncio
import sys
import uuid
from datetime import datetime, timezone
from functools import wraps
from typing import Optional

from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import JSONResponse
from loguru import logger
from pydantic import BaseModel

from config import settings
from llm.models import JiraWebhookPayload
from logging_config import configure_logging
from webhooks.handlers import JiraWebhookHandler
|
|
|
|
# Initialize logging first
|
|
configure_logging(log_level="DEBUG")
|
|
|
|
try:
|
|
app = FastAPI()
|
|
logger.info("FastAPI application initialized")
|
|
except Exception as e:
|
|
logger.error(f"Error initializing FastAPI: {str(e)}")
|
|
raise
|
|
|
|
def retry(max_retries: int = 3, delay: float = 1.0):
|
|
"""Decorator for retrying failed operations"""
|
|
def decorator(func):
|
|
@wraps(func)
|
|
async def wrapper(*args, **kwargs):
|
|
last_error = None
|
|
for attempt in range(max_retries):
|
|
try:
|
|
return await func(*args, **kwargs)
|
|
except Exception as e:
|
|
last_error = e
|
|
logger.warning(f"Attempt {attempt + 1} failed: {str(e)}")
|
|
if attempt < max_retries - 1:
|
|
await asyncio.sleep(delay * (attempt + 1))
|
|
raise last_error
|
|
return wrapper
|
|
return decorator
|
|
|
|
class ErrorResponse(BaseModel):
|
|
error_id: str
|
|
timestamp: str
|
|
status_code: int
|
|
message: str
|
|
details: Optional[str] = None
|
|
|
|
@app.middleware("http")
|
|
async def error_handling_middleware(request: Request, call_next):
|
|
request_id = str(uuid.uuid4())
|
|
logger.bind(request_id=request_id).info(f"Request started: {request.method} {request.url}")
|
|
|
|
try:
|
|
response = await call_next(request)
|
|
return response
|
|
except HTTPException as e:
|
|
logger.error(f"HTTP Error: {e.status_code} - {e.detail}")
|
|
error_response = ErrorResponse(
|
|
error_id=request_id,
|
|
timestamp=datetime.utcnow().isoformat(),
|
|
status_code=e.status_code,
|
|
message=e.detail,
|
|
details=str(e)
|
|
)
|
|
return JSONResponse(status_code=e.status_code, content=error_response.model_dump())
|
|
except Exception as e:
|
|
logger.error(f"Unexpected error: {str(e)}")
|
|
error_response = ErrorResponse(
|
|
error_id=request_id,
|
|
timestamp=datetime.utcnow().isoformat(),
|
|
status_code=500,
|
|
message="Internal Server Error",
|
|
details=str(e)
|
|
)
|
|
return JSONResponse(status_code=500, content=error_response.model_dump())
|
|
webhook_handler = JiraWebhookHandler()
|
|
|
|
@app.post("/jira-webhook")
|
|
async def jira_webhook_handler(payload: JiraWebhookPayload):
|
|
return await webhook_handler.handle_webhook(payload)
|
|
|
|
@app.post("/test-llm")
|
|
async def test_llm():
|
|
"""Test endpoint for LLM integration"""
|
|
test_payload = JiraWebhookPayload(
|
|
issueKey="TEST-123",
|
|
summary="Test issue",
|
|
description="This is a test issue for LLM integration",
|
|
comment="Testing OpenAI integration with Langfuse",
|
|
labels=["test"],
|
|
status="Open",
|
|
assignee="Tester",
|
|
updated="2025-07-04T21:40:00Z"
|
|
)
|
|
return await webhook_handler.handle_webhook(test_payload)
|
|
|
|
if __name__ == "__main__":
|
|
import uvicorn
|
|
uvicorn.run(app, host="0.0.0.0", port=8000) |