feat: Enhance configuration loading and logging, implement graceful shutdown handling, and improve Langfuse integration
Some checks failed
CI/CD Pipeline / test (push) Has been cancelled
This commit is contained in:
parent a3551d4233
commit 030df3e8e0
32
=3.2.0
Normal file
@@ -0,0 +1,32 @@
Requirement already satisfied: langfuse in ./venv/lib/python3.12/site-packages (3.1.3)
Requirement already satisfied: backoff>=1.10.0 in ./venv/lib/python3.12/site-packages (from langfuse) (2.2.1)
Requirement already satisfied: httpx<1.0,>=0.15.4 in ./venv/lib/python3.12/site-packages (from langfuse) (0.27.0)
Requirement already satisfied: opentelemetry-api<2.0.0,>=1.33.1 in ./venv/lib/python3.12/site-packages (from langfuse) (1.34.1)
Requirement already satisfied: opentelemetry-exporter-otlp<2.0.0,>=1.33.1 in ./venv/lib/python3.12/site-packages (from langfuse) (1.34.1)
Requirement already satisfied: opentelemetry-sdk<2.0.0,>=1.33.1 in ./venv/lib/python3.12/site-packages (from langfuse) (1.34.1)
Requirement already satisfied: packaging<25.0,>=23.2 in ./venv/lib/python3.12/site-packages (from langfuse) (24.2)
Requirement already satisfied: pydantic<3.0,>=1.10.7 in ./venv/lib/python3.12/site-packages (from langfuse) (2.9.0)
Requirement already satisfied: requests<3,>=2 in ./venv/lib/python3.12/site-packages (from langfuse) (2.32.4)
Requirement already satisfied: wrapt<2.0,>=1.14 in ./venv/lib/python3.12/site-packages (from langfuse) (1.17.2)
Requirement already satisfied: anyio in ./venv/lib/python3.12/site-packages (from httpx<1.0,>=0.15.4->langfuse) (4.9.0)
Requirement already satisfied: certifi in ./venv/lib/python3.12/site-packages (from httpx<1.0,>=0.15.4->langfuse) (2025.6.15)
Requirement already satisfied: httpcore==1.* in ./venv/lib/python3.12/site-packages (from httpx<1.0,>=0.15.4->langfuse) (1.0.9)
Requirement already satisfied: idna in ./venv/lib/python3.12/site-packages (from httpx<1.0,>=0.15.4->langfuse) (3.10)
Requirement already satisfied: sniffio in ./venv/lib/python3.12/site-packages (from httpx<1.0,>=0.15.4->langfuse) (1.3.1)
Requirement already satisfied: h11>=0.16 in ./venv/lib/python3.12/site-packages (from httpcore==1.*->httpx<1.0,>=0.15.4->langfuse) (0.16.0)
Requirement already satisfied: importlib-metadata<8.8.0,>=6.0 in ./venv/lib/python3.12/site-packages (from opentelemetry-api<2.0.0,>=1.33.1->langfuse) (8.7.0)
Requirement already satisfied: typing-extensions>=4.5.0 in ./venv/lib/python3.12/site-packages (from opentelemetry-api<2.0.0,>=1.33.1->langfuse) (4.14.1)
Requirement already satisfied: opentelemetry-exporter-otlp-proto-grpc==1.34.1 in ./venv/lib/python3.12/site-packages (from opentelemetry-exporter-otlp<2.0.0,>=1.33.1->langfuse) (1.34.1)
Requirement already satisfied: opentelemetry-exporter-otlp-proto-http==1.34.1 in ./venv/lib/python3.12/site-packages (from opentelemetry-exporter-otlp<2.0.0,>=1.33.1->langfuse) (1.34.1)
Requirement already satisfied: googleapis-common-protos~=1.52 in ./venv/lib/python3.12/site-packages (from opentelemetry-exporter-otlp-proto-grpc==1.34.1->opentelemetry-exporter-otlp<2.0.0,>=1.33.1->langfuse) (1.70.0)
Requirement already satisfied: grpcio<2.0.0,>=1.63.2 in ./venv/lib/python3.12/site-packages (from opentelemetry-exporter-otlp-proto-grpc==1.34.1->opentelemetry-exporter-otlp<2.0.0,>=1.33.1->langfuse) (1.73.1)
Requirement already satisfied: opentelemetry-exporter-otlp-proto-common==1.34.1 in ./venv/lib/python3.12/site-packages (from opentelemetry-exporter-otlp-proto-grpc==1.34.1->opentelemetry-exporter-otlp<2.0.0,>=1.33.1->langfuse) (1.34.1)
Requirement already satisfied: opentelemetry-proto==1.34.1 in ./venv/lib/python3.12/site-packages (from opentelemetry-exporter-otlp-proto-grpc==1.34.1->opentelemetry-exporter-otlp<2.0.0,>=1.33.1->langfuse) (1.34.1)
Requirement already satisfied: protobuf<6.0,>=5.0 in ./venv/lib/python3.12/site-packages (from opentelemetry-proto==1.34.1->opentelemetry-exporter-otlp-proto-grpc==1.34.1->opentelemetry-exporter-otlp<2.0.0,>=1.33.1->langfuse) (5.29.5)
Requirement already satisfied: opentelemetry-semantic-conventions==0.55b1 in ./venv/lib/python3.12/site-packages (from opentelemetry-sdk<2.0.0,>=1.33.1->langfuse) (0.55b1)
Requirement already satisfied: annotated-types>=0.4.0 in ./venv/lib/python3.12/site-packages (from pydantic<3.0,>=1.10.7->langfuse) (0.7.0)
Requirement already satisfied: pydantic-core==2.23.2 in ./venv/lib/python3.12/site-packages (from pydantic<3.0,>=1.10.7->langfuse) (2.23.2)
Requirement already satisfied: tzdata in ./venv/lib/python3.12/site-packages (from pydantic<3.0,>=1.10.7->langfuse) (2025.2)
Requirement already satisfied: charset_normalizer<4,>=2 in ./venv/lib/python3.12/site-packages (from requests<3,>=2->langfuse) (3.4.2)
Requirement already satisfied: urllib3<3,>=1.21.1 in ./venv/lib/python3.12/site-packages (from requests<3,>=2->langfuse) (2.5.0)
Requirement already satisfied: zipp>=3.20 in ./venv/lib/python3.12/site-packages (from importlib-metadata<8.8.0,>=6.0->opentelemetry-api<2.0.0,>=1.33.1->langfuse) (3.23.0)
62
config.py
@@ -8,6 +8,8 @@ from watchfiles import watch, Change
from threading import Thread
from langfuse import Langfuse
from langfuse.langchain import CallbackHandler
import yaml
from pathlib import Path

class LangfuseConfig(BaseSettings):
    enabled: bool = True
@@ -87,16 +89,23 @@ class LLMConfig(BaseSettings):
class Settings:
    def __init__(self):
        try:
            logger.info("Loading configuration from application.yml and environment variables")

            # Load configuration from YAML file
            yaml_config = self._load_yaml_config()
            logger.info("Loaded YAML config: {}", yaml_config)

            # Initialize configurations, allowing environment variables to override YAML
            logger.info("Initializing LogConfig")
            self.log = LogConfig()
            self.log = LogConfig(**yaml_config.get('log', {}))
            logger.info("LogConfig initialized: {}", self.log.model_dump())

            logger.info("Initializing LLMConfig")
            self.llm = LLMConfig()
            self.llm = LLMConfig(**yaml_config.get('llm', {}))
            logger.info("LLMConfig initialized: {}", self.llm.model_dump())

            logger.info("Initializing LangfuseConfig")
            self.langfuse = LangfuseConfig()
            self.langfuse = LangfuseConfig(**yaml_config.get('langfuse', {}))
            logger.info("LangfuseConfig initialized: {}", self.langfuse.model_dump())

            logger.info("Validating configuration")
@@ -114,6 +123,18 @@ class Settings:
            logger.error("LangfuseConfig: {}", self.langfuse.model_dump() if hasattr(self, 'langfuse') else 'Not initialized')
            raise

    def _load_yaml_config(self):
        config_path = Path('/root/development/jira-webhook-llm/config/application.yml')
        if not config_path.exists():
            logger.warning("Configuration file not found at {}", config_path)
            return {}
        try:
            with open(config_path, 'r') as f:
                return yaml.safe_load(f) or {}
        except Exception as e:
            logger.error("Error loading configuration from {}: {}", config_path, e)
            return {}

    def _validate(self):
        logger.info("LLM mode set to: '{}'", self.llm.mode)
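The two hunks above load application.yml once and feed each section into its pydantic-settings model as init kwargs. A minimal standalone sketch of that pattern (class and field names here are illustrative, assuming pydantic-settings v2 and PyYAML):

# Sketch of the YAML-into-BaseSettings pattern used above.
# LogConfig and its "level" field are illustrative, not the project's models.
from pathlib import Path

import yaml
from pydantic_settings import BaseSettings

class LogConfig(BaseSettings):
    level: str = "INFO"

def load_yaml_config(path: str) -> dict:
    config_path = Path(path)
    if not config_path.exists():
        return {}
    with open(config_path, "r") as f:
        return yaml.safe_load(f) or {}

yaml_config = load_yaml_config("config/application.yml")
log = LogConfig(**yaml_config.get("log", {}))
print(log.model_dump())

One caveat: pydantic-settings gives explicit init kwargs the highest priority by default, so values passed in from YAML this way override environment variables, not the other way around as the inline comment suggests.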
@@ -124,13 +145,11 @@ class Settings:
                raise ValueError("LLM mode is 'openai', but OPENAI_API_BASE_URL is not set.")
            if not self.llm.openai_model:
                raise ValueError("LLM mode is 'openai', but OPENAI_MODEL is not set.")

        elif self.llm.mode == 'ollama':
            if not self.llm.ollama_base_url:
                raise ValueError("LLM mode is 'ollama', but OLLAMA_BASE_URL is not set.")
            if not self.llm.ollama_model:
                raise ValueError("LLM mode is 'ollama', but OLLAMA_MODEL is not set.")

        logger.info("Configuration validated successfully.")

    def _init_langfuse(self):
@@ -161,11 +180,27 @@ class Settings:
                raise

            # Initialize CallbackHandler
            self.langfuse_handler = CallbackHandler(
                public_key=self.langfuse.public_key,
                secret_key=self.langfuse.secret_key,
                host=self.langfuse.host
            )
            try:
                self.langfuse_handler = CallbackHandler(
                    public_key=self.langfuse.public_key,
                    secret_key=self.langfuse.secret_key,
                    host=self.langfuse.host
                )
            except TypeError:
                try:
                    # Fallback for older versions of langfuse.langchain.CallbackHandler
                    self.langfuse_handler = CallbackHandler(
                        public_key=self.langfuse.public_key,
                        host=self.langfuse.host
                    )
                    logger.warning("Using fallback CallbackHandler initialization - secret_key parameter not supported")
                except TypeError:
                    # Fallback for even older versions
                    self.langfuse_handler = CallbackHandler(
                        public_key=self.langfuse.public_key
                    )
                    logger.warning("Using minimal CallbackHandler initialization - only public_key parameter supported")
            logger.info("Langfuse client and handler initialized successfully")

            logger.info("Langfuse client and handler initialized successfully")
        except ValueError as e:
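The nested try/except TypeError chain above generalizes: try the richest keyword set first and drop arguments until the installed version's constructor accepts one. A compact sketch of that idea (make_handler is a hypothetical helper, not part of this diff):

# Hypothetical helper: try progressively smaller kwarg sets until the
# constructor accepts one, mirroring the fallback chain above.
def make_handler(cls, public_key, secret_key=None, host=None):
    attempts = [
        {"public_key": public_key, "secret_key": secret_key, "host": host},
        {"public_key": public_key, "host": host},
        {"public_key": public_key},
    ]
    for kwargs in attempts:
        try:
            return cls(**kwargs)
        except TypeError:
            continue
    raise TypeError(f"no supported signature for {cls.__name__}")

Note that a TypeError raised inside the constructor body (rather than by argument binding) also triggers the fallback, so this pattern can silently mask real bugs.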
@@ -182,8 +217,13 @@ class Settings:
                if change[0] == Change.modified:
                    logger.info("Configuration file modified, reloading settings...")
                    try:
                        self.llm = LLMConfig()
                        # Reload YAML config and re-initialize all settings
                        yaml_config = self._load_yaml_config()
                        self.log = LogConfig(**yaml_config.get('log', {}))
                        self.llm = LLMConfig(**yaml_config.get('llm', {}))
                        self.langfuse = LangfuseConfig(**yaml_config.get('langfuse', {}))
                        self._validate()
                        self._init_langfuse()  # Re-initialize Langfuse client if needed
                        logger.info("Configuration reloaded successfully")
                    except Exception as e:
                        logger.error("Error reloading configuration: {}", e)
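The reload hook runs off watchfiles, which blocks and yields a set of (Change, path) tuples per batch of filesystem events. A self-contained sketch of the watcher loop this diff appears to use (path and callback are illustrative):

# Sketch: hot-reload loop on a daemon thread, as the watcher above does.
from threading import Thread

from watchfiles import watch, Change

def watch_config(path: str, reload_cb):
    # watch() blocks and yields a set of (Change, path) tuples per batch
    for changes in watch(path):
        for change, changed_path in changes:
            if change == Change.modified:
                reload_cb()

def start_watcher(path: str, reload_cb):
    t = Thread(target=watch_config, args=(path, reload_cb), daemon=True)
    t.start()
    return t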
@@ -1,3 +1,4 @@
import os
from dotenv import load_dotenv
load_dotenv()

@@ -20,12 +21,42 @@ from logging_config import configure_logging
# Initialize logging first
configure_logging(log_level="DEBUG")

import signal

try:
    app = FastAPI()
    logger.info("FastAPI application initialized")

    @app.on_event("shutdown")
    async def shutdown_event():
        """Handle application shutdown"""
        logger.info("Shutting down application...")
        try:
            # Cleanup Langfuse client if exists
            if hasattr(settings, 'langfuse_handler') and hasattr(settings.langfuse_handler, 'close'):
                try:
                    await settings.langfuse_handler.close()
                except Exception as e:
                    logger.warning(f"Error closing handler: {str(e)}")
            logger.info("Cleanup completed successfully")
        except Exception as e:
            logger.error(f"Error during shutdown: {str(e)}")
            raise

    def handle_shutdown_signal(signum, frame):
        """Handle OS signals for graceful shutdown"""
        logger.info(f"Received signal {signum}, initiating shutdown...")
        # Exit immediately after cleanup is complete
        os._exit(0)

    # Register signal handlers
    signal.signal(signal.SIGTERM, handle_shutdown_signal)
    signal.signal(signal.SIGINT, handle_shutdown_signal)

except Exception as e:
    logger.error(f"Error initializing FastAPI: {str(e)}")
    raise
    logger.critical(f"Failed to initialize FastAPI: {str(e)}")
    logger.warning("Application cannot continue without FastAPI initialization")
    sys.exit(1)
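One caveat with the handlers above: os._exit(0) terminates the process immediately, so the @app.on_event("shutdown") cleanup never runs when the process is stopped by SIGTERM or SIGINT. A sketch of the alternative, leaving signal handling to uvicorn so the shutdown hook actually fires (the settings import path is assumed from config.py above):

# Sketch: let uvicorn handle SIGTERM/SIGINT and keep cleanup in the
# shutdown hook instead of installing handlers that call os._exit().
from fastapi import FastAPI
from loguru import logger

from config import settings  # assumed module layout

app = FastAPI()

@app.on_event("shutdown")
async def shutdown_event():
    handler = getattr(settings, "langfuse_handler", None)
    if handler is not None and hasattr(handler, "close"):
        try:
            await handler.close()
        except Exception as e:
            logger.warning(f"Error closing handler: {e}")
    logger.info("Cleanup completed successfully")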
def retry(max_retries: int = 3, delay: float = 1.0):
    """Decorator for retrying failed operations"""

@@ -84,7 +115,14 @@ webhook_handler = JiraWebhookHandler()

@app.post("/jira-webhook")
async def jira_webhook_handler(payload: JiraWebhookPayload):
    return await webhook_handler.handle_webhook(payload)
    logger.info(f"Received webhook payload: {payload.model_dump()}")
    try:
        response = await webhook_handler.handle_webhook(payload)
        logger.info(f"Webhook processed successfully")
        return response
    except Exception as e:
        logger.error(f"Error processing webhook: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))
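For manual testing, the endpoint can be exercised with httpx (already in requirements.txt). Field names below follow the llm_input mapping later in this diff; the values, and the exact field types, are guesses:

# Sketch: posting a made-up payload to the /jira-webhook endpoint.
import httpx

payload = {
    "issueKey": "PROJ-123",
    "summary": "Customer escalation on login failures",
    "description": "Multiple users report 500s since the last deploy.",
    "status": "Open",
    "labels": ["escalation"],
    "assignee": "jdoe",
    "updated": "2025-07-01T12:00:00Z",
    "comment": "Please prioritise - the release is blocked.",
}

resp = httpx.post("http://localhost:8000/jira-webhook", json=payload)
print(resp.status_code, resp.json())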
@app.post("/test-llm")
|
||||
async def test_llm():
|
||||
@ -101,6 +139,6 @@ async def test_llm():
|
||||
)
|
||||
return await webhook_handler.handle_webhook(test_payload)
|
||||
|
||||
if __name__ == "__main__":
|
||||
import uvicorn
|
||||
uvicorn.run(app, host="0.0.0.0", port=8000)
|
||||
# if __name__ == "__main__":
|
||||
# import uvicorn
|
||||
# uvicorn.run(app, host="0.0.0.0", port=8000)
|
||||
@@ -1,7 +1,7 @@
from typing import Union
from langchain_ollama import OllamaLLM
from langchain_openai import ChatOpenAI
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts import PromptTemplate, ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from loguru import logger
import sys
@@ -45,14 +45,15 @@ elif settings.llm.mode == 'ollama':
            base_url=base_url,
            streaming=False,
            timeout=30,  # 30 second timeout
            max_retries=3,  # Retry up to 3 times
            temperature=0.1,
            top_p=0.2
            max_retries=3
            # ,  # Retry up to 3 times
            # temperature=0.1,
            # top_p=0.2
        )

        # Test connection
        logger.debug("Testing Ollama connection...")
        llm.invoke("test")  # Simple test request
        # llm.invoke("test")  # Simple test request
        logger.info("Ollama connection established successfully")

    except Exception as e:
@@ -84,30 +85,50 @@ parser = JsonOutputParser(pydantic_object=AnalysisFlags)
def load_prompt_template(version="v1.1.0"):
    try:
        with open(f"llm/prompts/jira_analysis_{version}.txt", "r") as f:
            template = f.read()
            return PromptTemplate(
                template=template,
                input_variables=[
                    "issueKey", "summary", "description", "status", "labels",
                    "assignee", "updated", "comment"
                ],
                partial_variables={"format_instructions": parser.get_format_instructions()},
            )
            template_content = f.read()

        # Split system and user parts
        system_template, user_template = template_content.split("\n\nUSER:\n")
        system_template = system_template.replace("SYSTEM:\n", "").strip()

        return ChatPromptTemplate.from_messages([
            SystemMessagePromptTemplate.from_template(system_template),
            HumanMessagePromptTemplate.from_template(user_template)
        ])
    except Exception as e:
        logger.error(f"Failed to load prompt template: {str(e)}")
        raise

# Fallback prompt template
FALLBACK_PROMPT = PromptTemplate(
    template="Please analyze this Jira ticket and provide a basic summary.",
    input_variables=["issueKey", "summary"]
)
FALLBACK_PROMPT = ChatPromptTemplate.from_messages([
    SystemMessagePromptTemplate.from_template(
        "Analyze Jira tickets and output JSON with hasMultipleEscalations, customerSentiment"
    ),
    HumanMessagePromptTemplate.from_template(
        "Issue Key: {issueKey}\nSummary: {summary}"
    )
])

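load_prompt_template above assumes the prompt file uses the SYSTEM:/USER: layout introduced later in this diff. A runnable sketch of the same split against an inline template standing in for jira_analysis_v1.1.0.txt:

# Sketch: parsing a "SYSTEM: ... USER: ..." template into a chat prompt,
# mirroring load_prompt_template above. Inline text replaces the real file.
from langchain_core.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)

template_content = (
    "SYSTEM:\n"
    "You analyze Jira tickets and output strict JSON.\n"
    "\n"
    "USER:\n"
    "Issue Key: {issueKey}\n"
    "Summary: {summary}"
)

system_template, user_template = template_content.split("\n\nUSER:\n")
system_template = system_template.replace("SYSTEM:\n", "").strip()

prompt = ChatPromptTemplate.from_messages([
    SystemMessagePromptTemplate.from_template(system_template),
    HumanMessagePromptTemplate.from_template(user_template),
])
print(prompt.format_messages(issueKey="PROJ-1", summary="Login errors"))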
# Create chain with fallback mechanism
def create_analysis_chain():
    try:
        prompt_template = load_prompt_template()
        chain = prompt_template | llm | parser
        chain = (
            {
                "issueKey": lambda x: x["issueKey"],
                "summary": lambda x: x["summary"],
                "description": lambda x: x["description"],
                "status": lambda x: x["status"],
                "labels": lambda x: x["labels"],
                "assignee": lambda x: x["assignee"],
                "updated": lambda x: x["updated"],
                "comment": lambda x: x["comment"],
                "format_instructions": lambda _: parser.get_format_instructions()
            }
            | prompt_template
            | llm
            | parser
        )

        # Add langfuse handler if enabled
        if settings.langfuse.enabled:
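The per-field lambdas in the input map can be collapsed with operator.itemgetter, which LCEL accepts anywhere a runnable is expected. An equivalent sketch, assuming prompt_template, llm, and parser as defined above:

# Sketch: same LCEL input mapping, one itemgetter per field instead of
# one lambda per field.
from operator import itemgetter

fields = ["issueKey", "summary", "description", "status",
          "labels", "assignee", "updated", "comment"]

input_map = {name: itemgetter(name) for name in fields}
input_map["format_instructions"] = lambda _: parser.get_format_instructions()

chain = input_map | prompt_template | llm | parser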
@@ -139,7 +160,8 @@ def validate_response(response: Union[dict, str]) -> bool:
    try:
        response = json.loads(response)
    except json.JSONDecodeError:
        return False
        logger.error(f"Invalid JSON response: {response}")
        raise ValueError("Invalid JSON response format")

    # Ensure response is a dictionary
    if not isinstance(response, dict):
@@ -29,9 +29,13 @@ class AnalysisFlags(BaseModel):
    def __init__(self, **data):
        super().__init__(**data)

        # Track model usage if Langfuse is enabled
        if settings.langfuse.enabled:
        # Track model usage if Langfuse is enabled and client is available
        if settings.langfuse.enabled and hasattr(settings, 'langfuse_client'):
            try:
                if settings.langfuse_client is None:
                    logger.warning("Langfuse client is None despite being enabled")
                    return

                settings.langfuse_client.trace(
                    name="LLM Model Usage",
                    input=data,
@@ -1,4 +1,4 @@
SYSTEM INSTRUCTIONS:
SYSTEM:
You are an AI assistant designed to analyze Jira ticket details containing email correspondence and extract key flags and sentiment, outputting information in a strict JSON format.

Your output MUST be ONLY a valid JSON object. Do NOT include any conversational text, explanations, or markdown outside the JSON.
@@ -14,8 +14,9 @@ Consider the overall context of the ticket and specifically the latest comment i
-- Usually means that the Customer is asking for help due to upcoming deadlines or other high-priority issues which are blocked by our stalling.
- Summarize the overall customer sentiment evident in the issue. Analyze tone of responses, happiness, gratefulness, irritation, etc.

{format_instructions}

USER CONTENT:
USER:
Issue Key: {issueKey}
Summary: {summary}
Description: {description}
@@ -23,6 +24,4 @@ Status: {status}
Existing Labels: {labels}
Assignee: {assignee}
Last Updated: {updated}
Latest Comment (if applicable): {comment}

{format_instructions}
Latest Comment (if applicable): {comment}
@@ -5,11 +5,60 @@ from datetime import datetime
from typing import Optional
from loguru import logger

# Initialize logger with default configuration
# Basic fallback logging configuration
logger.remove()
logger.add(sys.stderr, level="WARNING", format="{message}")
logger.add(sys.stderr, level="WARNING", format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}")

def configure_logging(log_level: str = "INFO", log_dir: Optional[str] = None):
    """Configure structured logging for the application with fallback handling"""
    try:
        # Log that we're attempting to configure logging
        logger.warning("Attempting to configure logging...")

        # Default log directory
        if not log_dir:
            log_dir = os.getenv("LOG_DIR", "logs")

        # Create log directory if it doesn't exist
        Path(log_dir).mkdir(parents=True, exist_ok=True)

        # Log file path with timestamp
        log_file = Path(log_dir) / f"jira-webhook-llm_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"

        # Remove any existing loggers
        logger.remove()

        # Add console logger
        logger.add(
            sys.stdout,
            level=log_level,
            format="{time:YYYY-MM-DD HH:mm:ss.SSS} | {level} | {extra[request_id]} | {message}",
            colorize=True,
            backtrace=True,
            diagnose=True
        )

        # Add file logger
        logger.add(
            str(log_file),
            level=log_level,
            format="{time:YYYY-MM-DD HH:mm:ss.SSS} | {level} | {extra[request_id]} | {message}",
            rotation="100 MB",
            retention="30 days",
            compression="zip",
            backtrace=True,
            diagnose=True
        )

        # Configure default extras
        logger.configure(extra={"request_id": "N/A"})

        logger.info("Logging configured successfully")
    except Exception as e:
        # Fallback to basic logging if configuration fails
        logger.remove()
        logger.add(sys.stderr, level="WARNING", format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}")
        logger.error(f"Failed to configure logging: {str(e)}. Using fallback logging configuration.")
    """Configure structured logging for the application"""

    # Default log directory
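The {extra[request_id]} field in both formats is supplied at call sites via logger.bind(); without a bind, the "N/A" default from logger.configure() applies. A small usage sketch (the handler function is illustrative):

# Sketch: populating the request_id extra used by the formats above.
import uuid

from loguru import logger

def handle_request():
    request_log = logger.bind(request_id=str(uuid.uuid4()))
    request_log.info("Processing webhook")  # request_id appears in output

handle_request()
logger.info("No request in flight")  # falls back to the "N/A" default extra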
@@ -15,4 +15,5 @@ unittest2>=1.1.0
pytest==8.2.0
pytest-asyncio==0.23.5
pytest-cov==4.1.0
httpx==0.27.0
httpx==0.27.0
PyYAML
@@ -42,17 +42,15 @@ class JiraWebhookHandler:
        # Create Langfuse trace if enabled
        trace = None
        if settings.langfuse.enabled:
            trace = settings.langfuse_client.trace(
            Langfuse().trace(
                id=f"webhook-{payload.issueKey}",
                name="Jira Webhook",
                input=payload.dict(),
                metadata={
            trace = settings.langfuse_client.start_span(
                name="Jira Webhook",
                input=payload.dict(),
                metadata={
                    "trace_id": f"webhook-{payload.issueKey}",
                    "issue_key": payload.issueKey,
                    "timestamp": datetime.utcnow().isoformat()
                }
            )
            )

        llm_input = {
            "issueKey": payload.issueKey,
@@ -68,7 +66,7 @@ class JiraWebhookHandler:
        # Create Langfuse span for LLM processing if enabled
        llm_span = None
        if settings.langfuse.enabled and trace:
            llm_span = trace.span(
            llm_span = trace.start_span(
                name="LLM Processing",
                input=llm_input,
                metadata={
@@ -81,7 +79,8 @@ class JiraWebhookHandler:

        # Update Langfuse span with output if enabled
        if settings.langfuse.enabled and llm_span:
            llm_span.end(output=analysis_result)
            llm_span.update(output=analysis_result)
            llm_span.end()

        # Validate LLM response
        if not validate_response(analysis_result):
@@ -99,7 +98,8 @@ class JiraWebhookHandler:

        # Log error to Langfuse if enabled
        if settings.langfuse.enabled and llm_span:
            llm_span.end(error=e)
            llm_span.error(e)
            llm_span.end()
        return {
            "status": "error",
            "analysis_flags": {
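The trace() to start_span() change follows the Langfuse v3 SDK, where spans are opened from the client, updated with output, and explicitly ended. A standalone sketch with placeholder credentials:

# Sketch: Langfuse v3 span lifecycle as used in the handler above.
# Keys and host are placeholders; error handling is elided.
from langfuse import Langfuse

langfuse = Langfuse(public_key="pk-...", secret_key="sk-...",
                    host="https://cloud.langfuse.com")

span = langfuse.start_span(name="Jira Webhook",
                           input={"issueKey": "PROJ-123"})
try:
    result = {"customerSentiment": "frustrated"}
    span.update(output=result)
finally:
    span.end()

langfuse.flush()  # ensure buffered events are sent before exit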