feat: Enhance configuration loading and logging, implement graceful shutdown handling, and improve Langfuse integration
Checks: CI/CD Pipeline / test (push) was cancelled

Author: Ireneusz Bachanowicz, 2025-07-14 01:07:20 +02:00
commit 030df3e8e0 (parent a3551d4233)
9 changed files with 242 additions and 57 deletions

=3.2.0 (new file, 32 lines)

@@ -0,0 +1,32 @@
Requirement already satisfied: langfuse in ./venv/lib/python3.12/site-packages (3.1.3)
Requirement already satisfied: backoff>=1.10.0 in ./venv/lib/python3.12/site-packages (from langfuse) (2.2.1)
Requirement already satisfied: httpx<1.0,>=0.15.4 in ./venv/lib/python3.12/site-packages (from langfuse) (0.27.0)
Requirement already satisfied: opentelemetry-api<2.0.0,>=1.33.1 in ./venv/lib/python3.12/site-packages (from langfuse) (1.34.1)
Requirement already satisfied: opentelemetry-exporter-otlp<2.0.0,>=1.33.1 in ./venv/lib/python3.12/site-packages (from langfuse) (1.34.1)
Requirement already satisfied: opentelemetry-sdk<2.0.0,>=1.33.1 in ./venv/lib/python3.12/site-packages (from langfuse) (1.34.1)
Requirement already satisfied: packaging<25.0,>=23.2 in ./venv/lib/python3.12/site-packages (from langfuse) (24.2)
Requirement already satisfied: pydantic<3.0,>=1.10.7 in ./venv/lib/python3.12/site-packages (from langfuse) (2.9.0)
Requirement already satisfied: requests<3,>=2 in ./venv/lib/python3.12/site-packages (from langfuse) (2.32.4)
Requirement already satisfied: wrapt<2.0,>=1.14 in ./venv/lib/python3.12/site-packages (from langfuse) (1.17.2)
Requirement already satisfied: anyio in ./venv/lib/python3.12/site-packages (from httpx<1.0,>=0.15.4->langfuse) (4.9.0)
Requirement already satisfied: certifi in ./venv/lib/python3.12/site-packages (from httpx<1.0,>=0.15.4->langfuse) (2025.6.15)
Requirement already satisfied: httpcore==1.* in ./venv/lib/python3.12/site-packages (from httpx<1.0,>=0.15.4->langfuse) (1.0.9)
Requirement already satisfied: idna in ./venv/lib/python3.12/site-packages (from httpx<1.0,>=0.15.4->langfuse) (3.10)
Requirement already satisfied: sniffio in ./venv/lib/python3.12/site-packages (from httpx<1.0,>=0.15.4->langfuse) (1.3.1)
Requirement already satisfied: h11>=0.16 in ./venv/lib/python3.12/site-packages (from httpcore==1.*->httpx<1.0,>=0.15.4->langfuse) (0.16.0)
Requirement already satisfied: importlib-metadata<8.8.0,>=6.0 in ./venv/lib/python3.12/site-packages (from opentelemetry-api<2.0.0,>=1.33.1->langfuse) (8.7.0)
Requirement already satisfied: typing-extensions>=4.5.0 in ./venv/lib/python3.12/site-packages (from opentelemetry-api<2.0.0,>=1.33.1->langfuse) (4.14.1)
Requirement already satisfied: opentelemetry-exporter-otlp-proto-grpc==1.34.1 in ./venv/lib/python3.12/site-packages (from opentelemetry-exporter-otlp<2.0.0,>=1.33.1->langfuse) (1.34.1)
Requirement already satisfied: opentelemetry-exporter-otlp-proto-http==1.34.1 in ./venv/lib/python3.12/site-packages (from opentelemetry-exporter-otlp<2.0.0,>=1.33.1->langfuse) (1.34.1)
Requirement already satisfied: googleapis-common-protos~=1.52 in ./venv/lib/python3.12/site-packages (from opentelemetry-exporter-otlp-proto-grpc==1.34.1->opentelemetry-exporter-otlp<2.0.0,>=1.33.1->langfuse) (1.70.0)
Requirement already satisfied: grpcio<2.0.0,>=1.63.2 in ./venv/lib/python3.12/site-packages (from opentelemetry-exporter-otlp-proto-grpc==1.34.1->opentelemetry-exporter-otlp<2.0.0,>=1.33.1->langfuse) (1.73.1)
Requirement already satisfied: opentelemetry-exporter-otlp-proto-common==1.34.1 in ./venv/lib/python3.12/site-packages (from opentelemetry-exporter-otlp-proto-grpc==1.34.1->opentelemetry-exporter-otlp<2.0.0,>=1.33.1->langfuse) (1.34.1)
Requirement already satisfied: opentelemetry-proto==1.34.1 in ./venv/lib/python3.12/site-packages (from opentelemetry-exporter-otlp-proto-grpc==1.34.1->opentelemetry-exporter-otlp<2.0.0,>=1.33.1->langfuse) (1.34.1)
Requirement already satisfied: protobuf<6.0,>=5.0 in ./venv/lib/python3.12/site-packages (from opentelemetry-proto==1.34.1->opentelemetry-exporter-otlp-proto-grpc==1.34.1->opentelemetry-exporter-otlp<2.0.0,>=1.33.1->langfuse) (5.29.5)
Requirement already satisfied: opentelemetry-semantic-conventions==0.55b1 in ./venv/lib/python3.12/site-packages (from opentelemetry-sdk<2.0.0,>=1.33.1->langfuse) (0.55b1)
Requirement already satisfied: annotated-types>=0.4.0 in ./venv/lib/python3.12/site-packages (from pydantic<3.0,>=1.10.7->langfuse) (0.7.0)
Requirement already satisfied: pydantic-core==2.23.2 in ./venv/lib/python3.12/site-packages (from pydantic<3.0,>=1.10.7->langfuse) (2.23.2)
Requirement already satisfied: tzdata in ./venv/lib/python3.12/site-packages (from pydantic<3.0,>=1.10.7->langfuse) (2025.2)
Requirement already satisfied: charset_normalizer<4,>=2 in ./venv/lib/python3.12/site-packages (from requests<3,>=2->langfuse) (3.4.2)
Requirement already satisfied: urllib3<3,>=1.21.1 in ./venv/lib/python3.12/site-packages (from requests<3,>=2->langfuse) (2.5.0)
Requirement already satisfied: zipp>=3.20 in ./venv/lib/python3.12/site-packages (from importlib-metadata<8.8.0,>=6.0->opentelemetry-api<2.0.0,>=1.33.1->langfuse) (3.23.0)


@@ -8,6 +8,8 @@ from watchfiles import watch, Change
 from threading import Thread
 from langfuse import Langfuse
 from langfuse.langchain import CallbackHandler
+import yaml
+from pathlib import Path

 class LangfuseConfig(BaseSettings):
     enabled: bool = True
@@ -87,16 +89,23 @@ class LLMConfig(BaseSettings):
 class Settings:
     def __init__(self):
         try:
+            logger.info("Loading configuration from application.yml and environment variables")
+            # Load configuration from YAML file
+            yaml_config = self._load_yaml_config()
+            logger.info("Loaded YAML config: {}", yaml_config)  # Add this log line
+
+            # Initialize configurations, allowing environment variables to override YAML
             logger.info("Initializing LogConfig")
-            self.log = LogConfig()
+            self.log = LogConfig(**yaml_config.get('log', {}))
             logger.info("LogConfig initialized: {}", self.log.model_dump())

             logger.info("Initializing LLMConfig")
-            self.llm = LLMConfig()
+            self.llm = LLMConfig(**yaml_config.get('llm', {}))
             logger.info("LLMConfig initialized: {}", self.llm.model_dump())

             logger.info("Initializing LangfuseConfig")
-            self.langfuse = LangfuseConfig()
+            self.langfuse = LangfuseConfig(**yaml_config.get('langfuse', {}))
             logger.info("LangfuseConfig initialized: {}", self.langfuse.model_dump())

             logger.info("Validating configuration")
@@ -114,6 +123,18 @@ class Settings:
             logger.error("LangfuseConfig: {}", self.langfuse.model_dump() if hasattr(self, 'langfuse') else 'Not initialized')
             raise

+    def _load_yaml_config(self):
+        config_path = Path('/root/development/jira-webhook-llm/config/application.yml')
+        if not config_path.exists():
+            logger.warning("Configuration file not found at {}", config_path)
+            return {}
+        try:
+            with open(config_path, 'r') as f:
+                return yaml.safe_load(f) or {}
+        except Exception as e:
+            logger.error("Error loading configuration from {}: {}", config_path, e)
+            return {}
+
     def _validate(self):
         logger.info("LLM mode set to: '{}'", self.llm.mode)
@@ -124,13 +145,11 @@ class Settings:
                 raise ValueError("LLM mode is 'openai', but OPENAI_API_BASE_URL is not set.")
             if not self.llm.openai_model:
                 raise ValueError("LLM mode is 'openai', but OPENAI_MODEL is not set.")
-
         elif self.llm.mode == 'ollama':
             if not self.llm.ollama_base_url:
                 raise ValueError("LLM mode is 'ollama', but OLLAMA_BASE_URL is not set.")
             if not self.llm.ollama_model:
                 raise ValueError("LLM mode is 'ollama', but OLLAMA_MODEL is not set.")
-
         logger.info("Configuration validated successfully.")

     def _init_langfuse(self):
@@ -161,11 +180,27 @@ class Settings:
             raise

         # Initialize CallbackHandler
-        self.langfuse_handler = CallbackHandler(
-            public_key=self.langfuse.public_key,
-            secret_key=self.langfuse.secret_key,
-            host=self.langfuse.host
-        )
+        try:
+            self.langfuse_handler = CallbackHandler(
+                public_key=self.langfuse.public_key,
+                secret_key=self.langfuse.secret_key,
+                host=self.langfuse.host
+            )
+        except TypeError:
+            try:
+                # Fallback for older versions of langfuse.langchain.CallbackHandler
+                self.langfuse_handler = CallbackHandler(
+                    public_key=self.langfuse.public_key,
+                    host=self.langfuse.host
+                )
+                logger.warning("Using fallback CallbackHandler initialization - secret_key parameter not supported")
+            except TypeError:
+                # Fallback for even older versions
+                self.langfuse_handler = CallbackHandler(
+                    public_key=self.langfuse.public_key
+                )
+                logger.warning("Using minimal CallbackHandler initialization - only public_key parameter supported")
+        logger.info("Langfuse client and handler initialized successfully")

         logger.info("Langfuse client and handler initialized successfully")
     except ValueError as e:
@@ -182,8 +217,13 @@ class Settings:
                 if change[0] == Change.modified:
                     logger.info("Configuration file modified, reloading settings...")
                     try:
-                        self.llm = LLMConfig()
+                        # Reload YAML config and re-initialize all settings
+                        yaml_config = self._load_yaml_config()
+                        self.log = LogConfig(**yaml_config.get('log', {}))
+                        self.llm = LLMConfig(**yaml_config.get('llm', {}))
+                        self.langfuse = LangfuseConfig(**yaml_config.get('langfuse', {}))
                         self._validate()
+                        self._init_langfuse()  # Re-initialize Langfuse client if needed
                         logger.info("Configuration reloaded successfully")
                     except Exception as e:
                         logger.error("Error reloading configuration: {}", e)


@@ -1,3 +1,4 @@
+import os
 from dotenv import load_dotenv

 load_dotenv()
@@ -20,12 +21,42 @@ from logging_config import configure_logging
 # Initialize logging first
 configure_logging(log_level="DEBUG")

+import signal
+
 try:
     app = FastAPI()
     logger.info("FastAPI application initialized")
+
+    @app.on_event("shutdown")
+    async def shutdown_event():
+        """Handle application shutdown"""
+        logger.info("Shutting down application...")
+        try:
+            # Cleanup Langfuse client if exists
+            if hasattr(settings, 'langfuse_handler') and hasattr(settings.langfuse_handler, 'close'):
+                try:
+                    await settings.langfuse_handler.close()
+                except Exception as e:
+                    logger.warning(f"Error closing handler: {str(e)}")
+            logger.info("Cleanup completed successfully")
+        except Exception as e:
+            logger.error(f"Error during shutdown: {str(e)}")
+            raise
+
+    def handle_shutdown_signal(signum, frame):
+        """Handle OS signals for graceful shutdown"""
+        logger.info(f"Received signal {signum}, initiating shutdown...")
+        # Exit immediately after cleanup is complete
+        os._exit(0)
+
+    # Register signal handlers
+    signal.signal(signal.SIGTERM, handle_shutdown_signal)
+    signal.signal(signal.SIGINT, handle_shutdown_signal)
 except Exception as e:
-    logger.error(f"Error initializing FastAPI: {str(e)}")
-    raise
+    logger.critical(f"Failed to initialize FastAPI: {str(e)}")
+    logger.warning("Application cannot continue without FastAPI initialization")
+    sys.exit(1)

 def retry(max_retries: int = 3, delay: float = 1.0):
     """Decorator for retrying failed operations"""
@@ -84,7 +115,14 @@ webhook_handler = JiraWebhookHandler()
 @app.post("/jira-webhook")
 async def jira_webhook_handler(payload: JiraWebhookPayload):
-    return await webhook_handler.handle_webhook(payload)
+    logger.info(f"Received webhook payload: {payload.model_dump()}")
+    try:
+        response = await webhook_handler.handle_webhook(payload)
+        logger.info(f"Webhook processed successfully")
+        return response
+    except Exception as e:
+        logger.error(f"Error processing webhook: {str(e)}")
+        raise HTTPException(status_code=500, detail=str(e))

 @app.post("/test-llm")
 async def test_llm():
@@ -101,6 +139,6 @@ async def test_llm():
     )
     return await webhook_handler.handle_webhook(test_payload)

-if __name__ == "__main__":
-    import uvicorn
-    uvicorn.run(app, host="0.0.0.0", port=8000)
+# if __name__ == "__main__":
+#     import uvicorn
+#     uvicorn.run(app, host="0.0.0.0", port=8000)
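With the __main__ block commented out, the server presumably has to be launched by an external process. A minimal equivalent launcher, sketched under the assumption that this module is named main.py (the filename is not shown in this view):

# Hypothetical entry point, assuming the FastAPI app object lives in main.py.
# Equivalent to running: uvicorn main:app --host 0.0.0.0 --port 8000
import uvicorn

if __name__ == "__main__":
    uvicorn.run("main:app", host="0.0.0.0", port=8000)

One caveat worth noting: handle_shutdown_signal calls os._exit(0), which terminates the process immediately, so the cleanup in shutdown_event only runs when the server is allowed to stop the event loop itself.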


@@ -1,7 +1,7 @@
 from typing import Union
 from langchain_ollama import OllamaLLM
 from langchain_openai import ChatOpenAI
-from langchain_core.prompts import PromptTemplate
+from langchain_core.prompts import PromptTemplate, ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
 from langchain_core.output_parsers import JsonOutputParser
 from loguru import logger
 import sys
@@ -45,14 +45,15 @@ elif settings.llm.mode == 'ollama':
             base_url=base_url,
             streaming=False,
             timeout=30,  # 30 second timeout
-            max_retries=3,  # Retry up to 3 times
-            temperature=0.1,
-            top_p=0.2
+            max_retries=3
+            # , # Retry up to 3 times
+            # temperature=0.1,
+            # top_p=0.2
         )
         # Test connection
         logger.debug("Testing Ollama connection...")
-        llm.invoke("test")  # Simple test request
+        # llm.invoke("test")  # Simple test request
         logger.info("Ollama connection established successfully")
     except Exception as e:
@@ -84,30 +85,50 @@ parser = JsonOutputParser(pydantic_object=AnalysisFlags)
 def load_prompt_template(version="v1.1.0"):
     try:
         with open(f"llm/prompts/jira_analysis_{version}.txt", "r") as f:
-            template = f.read()
-        return PromptTemplate(
-            template=template,
-            input_variables=[
-                "issueKey", "summary", "description", "status", "labels",
-                "assignee", "updated", "comment"
-            ],
-            partial_variables={"format_instructions": parser.get_format_instructions()},
-        )
+            template_content = f.read()
+
+        # Split system and user parts
+        system_template, user_template = template_content.split("\n\nUSER:\n")
+        system_template = system_template.replace("SYSTEM:\n", "").strip()
+
+        return ChatPromptTemplate.from_messages([
+            SystemMessagePromptTemplate.from_template(system_template),
+            HumanMessagePromptTemplate.from_template(user_template)
+        ])
     except Exception as e:
         logger.error(f"Failed to load prompt template: {str(e)}")
         raise

 # Fallback prompt template
-FALLBACK_PROMPT = PromptTemplate(
-    template="Please analyze this Jira ticket and provide a basic summary.",
-    input_variables=["issueKey", "summary"]
-)
+FALLBACK_PROMPT = ChatPromptTemplate.from_messages([
+    SystemMessagePromptTemplate.from_template(
+        "Analyze Jira tickets and output JSON with hasMultipleEscalations, customerSentiment"
+    ),
+    HumanMessagePromptTemplate.from_template(
+        "Issue Key: {issueKey}\nSummary: {summary}"
+    )
+])

 # Create chain with fallback mechanism
 def create_analysis_chain():
     try:
         prompt_template = load_prompt_template()
-        chain = prompt_template | llm | parser
+        chain = (
+            {
+                "issueKey": lambda x: x["issueKey"],
+                "summary": lambda x: x["summary"],
+                "description": lambda x: x["description"],
+                "status": lambda x: x["status"],
+                "labels": lambda x: x["labels"],
+                "assignee": lambda x: x["assignee"],
+                "updated": lambda x: x["updated"],
+                "comment": lambda x: x["comment"],
+                "format_instructions": lambda _: parser.get_format_instructions()
+            }
+            | prompt_template
+            | llm
+            | parser
+        )

         # Add langfuse handler if enabled
         if settings.langfuse.enabled:
@@ -139,7 +160,8 @@ def validate_response(response: Union[dict, str]) -> bool:
         try:
             response = json.loads(response)
         except json.JSONDecodeError:
-            return False
+            logger.error(f"Invalid JSON response: {response}")
+            raise ValueError("Invalid JSON response format")

     # Ensure response is a dictionary
     if not isinstance(response, dict):
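The chain rewrite above replaces PromptTemplate's partial_variables with a mapping step that injects format_instructions at run time. A minimal self-contained sketch of that pattern (the template strings below are illustrative, not the real prompt):

from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_messages([
    ("system", "Output only JSON.\n{format_instructions}"),
    ("human", "Issue Key: {issueKey}\nSummary: {summary}"),
])

# A plain dict piped into a runnable becomes a parallel mapping step.
chain = {
    "issueKey": lambda x: x["issueKey"],
    "summary": lambda x: x["summary"],
    "format_instructions": lambda _: "Return a JSON object.",
} | prompt

print(chain.invoke({"issueKey": "ABC-1", "summary": "Login fails"}))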


@@ -29,9 +29,13 @@ class AnalysisFlags(BaseModel):
     def __init__(self, **data):
         super().__init__(**data)
-        # Track model usage if Langfuse is enabled
-        if settings.langfuse.enabled:
+        # Track model usage if Langfuse is enabled and client is available
+        if settings.langfuse.enabled and hasattr(settings, 'langfuse_client'):
             try:
+                if settings.langfuse_client is None:
+                    logger.warning("Langfuse client is None despite being enabled")
+                    return
                 settings.langfuse_client.trace(
                     name="LLM Model Usage",
                     input=data,
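The guard added here checks both the enabled flag and the presence of the client attribute before tracing. An equivalent standalone sketch of that defensive pattern (the settings stand-in below is hypothetical):

# Hypothetical illustration of the enabled-plus-client guard.
class FakeSettings:
    langfuse_enabled = True
    langfuse_client = None  # enabled, but client construction failed

settings = FakeSettings()

def track(data: dict) -> None:
    client = getattr(settings, "langfuse_client", None)
    if not settings.langfuse_enabled or client is None:
        return  # the flag alone is not enough; the client must exist
    client.trace(name="LLM Model Usage", input=data)

track({"issueKey": "ABC-1"})  # no-op rather than an AttributeError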

View File

@ -1,4 +1,4 @@
SYSTEM INSTRUCTIONS: SYSTEM:
You are an AI assistant designed to analyze Jira ticket details containing email correspondence and extract key flags and sentiment, outputting information in a strict JSON format. You are an AI assistant designed to analyze Jira ticket details containing email correspondence and extract key flags and sentiment, outputting information in a strict JSON format.
Your output MUST be ONLY a valid JSON object. Do NOT include any conversational text, explanations, or markdown outside the JSON. Your output MUST be ONLY a valid JSON object. Do NOT include any conversational text, explanations, or markdown outside the JSON.
@@ -14,8 +14,9 @@ Consider the overall context of the ticket and specifically the latest comment i
 -- Usually means that Customer is asking for help due to upcoming deadlines, other high priority issues which are blocked due to our stall.
 - Summarize the overall customer sentiment evident in the issue. Analyze tone of responses, happiness, gratefulness, irritation, etc.
+{format_instructions}

-USER CONTENT:
+USER:
 Issue Key: {issueKey}
 Summary: {summary}
 Description: {description}
@@ -23,6 +24,4 @@ Status: {status}
 Existing Labels: {labels}
 Assignee: {assignee}
 Last Updated: {updated}
 Latest Comment (if applicable): {comment}
-
-{format_instructions}
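The renamed SYSTEM:/USER: markers and the blank line before USER: are what load_prompt_template splits on. A toy check of that parsing, using the same split logic as the loader in this commit (the template text is abbreviated):

# Mirrors the split performed by load_prompt_template.
template_content = (
    "SYSTEM:\n"
    "You are an AI assistant. Output only JSON.\n"
    "{format_instructions}\n"
    "\n"
    "USER:\n"
    "Issue Key: {issueKey}\n"
    "Summary: {summary}"
)
system_template, user_template = template_content.split("\n\nUSER:\n")
system_template = system_template.replace("SYSTEM:\n", "").strip()
assert system_template.endswith("{format_instructions}")
assert user_template.startswith("Issue Key:")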


@@ -5,11 +5,60 @@ from datetime import datetime
 from typing import Optional
 from loguru import logger

-# Initialize logger with default configuration
+# Basic fallback logging configuration
 logger.remove()
-logger.add(sys.stderr, level="WARNING", format="{message}")
+logger.add(sys.stderr, level="WARNING", format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}")

 def configure_logging(log_level: str = "INFO", log_dir: Optional[str] = None):
+    """Configure structured logging for the application with fallback handling"""
+    try:
+        # Log that we're attempting to configure logging
+        logger.warning("Attempting to configure logging...")
+
+        # Default log directory
+        if not log_dir:
+            log_dir = os.getenv("LOG_DIR", "logs")
+
+        # Create log directory if it doesn't exist
+        Path(log_dir).mkdir(parents=True, exist_ok=True)
+
+        # Log file path with timestamp
+        log_file = Path(log_dir) / f"jira-webhook-llm_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"
+
+        # Remove any existing loggers
+        logger.remove()
+
+        # Add console logger
+        logger.add(
+            sys.stdout,
+            level=log_level,
+            format="{time:YYYY-MM-DD HH:mm:ss.SSS} | {level} | {extra[request_id]} | {message}",
+            colorize=True,
+            backtrace=True,
+            diagnose=True
+        )
+
+        # Add file logger
+        logger.add(
+            str(log_file),
+            level=log_level,
+            format="{time:YYYY-MM-DD HH:mm:ss.SSS} | {level} | {extra[request_id]} | {message}",
+            rotation="100 MB",
+            retention="30 days",
+            compression="zip",
+            backtrace=True,
+            diagnose=True
+        )
+
+        # Configure default extras
+        logger.configure(extra={"request_id": "N/A"})
+
+        logger.info("Logging configured successfully")
+    except Exception as e:
+        # Fallback to basic logging if configuration fails
+        logger.remove()
+        logger.add(sys.stderr, level="WARNING", format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}")
+        logger.error(f"Failed to configure logging: {str(e)}. Using fallback logging configuration.")
+
     """Configure structured logging for the application"""

     # Default log directory
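Both new sinks include {extra[request_id]} in their format, with logger.configure(extra={"request_id": "N/A"}) supplying the default, so per-request context can be attached via loguru's bind(). A small usage sketch (the request id value is illustrative):

from loguru import logger

# Child logger carrying a request id; records it emits render that id
# in place of the default "N/A".
request_logger = logger.bind(request_id="req-42")
request_logger.info("Processing webhook")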


@@ -15,4 +15,5 @@ unittest2>=1.1.0
 pytest==8.2.0
 pytest-asyncio==0.23.5
 pytest-cov==4.1.0
 httpx==0.27.0
+PyYAML


@@ -42,17 +42,15 @@ class JiraWebhookHandler:
         # Create Langfuse trace if enabled
         trace = None
         if settings.langfuse.enabled:
-            trace = settings.langfuse_client.trace(
-                Langfuse().trace(
-                    id=f"webhook-{payload.issueKey}",
-                    name="Jira Webhook",
-                    input=payload.dict(),
-                    metadata={
-                        "issue_key": payload.issueKey,
-                        "timestamp": datetime.utcnow().isoformat()
-                    }
-                )
-            )
+            trace = settings.langfuse_client.start_span(
+                name="Jira Webhook",
+                input=payload.dict(),
+                metadata={
+                    "trace_id": f"webhook-{payload.issueKey}",
+                    "issue_key": payload.issueKey,
+                    "timestamp": datetime.utcnow().isoformat()
+                }
+            )

         llm_input = {
             "issueKey": payload.issueKey,
@@ -68,7 +66,7 @@
             # Create Langfuse span for LLM processing if enabled
             llm_span = None
             if settings.langfuse.enabled and trace:
-                llm_span = trace.span(
+                llm_span = trace.start_span(
                     name="LLM Processing",
                     input=llm_input,
                     metadata={
@@ -81,7 +79,8 @@
             # Update Langfuse span with output if enabled
             if settings.langfuse.enabled and llm_span:
-                llm_span.end(output=analysis_result)
+                llm_span.update(output=analysis_result)
+                llm_span.end()

             # Validate LLM response
             if not validate_response(analysis_result):
@@ -99,7 +98,8 @@
             # Log error to Langfuse if enabled
             if settings.langfuse.enabled and llm_span:
-                llm_span.end(error=e)
+                llm_span.error(e)
+                llm_span.end()

             return {
                 "status": "error",
                 "analysis_flags": {