jira-webhook-llm/llm/models.py
Ireneusz Bachanowicz 935a8a49ae
Some checks are pending
CI/CD Pipeline / test (push) Waiting to run
Almost stable tests
2025-07-17 02:21:56 +02:00

57 lines
2.2 KiB
Python

from typing import Optional, List, Union
from loguru import logger
from pydantic import BaseModel, ConfigDict, field_validator, Field
from config import settings
class LLMResponse(BaseModel):
    """Minimal response envelope returned after LLM processing.

    Carries only an outcome indicator and a human-readable message.
    """
    status: str  # outcome indicator string (exact vocabulary set by callers; not constrained here)
    message: str  # human-readable detail accompanying the status
def _to_camel(name: str) -> str:
    """Convert a snake_case field name to camelCase for payload aliases."""
    first, *rest = name.split('_')
    return first + ''.join(part.capitalize() for part in rest)


class JiraWebhookPayload(BaseModel):
    """Incoming Jira webhook payload.

    Field aliases are generated in camelCase (matching Jira's JSON keys),
    and population by the Python field name is also accepted.
    """

    model_config = ConfigDict(alias_generator=_to_camel, populate_by_name=True)

    issueKey: str
    projectKey: Optional[str] = None  # Added missing field
    summary: str
    description: Optional[str] = None
    comment: Optional[str] = None
    # Jira may deliver labels as a single string or a list; normalized below.
    labels: Optional[Union[List[str], str]] = []

    @field_validator('labels', mode='before')
    @classmethod
    def convert_labels_to_list(cls, v):
        """Coerce a bare string to a one-element list; map falsy values to []."""
        return [v] if isinstance(v, str) else (v or [])

    status: Optional[str] = None
    assignee: Optional[str] = None
    updated: Optional[str] = None
class AnalysisFlags(BaseModel):
    """Structured analysis flags extracted for a Jira issue by the LLM.

    On construction, after normal pydantic validation, the instance is
    best-effort reported to Langfuse (when enabled); tracking failures are
    logged and never propagate to the caller.
    """

    hasMultipleEscalations: bool = Field(description="Is there evidence of multiple escalation attempts?")
    # Fix: in Pydantic v2 `Optional[...]` alone does NOT imply a default, so the
    # original Field(description=...) made this a *required* field. default=None
    # makes it genuinely optional, matching the annotation's intent.
    customerSentiment: Optional[str] = Field(default=None, description="Overall customer sentiment (e.g., 'neutral', 'frustrated', 'calm').")

    def __init__(self, **data):
        """Validate the flags, then emit a Langfuse usage trace if configured.

        Raises:
            pydantic.ValidationError: if `data` fails model validation.
        """
        super().__init__(**data)
        # Track model usage only if Langfuse is enabled and a client attribute
        # was attached to settings at startup.
        if settings.langfuse.enabled and hasattr(settings, 'langfuse_client'):
            try:
                if settings.langfuse_client is None:
                    # Enabled but never initialized — warn and skip tracking.
                    logger.warning("Langfuse client is None despite being enabled")
                    return
                settings.langfuse_client.trace(
                    name="LLM Model Usage",
                    input=data,
                    metadata={
                        # Model name depends on which backend mode is configured.
                        "model": settings.llm.model if settings.llm.mode == 'openai' else settings.llm.ollama_model,
                        "analysis_flags": {
                            "hasMultipleEscalations": self.hasMultipleEscalations,
                            "customerSentiment": self.customerSentiment
                        }
                    }
                )
            except Exception as e:
                # Tracking is best-effort: never let telemetry break model creation.
                logger.error(f"Failed to track model usage: {e}")