jira-webhook-llm/llm/models.py
Ireneusz Bachanowicz 8c1ab79eeb
Refactor LLM analysis chain and models; remove deprecated prompt files
- Updated `chains.py` to streamline imports and improve error handling for LLM initialization.
- Modified `models.py` to enhance the `AnalysisFlags` model with field aliases and add a `datetime` import.
- Deleted outdated prompt files (`jira_analysis_v1.0.0.txt`, `jira_analysis_v1.1.0.txt`, `jira_analysis_v1.2.0.txt`) to clean up the repository.
- Introduced a replacement `jira_analysis_v1.2.0.txt` with updated analysis instructions.
- Removed `logging_config.py` and test files to simplify the codebase.
- Updated webhook handler to improve error handling and logging.
- Added a new shared store for managing processing requests in a thread-safe manner.
2025-07-21 01:06:45 +02:00


from datetime import datetime
from enum import Enum
from typing import List, Optional, Union

from loguru import logger
from pydantic import BaseModel, ConfigDict, Field, field_validator

from config import settings


class LLMResponse(BaseModel):
    status: str
    message: str


class CustomerSentiment(str, Enum):
    NEUTRAL = "neutral"
    FRUSTRATED = "frustrated"
    CALM = "calm"
    # Add other sentiments as needed

class JiraWebhookPayload(BaseModel):
    # Jira sends camelCase keys; the alias generator maps snake_case field
    # names to camelCase, and populate_by_name allows either spelling.
    model_config = ConfigDict(
        alias_generator=lambda x: ''.join(
            word.capitalize() if i > 0 else word
            for i, word in enumerate(x.split('_'))
        ),
        populate_by_name=True,
    )

    issueKey: str
    summary: str
    description: Optional[str] = None
    comment: Optional[str] = None
    labels: Optional[Union[List[str], str]] = []
    status: Optional[str] = None
    assignee: Optional[str] = None
    updated: Optional[str] = None

    @field_validator('labels', mode='before')
    @classmethod
    def convert_labels_to_list(cls, v):
        # Jira may deliver a single label as a bare string; normalise to a list.
        if isinstance(v, str):
            return [v]
        return v or []
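
# Example usage (illustrative; the issue key and label values are hypothetical):
#
#     payload = JiraWebhookPayload(issueKey="PROJ-123", summary="Demo", labels="urgent")
#     assert payload.labels == ["urgent"]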

class AnalysisFlags(BaseModel):
    hasMultipleEscalations: bool = Field(
        alias="Hasmultipleescalations",
        description="Is there evidence of multiple escalation attempts?",
    )
    customerSentiment: Optional[CustomerSentiment] = Field(
        alias="CustomerSentiment",
        description="Overall customer sentiment (e.g., 'neutral', 'frustrated', 'calm').",
    )

    def __init__(self, **data):
        super().__init__(**data)
        # Track model usage if Langfuse is enabled and a client is available
        if settings.langfuse.enabled and hasattr(settings, 'langfuse_client'):
            try:
                if settings.langfuse_client is None:
                    logger.warning("Langfuse client is None despite being enabled")
                    return
                settings.langfuse_client.start_span(
                    name="LLM Model Usage",
                    input=data,
                    metadata={
                        "model": settings.llm.model if settings.llm.mode == 'openai' else settings.llm.ollama_model,
                        "analysis_flags": {
                            "hasMultipleEscalations": self.hasMultipleEscalations,
                            "customerSentiment": self.customerSentiment.value if self.customerSentiment else None,
                        },
                    },
                ).end()  # End the span immediately; it exists only to record model usage
            except Exception as e:
                logger.error(f"Failed to track model usage: {e}")
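
# Example usage (illustrative; values are hypothetical): LLM output is expected
# to arrive keyed by the field aliases above, e.g.
#
#     flags = AnalysisFlags(
#         Hasmultipleescalations=True,
#         CustomerSentiment="frustrated",
#     )
#
# Note that constructing the model also fires the Langfuse usage span in
# __init__ when tracking is enabled.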

class JiraAnalysisResponse(BaseModel):
    model_config = ConfigDict(from_attributes=True)

    id: int
    issue_key: str
    status: str
    issue_summary: str
    request_payload: dict
    analysis_result: Optional[dict] = None
    created_at: datetime
    updated_at: datetime
    error_message: Optional[str] = None
    raw_response: Optional[dict] = None