- Simplified the FastAPI application structure and improved error handling with middleware.
- Introduced a retry decorator for asynchronous functions to enhance reliability (a minimal sketch of the pattern follows this list).
- Modularized the LLM initialization and prompt loading into separate functions for better maintainability.
- Updated Pydantic models for the Jira webhook payload and analysis flags to ensure proper validation and structure.
- Implemented a structured logging configuration for better traceability and debugging.
- Added comprehensive unit tests for prompt loading, response validation, and webhook handling.
- Established a CI/CD pipeline with GitHub Actions for automated testing and coverage reporting.
- Enhanced the prompt template for LLM analysis to include specific instructions for handling escalations.
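The retry decorator mentioned above does not appear on this page. As a rough sketch of the pattern for async callables, assuming a simple fixed-delay policy (the names async_retry, retries, and delay are illustrative, not the repository's actual implementation):

import asyncio
import functools

from loguru import logger


def async_retry(retries: int = 3, delay: float = 1.0):
    """Retry an async callable a fixed number of times, sleeping between attempts."""
    def decorator(func):
        @functools.wraps(func)
        async def wrapper(*args, **kwargs):
            last_exc = None
            for attempt in range(1, retries + 1):
                try:
                    return await func(*args, **kwargs)
                except Exception as exc:
                    last_exc = exc
                    logger.warning(f"{func.__name__} failed (attempt {attempt}/{retries}): {exc}")
                    if attempt < retries:
                        await asyncio.sleep(delay)
            raise last_exc
        return wrapper
    return decorator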
74 lines
2.5 KiB
Python
import sys  # needed for sys.exit() below

from langchain_ollama import OllamaLLM
from langchain_openai import ChatOpenAI
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from loguru import logger

from config import settings
from .models import AnalysisFlags

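# Assumed settings layout (inferred from the attribute accesses below): a nested
# `llm` section exposing `mode`, `ollama_model`, and `ollama_base_url`, plus
# top-level `openai_model`, `openai_api_key`, and `openai_api_base_url`.
# The authoritative definition lives in the config module.
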
# Initialize LLM
llm = None
if settings.llm.mode == 'openai':
    logger.info(f"Initializing ChatOpenAI with model: {settings.openai_model}")
    llm = ChatOpenAI(
        model=settings.openai_model,
        temperature=0.7,
        max_tokens=2000,
        api_key=settings.openai_api_key,
        base_url=settings.openai_api_base_url
    )
elif settings.llm.mode == 'ollama':
    logger.info(f"Initializing OllamaLLM with model: {settings.llm.ollama_model} at {settings.llm.ollama_base_url}")
    llm = OllamaLLM(
        model=settings.llm.ollama_model,
        base_url=settings.llm.ollama_base_url,
        streaming=False
    )

if llm is None:
    logger.error("LLM could not be initialized. Exiting.")
    sys.exit(1)

# Set up Output Parser for structured JSON
parser = JsonOutputParser(pydantic_object=AnalysisFlags)

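# AnalysisFlags (imported from .models) is assumed to declare at least the two
# fields checked by validate_response() below, "hasMultipleEscalations" and
# "customerSentiment"; here the model is used to generate the JSON format
# instructions that get injected into the prompt.
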
# Load prompt template from file
def load_prompt_template(version="v1.0.0"):
    try:
        with open(f"llm/prompts/jira_analysis_{version}.txt", "r") as f:
            template = f.read()
        return PromptTemplate(
            template=template,
            input_variables=[
                "issueKey", "summary", "description", "status", "labels",
                "assignee", "updated", "comment"
            ],
            partial_variables={"format_instructions": parser.get_format_instructions()},
        )
    except Exception as e:
        logger.error(f"Failed to load prompt template: {str(e)}")
        raise

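# The template file llm/prompts/jira_analysis_v1.0.0.txt is expected to contain
# placeholders for the input variables above ({issueKey}, {summary},
# {description}, {status}, {labels}, {assignee}, {updated}, {comment}) plus a
# {format_instructions} slot filled from the parser's JSON schema.
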
# Fallback prompt template
FALLBACK_PROMPT = PromptTemplate(
    template="Please analyze this Jira ticket and provide a basic summary.",
    input_variables=["issueKey", "summary"]
)

# Create chain with fallback mechanism
def create_analysis_chain():
    try:
        prompt_template = load_prompt_template()
        return prompt_template | llm | parser
    except Exception as e:
        logger.warning(f"Using fallback prompt due to error: {str(e)}")
        return FALLBACK_PROMPT | llm | parser

# Initialize analysis chain
analysis_chain = create_analysis_chain()

# Response validation function
def validate_response(response: dict) -> bool:
    required_fields = ["hasMultipleEscalations", "customerSentiment"]
    return all(field in response for field in required_fields)
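
As a usage sketch of the chain this module builds, assuming the module sits at something like llm/chains.py inside the package (the import path, sample payload values, and field contents are illustrative, not taken from the repository):

from loguru import logger

# Hypothetical import path for this module; adjust to the real package layout.
from llm.chains import analysis_chain, validate_response

sample_ticket = {
    "issueKey": "SUP-123",
    "summary": "Customer reports repeated outages",
    "description": "Second escalation this week; customer is frustrated.",
    "status": "In Progress",
    "labels": ["escalation"],
    "assignee": "jdoe",
    "updated": "2024-01-01T12:00:00Z",
    "comment": "Please prioritise this ticket.",
}

# prompt | llm | parser is a LangChain RunnableSequence, so .invoke() (or
# .ainvoke() from async handlers) runs the full pipeline and returns the parsed dict.
result = analysis_chain.invoke(sample_ticket)

if validate_response(result):
    logger.info(f"Analysis flags: {result}")
else:
    logger.warning(f"LLM response missing required fields: {result}")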