- Simplified the FastAPI application structure and improved error handling with middleware.
- Introduced a retry decorator for asynchronous functions to enhance reliability (a sketch of such a decorator follows below).
- Modularized the LLM initialization and prompt loading into separate functions for better maintainability.
- Updated Pydantic models for the Jira webhook payload and analysis flags to ensure proper validation and structure.
- Implemented a structured logging configuration for better traceability and debugging.
- Added comprehensive unit tests for prompt loading, response validation, and webhook handling.
- Established a CI/CD pipeline with GitHub Actions for automated testing and coverage reporting.
- Enhanced the prompt template for LLM analysis to include specific instructions for handling escalations.
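The retry decorator mentioned above is not part of the file shown here. A minimal sketch of how an async retry with exponential backoff could look; the name retry_async and its parameters are illustrative assumptions, not taken from the repository:

import asyncio
import functools

from loguru import logger


def retry_async(max_attempts: int = 3, base_delay: float = 1.0):
    """Retry an async function with exponential backoff on any exception (illustrative sketch)."""
    def decorator(func):
        @functools.wraps(func)
        async def wrapper(*args, **kwargs):
            for attempt in range(1, max_attempts + 1):
                try:
                    return await func(*args, **kwargs)
                except Exception as e:
                    if attempt == max_attempts:
                        raise
                    delay = base_delay * 2 ** (attempt - 1)
                    logger.warning(f"Attempt {attempt} failed ({e}); retrying in {delay:.1f}s")
                    await asyncio.sleep(delay)
        return wrapper
    return decorator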
90 lines
3.0 KiB
Python
import sys
from threading import Thread
from typing import Optional

from loguru import logger
from pydantic import validator, ConfigDict
from pydantic_settings import BaseSettings
from watchfiles import watch, Change


class LogConfig(BaseSettings):
    """Logging settings, read from LOG_-prefixed environment variables."""

    level: str = 'INFO'

    model_config = ConfigDict(
        env_prefix='LOG_',
        extra='ignore'
    )


class LLMConfig(BaseSettings):
    """LLM provider settings, read from LLM_-prefixed environment variables or the .env file."""

    mode: str = 'ollama'

    # OpenAI settings
    openai_api_key: Optional[str] = None
    openai_api_base_url: Optional[str] = None
    openai_model: Optional[str] = None

    # Ollama settings
    ollama_base_url: Optional[str] = None
    ollama_model: Optional[str] = None

    @validator('mode')
    def validate_mode(cls, v):
        if v not in ['openai', 'ollama']:
            raise ValueError("LLM mode must be either 'openai' or 'ollama'")
        return v

    model_config = ConfigDict(
        env_prefix='LLM_',
        env_file='.env',
        env_file_encoding='utf-8',
        extra='ignore'
    )


class Settings:
    """Aggregates the sub-configurations, validates them, and hot-reloads on config file changes."""

    def __init__(self):
        self.log = LogConfig()
        self.llm = LLMConfig()
        self._validate()
        self._start_watcher()

    def _validate(self):
        logger.info(f"LLM mode set to: '{self.llm.mode}'")

        # Each provider mode requires its own set of connection settings.
        if self.llm.mode == 'openai':
            if not self.llm.openai_api_key:
                raise ValueError("LLM mode is 'openai', but OPENAI_API_KEY is not set.")
            if not self.llm.openai_api_base_url:
                raise ValueError("LLM mode is 'openai', but OPENAI_API_BASE_URL is not set.")
            if not self.llm.openai_model:
                raise ValueError("LLM mode is 'openai', but OPENAI_MODEL is not set.")

        elif self.llm.mode == 'ollama':
            if not self.llm.ollama_base_url:
                raise ValueError("LLM mode is 'ollama', but OLLAMA_BASE_URL is not set.")
            if not self.llm.ollama_model:
                raise ValueError("LLM mode is 'ollama', but OLLAMA_MODEL is not set.")

        logger.info("Configuration validated successfully.")

    def _start_watcher(self):
        # Watch the config file from a daemon thread and reload the LLM settings when it changes.
        def watch_config():
            for changes in watch('config/application.yml'):
                for change in changes:
                    if change[0] == Change.modified:
                        logger.info("Configuration file modified, reloading settings...")
                        try:
                            self.llm = LLMConfig()
                            self._validate()
                            logger.info("Configuration reloaded successfully")
                        except Exception as e:
                            logger.error(f"Error reloading configuration: {e}")

        Thread(target=watch_config, daemon=True).start()


# Create a single, validated instance of the settings to be imported by other modules.
try:
    settings = Settings()
except ValueError as e:
    logger.error(f"FATAL: {e}")
    logger.error("Application shutting down due to configuration error.")
    sys.exit(1)
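A minimal usage sketch, assuming this module is importable as config (the module name is an assumption). Because the models use env_prefix='LLM_' and env_prefix='LOG_', pydantic-settings reads prefixed variables such as LLM_MODE, LLM_OLLAMA_BASE_URL, LLM_OLLAMA_MODEL, and LOG_LEVEL from the environment or the .env file:

# Usage sketch; the module name 'config' and the example values are assumptions.
import os

# Provide the prefixed variables before importing, so validation passes on import.
os.environ.setdefault("LLM_MODE", "ollama")
os.environ.setdefault("LLM_OLLAMA_BASE_URL", "http://localhost:11434")
os.environ.setdefault("LLM_OLLAMA_MODEL", "llama3")
os.environ.setdefault("LOG_LEVEL", "DEBUG")

from config import settings  # triggers validation and starts the file watcher

print(settings.llm.mode)   # 'ollama'
print(settings.log.level)  # 'DEBUG'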