jira-webhook-llm/config.py
Ireneusz Bachanowicz 8c1ab79eeb
Refactor LLM analysis chain and models; remove deprecated prompt files
- Updated `chains.py` to streamline imports and improve error handling for LLM initialization.
- Modified `models.py` to enhance the `AnalysisFlags` model with field aliases and added datetime import.
- Deleted outdated prompt files (`jira_analysis_v1.0.0.txt`, `jira_analysis_v1.1.0.txt`) to clean up the repository.
- Recreated `jira_analysis_v1.2.0.txt` with updated instructions for analysis.
- Removed `logging_config.py` and test files to simplify the codebase.
- Updated webhook handler to improve error handling and logging.
- Added a new shared store for managing processing requests in a thread-safe manner (a sketch of this pattern follows below).
2025-07-21 01:06:45 +02:00
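
The shared store mentioned in the last bullet is not part of this file; a minimal sketch of what such a thread-safe request store might look like, assuming a lock-guarded dict keyed by request ID (all names hypothetical):

import threading
from typing import Any, Dict, Optional

class SharedRequestStore:
    """Hypothetical thread-safe store for in-flight processing requests."""

    def __init__(self) -> None:
        self._lock = threading.Lock()
        self._requests: Dict[str, Any] = {}

    def put(self, request_id: str, payload: Any) -> None:
        # Serialize writers so concurrent webhook handlers cannot race.
        with self._lock:
            self._requests[request_id] = payload

    def pop(self, request_id: str) -> Optional[Any]:
        # Atomically remove and return a request, or None if absent.
        with self._lock:
            return self._requests.pop(request_id, None)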


import sys
from pathlib import Path
from typing import Optional

import yaml
from pydantic import field_validator
from pydantic_settings import BaseSettings, SettingsConfigDict

class LangfuseConfig(BaseSettings):
    """Langfuse tracing settings, populated from LANGFUSE_* variables."""
    enabled: bool = False
    secret_key: Optional[str] = None
    public_key: Optional[str] = None
    host: Optional[str] = None

    model_config = SettingsConfigDict(
        env_prefix='LANGFUSE_',
        env_file='.env',
        env_file_encoding='utf-8',
        extra='ignore'
    )

class LLMConfig(BaseSettings):
    """LLM backend settings; `mode` selects between OpenAI and Ollama."""
    mode: str = 'ollama'

    # OpenAI settings
    openai_api_key: Optional[str] = None
    openai_api_base_url: Optional[str] = None
    openai_model: Optional[str] = None

    # Ollama settings
    ollama_base_url: Optional[str] = None
    ollama_model: Optional[str] = None

    @field_validator('mode')
    @classmethod
    def validate_mode(cls, v):
        if v not in ('openai', 'ollama'):
            raise ValueError("LLM mode must be either 'openai' or 'ollama'")
        return v

    model_config = SettingsConfigDict(
        env_prefix='LLM_',
        env_file='.env',
        env_file_encoding='utf-8',
        extra='ignore'
    )

class ApiConfig(BaseSettings):
    """Inbound API authentication settings (API_* variables)."""
    api_key: Optional[str] = None

    model_config = SettingsConfigDict(
        env_prefix='API_',
        env_file='.env',
        env_file_encoding='utf-8',
        extra='ignore'
    )

class ProcessorConfig(BaseSettings):
    """Background processor tuning (PROCESSOR_* variables)."""
    poll_interval_seconds: int = 10
    max_retries: int = 5
    initial_retry_delay_seconds: int = 60

    model_config = SettingsConfigDict(
        env_prefix='PROCESSOR_',
        env_file='.env',
        env_file_encoding='utf-8',
        extra='ignore'
    )

class Settings:
    def __init__(self):
        try:
            # Load configuration from the YAML file; values passed here as
            # init kwargs take precedence over environment variables.
            yaml_config = self._load_yaml_config()

            # Initialize per-component configurations
            self.llm = LLMConfig(**yaml_config.get('llm', {}))
            self.api = ApiConfig(**yaml_config.get('api', {}))
            self.processor = ProcessorConfig(**yaml_config.get('processor', {}))
            self.langfuse = LangfuseConfig(**yaml_config.get('langfuse', {}))

            self._validate()
        except Exception as e:
            print(f"Configuration initialization failed: {e}", file=sys.stderr)
            sys.exit(1)

    def _load_yaml_config(self) -> dict:
        config_path = Path('config/application.yml')
        if not config_path.exists():
            return {}
        try:
            with open(config_path, 'r') as f:
                return yaml.safe_load(f) or {}
        except Exception as e:
            # Fall back to env-only configuration, but surface the parse
            # error instead of swallowing it silently.
            print(f"Warning: failed to parse {config_path}: {e}", file=sys.stderr)
            return {}

    def _validate(self):
        # With env_prefix='LLM_', the corresponding environment variables are
        # LLM_OPENAI_* and LLM_OLLAMA_*; the same keys may also come from YAML.
        if self.llm.mode == 'openai':
            if not self.llm.openai_api_key:
                raise ValueError("LLM_OPENAI_API_KEY is not set.")
            if not self.llm.openai_api_base_url:
                raise ValueError("LLM_OPENAI_API_BASE_URL is not set.")
            if not self.llm.openai_model:
                raise ValueError("LLM_OPENAI_MODEL is not set.")
        elif self.llm.mode == 'ollama':
            if not self.llm.ollama_base_url:
                raise ValueError("LLM_OLLAMA_BASE_URL is not set.")
            if not self.llm.ollama_model:
                raise ValueError("LLM_OLLAMA_MODEL is not set.")

# Shared settings instance imported by the rest of the application
settings = Settings()
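
For reference, `_load_yaml_config` expects `config/application.yml` with top-level keys matching the sections above (`llm`, `api`, `processor`, `langfuse`). A hypothetical example; all values are illustrative:

llm:
  mode: ollama
  ollama_base_url: http://localhost:11434
  ollama_model: llama3
api:
  api_key: changeme
processor:
  poll_interval_seconds: 10
  max_retries: 5
langfuse:
  enabled: false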
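Because each section sets an `env_prefix`, the same fields can also come from the environment or `.env`; note that YAML values arrive as init kwargs, which pydantic-settings prioritizes over environment variables. A quick sketch (values illustrative):

import os

# With no init kwargs, the field is read from the prefixed variable.
os.environ['LLM_OLLAMA_MODEL'] = 'llama3'
assert LLMConfig().ollama_model == 'llama3'

# An explicit init kwarg (e.g. a value loaded from YAML) wins over the env var.
assert LLMConfig(ollama_model='mistral').ollama_model == 'mistral'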
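The `validate_mode` validator turns an unsupported backend name into a `ValidationError` at construction time:

from pydantic import ValidationError

try:
    LLMConfig(mode='anthropic')
except ValidationError as e:
    print(e)  # mode: LLM mode must be either 'openai' or 'ollama'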
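Elsewhere in the application the module-level instance is imported directly, assuming the file is importable as `config`:

from config import settings

if settings.llm.mode == 'ollama':
    print(settings.llm.ollama_base_url)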