144 lines
5.1 KiB
Python
144 lines
5.1 KiB
Python
import os
|
|
import logging
|
|
import sys
|
|
from typing import Optional
|
|
from pydantic_settings import BaseSettings
|
|
from langfuse._client.client import Langfuse
|
|
from pydantic import field_validator
|
|
from pydantic_settings import SettingsConfigDict
|
|
import yaml
|
|
_logger = logging.getLogger(__name__)
|
|
from pathlib import Path
|
|
|
|
class LangfuseConfig(BaseSettings):
    """Langfuse observability settings.

    Values are sourced from ``LANGFUSE_``-prefixed environment variables
    (optionally via a ``.env`` file); unknown keys are ignored. All
    credentials default to ``None`` and the integration is off by default.
    """

    # Master switch: the client is only created when this is True.
    enabled: bool = False
    # API credentials and endpoint; all three are required at runtime
    # for the client to be initialized.
    secret_key: Optional[str] = None
    public_key: Optional[str] = None
    host: Optional[str] = None

    model_config = SettingsConfigDict(
        env_prefix='LANGFUSE_',
        env_file='.env',
        env_file_encoding='utf-8',
        extra='ignore',
    )
|
|
|
|
class LLMConfig(BaseSettings):
    """LLM provider settings.

    ``mode`` selects the active backend ('openai', 'ollama', or 'gemini');
    the per-provider fields below are flat so they can be populated either
    from ``LLM_``-prefixed environment variables or from a flattened YAML
    section. Which fields are mandatory depends on the chosen mode and is
    enforced later by ``Settings._validate``.
    """

    # Active backend; validated against the supported set below.
    mode: str = 'ollama'

    # OpenAI-compatible backend.
    openai_api_key: Optional[str] = None
    openai_api_base_url: Optional[str] = None
    openai_model: Optional[str] = None

    # Ollama backend.
    ollama_base_url: Optional[str] = None
    ollama_model: Optional[str] = None

    # Gemini backend.
    gemini_api_key: Optional[str] = None
    gemini_model: Optional[str] = None
    gemini_api_base_url: Optional[str] = None

    @field_validator('mode')
    @classmethod
    def validate_mode(cls, v):
        """Reject any mode outside the supported provider set."""
        if v not in {'openai', 'ollama', 'gemini'}:
            raise ValueError("LLM mode must be 'openai', 'ollama', or 'gemini'")
        return v

    model_config = SettingsConfigDict(
        env_prefix='LLM_',
        env_file='.env',
        env_file_encoding='utf-8',
        extra='ignore',
    )
|
|
|
|
|
|
class ProcessorConfig(BaseSettings):
    """Background-processor tuning knobs.

    Read from ``PROCESSOR_``-prefixed environment variables (or ``.env``);
    all values have safe defaults so this section is entirely optional.
    """

    # Seconds between polling cycles.
    poll_interval_seconds: int = 10
    # Retry policy for failed work items.
    max_retries: int = 5
    initial_retry_delay_seconds: int = 60

    model_config = SettingsConfigDict(
        env_prefix='PROCESSOR_',
        env_file='.env',
        env_file_encoding='utf-8',
        extra='ignore',
    )
|
|
|
|
class Settings:
    """Aggregate application configuration.

    Loads optional defaults from ``config/application.yml``, layers
    environment variables on top (handled per-field by the pydantic
    ``BaseSettings`` sub-configs), validates the mode-specific required
    fields, and lazily constructs a Langfuse client when fully configured.
    Any initialization failure is logged and aborts the process with
    exit code 1.
    """

    def __init__(self):
        try:
            # YAML provides defaults; environment variables (via
            # pydantic-settings) still win for any individual field.
            yaml_config = self._load_yaml_config()

            llm_config_data = yaml_config.get('llm', {})

            # Flatten the nested per-provider YAML sections into the flat
            # field names LLMConfig declares (e.g. openai.model -> openai_model).
            # `or {}` guards against explicit `null` sections in the YAML.
            mode = llm_config_data.get('mode', 'ollama')
            openai_settings = llm_config_data.get('openai') or {}
            ollama_settings = llm_config_data.get('ollama') or {}
            gemini_settings = llm_config_data.get('gemini') or {}

            combined_llm_settings = {
                'mode': mode,
                **{f'openai_{k}': v for k, v in openai_settings.items()},
                **{f'ollama_{k}': v for k, v in ollama_settings.items()},
                **{f'gemini_{k}': v for k, v in gemini_settings.items()},
            }

            self.llm = LLMConfig(**combined_llm_settings)
            self.processor = ProcessorConfig(**yaml_config.get('processor', {}))
            self.langfuse = LangfuseConfig(**yaml_config.get('langfuse', {}))

            # Only build the Langfuse client when all three credentials
            # are present; otherwise warn and leave it None so callers
            # can degrade gracefully.
            self.langfuse_client: Optional[Langfuse] = None
            if self.langfuse.enabled:
                if self.langfuse.secret_key and self.langfuse.public_key and self.langfuse.host:
                    self.langfuse_client = Langfuse(
                        public_key=self.langfuse.public_key,
                        secret_key=self.langfuse.secret_key,
                        host=self.langfuse.host
                    )
                else:
                    _logger.warning("Langfuse is enabled but missing one or more of LANGFUSE_SECRET_KEY, LANGFUSE_PUBLIC_KEY, or LANGFUSE_HOST. Langfuse client will not be initialized.")

            self._validate()

        except Exception as e:
            # Route through the module logger (was a bare print) so the
            # failure reaches configured log handlers before we exit.
            _logger.error("Configuration initialization failed: %s", e)
            sys.exit(1)

    def _load_yaml_config(self):
        """Return ``config/application.yml`` parsed as a dict.

        Returns an empty dict when the file is absent, empty, unreadable,
        or not valid YAML — configuration then comes from the environment
        alone. Failures are logged (previously they were swallowed silently).
        """
        config_path = Path('config/application.yml')
        if not config_path.exists():
            return {}
        try:
            with open(config_path, 'r') as f:
                # safe_load of an empty file yields None; normalize to {}.
                return yaml.safe_load(f) or {}
        except (OSError, yaml.YAMLError) as e:
            # Best-effort: fall back to env-only configuration, but say why.
            _logger.warning("Failed to load %s: %s", config_path, e)
            return {}

    def _validate(self):
        """Raise ValueError if the active LLM mode is missing required fields."""
        if self.llm.mode == 'openai':
            if not self.llm.openai_api_key:
                raise ValueError("OPENAI_API_KEY is not set.")
            if not self.llm.openai_api_base_url:
                raise ValueError("OPENAI_API_BASE_URL is not set.")
            if not self.llm.openai_model:
                raise ValueError("OPENAI_MODEL is not set.")
        elif self.llm.mode == 'ollama':
            if not self.llm.ollama_base_url:
                raise ValueError("OLLAMA_BASE_URL is not set.")
            if not self.llm.ollama_model:
                raise ValueError("OLLAMA_MODEL is not set.")
        elif self.llm.mode == 'gemini':
            if not self.llm.gemini_api_key:
                raise ValueError("GEMINI_API_KEY is not set.")
            if not self.llm.gemini_model:
                raise ValueError("GEMINI_MODEL is not set.")
|
|
|
|
# Module-level singleton, instantiated at import time; an invalid
# configuration aborts the importing process via sys.exit(1).
settings = Settings()