---
# Default application configuration
#
# NOTE(review): this file had lost all of its line breaks, which turned the
# entire document into a single trailing comment (YAML parsed it as empty).
# Structure below is reconstructed from the key/comment order — confirm the
# nesting against the config loader's expected schema.

llm:
  # The mode to run the application in.
  # Can be 'openai' or 'ollama'.
  # This can be overridden by the LLM_MODE environment variable.
  mode: ollama

  # Settings for OpenAI-compatible APIs (like OpenRouter)
  openai:
    # It's HIGHLY recommended to set this via an environment variable
    # instead of saving it in this file.
    # Can be overridden by OPENAI_API_KEY
    api_key: "sk-or-v1-..."
    # Can be overridden by OPENAI_API_BASE_URL
    api_base_url: "https://openrouter.ai/api/v1"
    # Can be overridden by OPENAI_MODEL
    model: "deepseek/deepseek-chat:free"

  # Settings for Ollama
  ollama:
    # Can be overridden by OLLAMA_BASE_URL
    base_url: "http://192.168.0.140:11434"
    # base_url: "https://api-amer-sandbox-gbl-mdm-hub.pfizer.com/ollama"
    # Can be overridden by OLLAMA_MODEL
    model: "phi4-mini:latest"
    # model: "qwen3:1.7b"
    # model: "smollm:360m"
    # model: "qwen3:0.6b"

# Langfuse configuration for observability and analytics
langfuse:
  # Enable or disable Langfuse integration
  # Can be overridden by LANGFUSE_ENABLED environment variable
  enabled: true

  # Langfuse API credentials
  # It's HIGHLY recommended to set these via environment variables
  # instead of saving them in this file.
  # NOTE(review): these look like real credentials committed to the file —
  # rotate them and move to environment variables / a secret store.
  public_key: "pk-lf-17dfde63-93e2-4983-8aa7-2673d3ecaab8"
  secret_key: "sk-lf-ba41a266-6fe5-4c90-a483-bec8a7aaa321"
  host: "https://cloud.langfuse.com"

# Redis configuration for rate limiting
redis:
  # Enable or disable rate limiting
  # Can be overridden by REDIS_ENABLED environment variable
  enabled: true

  # Redis connection settings
  # Can be overridden by REDIS_URL environment variable
  url: "redis://localhost:6379/0"

  # Rate limiting settings
  # NOTE(review): assumed to be nested under `redis` based on original
  # ordering — verify against the consumer's schema.
  rate_limit:
    # Time window in seconds
    window: 60
    # Maximum requests per window
    max_requests: 100