# Default application configuration
llm:
  # The mode to run the application in.
  # Can be 'openai' or 'ollama'.
  # This can be overridden by the LLM_MODE environment variable.
  mode: ollama

  # Settings for OpenAI-compatible APIs (like OpenRouter)
  openai:
    # It's HIGHLY recommended to set this via an environment variable
    # instead of saving it in this file.
    # Can be overridden by OPENAI_API_KEY
    api_key: "sk-or-v1-..."
    # Can be overridden by OPENAI_API_BASE_URL
    api_base_url: "https://openrouter.ai/api/v1"
    # Can be overridden by OPENAI_MODEL
    model: "deepseek/deepseek-chat:free"

  # Settings for Ollama
  ollama:
    # Can be overridden by OLLAMA_BASE_URL
    base_url: "http://192.168.0.140:11434"
    # base_url: "https://api-amer-sandbox-gbl-mdm-hub.pfizer.com/ollama"
    # Can be overridden by OLLAMA_MODEL
    model: "phi4-mini:latest"
    # model: "qwen3:1.7b"
    # model: "smollm:360m"
    # model: "qwen3:0.6b"

# Langfuse configuration for observability and analytics
langfuse:
  # Enable or disable Langfuse integration.
  # Can be overridden by the LANGFUSE_ENABLED environment variable.
  enabled: true
  # Langfuse API credentials.
  # It's HIGHLY recommended to set these via environment variables
  # instead of saving them in this file.
  public_key: "pk-lf-17dfde63-93e2-4983-8aa7-2673d3ecaab8"
  secret_key: "sk-lf-ba41a266-6fe5-4c90-a483-bec8a7aaa321"
  host: "https://cloud.langfuse.com"

# Processor configuration
processor:
  # Interval in seconds between polling for new Jira analysis requests.
  # Can be overridden by the PROCESSOR_POLL_INTERVAL_SECONDS environment variable.
  poll_interval_seconds: 30
  # Maximum number of retries for failed Jira analysis requests.
  # Can be overridden by the PROCESSOR_MAX_RETRIES environment variable.
  max_retries: 5
  # Initial delay in seconds before the first retry attempt (exponential backoff).
  # Can be overridden by the PROCESSOR_INITIAL_RETRY_DELAY_SECONDS environment variable.
  initial_retry_delay_seconds: 60
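
# Example of overriding the values above at runtime via environment variables.
# This is a minimal sketch kept as comments so the file stays valid YAML; the
# variable names come from the comments above, but the exact startup command
# and any .env handling depend on how the application is deployed, so treat
# the lines below as illustrative only:
#
#   export LLM_MODE=openai
#   export OPENAI_API_KEY="sk-or-v1-..."
#   export OPENAI_MODEL="deepseek/deepseek-chat:free"
#   export LANGFUSE_ENABLED=false
#   export PROCESSOR_POLL_INTERVAL_SECONDS=60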