# jira-webhook-llm/config/application-cnf_amer.yml
# Last commit: Ireneusz Bachanowicz (d1fa9385e7) — "Release for qwen3:4b model"
# 2025-08-01 18:15:57 +02:00
---
# Default application configuration
llm:
  # The mode to run the application in.
  # Can be 'openai' or 'ollama'.
  # This can be overridden by the LLM_MODE environment variable.
  mode: ollama
  # Settings for OpenAI-compatible APIs (like OpenRouter)
  openai:
    # It's HIGHLY recommended to set this via an environment variable
    # instead of saving it in this file.
    # Can be overridden by OPENAI_API_KEY
    # api_key: "sk-or-v1-..."
    # api_key: "your-openai-api-key"  # Keep this commented out or set to a placeholder
    # Can be overridden by OPENAI_API_BASE_URL
    # api_base_url: "https://openrouter.ai/api/v1"
    # api_base_url: "https://api.openai.com/v1"  # Remove or comment out this line
    # Can be overridden by OPENAI_MODEL
    # model: "deepseek/deepseek-chat:free"
    # model: "gpt-4o"  # Keep this commented out or set to a placeholder
  # Settings for Gemini
  gemini:
    # It's HIGHLY recommended to set this via an environment variable
    # instead of saving it in this file.
    # Can be overridden by GEMINI_API_KEY
    api_key: ""
    # Can be overridden by GEMINI_MODEL
    # model: "gemini-2.5-flash"
    model: "gemini-2.5-flash-lite-preview-06-17"
    # Can be overridden by GEMINI_API_BASE_URL
    api_base_url: "https://generativelanguage.googleapis.com/v1beta/"
  # Settings for Ollama
  ollama:
    # Can be overridden by OLLAMA_BASE_URL
    base_url: "http://ollama-jira:11434"
    # base_url: "https://api-amer-sandbox-gbl-mdm-hub.pfizer.com/ollama"
    # Can be overridden by OLLAMA_MODEL
    # model: "phi4-mini:latest"
    # model: "qwen3:1.7b"
    # model: "smollm:360m"
    # model: "qwen3:0.6b"
    model: "qwen3:4b"
# Langfuse configuration for observability and analytics
langfuse:
  # Enable or disable Langfuse integration
  # Can be overridden by LANGFUSE_ENABLED environment variable
  enabled: false
  # Langfuse API credentials
  # It's HIGHLY recommended to set these via environment variables
  # instead of saving them in this file
  # NOTE: the values below are truncated prefix placeholders, not real keys;
  # supply full credentials via LANGFUSE_PUBLIC_KEY / LANGFUSE_SECRET_KEY.
  public_key: "pk-lf-"
  secret_key: "sk-lf-"
  # host: "https://cloud.langfuse.com"
  # host: "http://192.168.0.122:3000"
# Processor configuration
processor:
  # Interval in seconds between polling for new Jira analysis requests
  # Can be overridden by PROCESSOR_POLL_INTERVAL_SECONDS environment variable
  poll_interval_seconds: 10
  # Maximum number of retries for failed Jira analysis requests
  # (0 disables retries entirely)
  # Can be overridden by PROCESSOR_MAX_RETRIES environment variable
  max_retries: 0
  # Initial delay in seconds before the first retry attempt (exponential backoff)
  # Can be overridden by PROCESSOR_INITIAL_RETRY_DELAY_SECONDS environment variable
  initial_retry_delay_seconds: 60