jira-webhook-llm/docker-compose.yml
Ireneusz Bachanowicz 0c468c0a69 feat: Implement Jira Webhook Handler with LLM Integration
- Added FastAPI application to handle Jira webhooks.
- Created Pydantic models for Jira payload and LLM output.
- Integrated LangChain with OpenAI and Ollama for LLM processing.
- Set up Langfuse for tracing and monitoring.
- Implemented analysis logic for Jira tickets, including sentiment analysis and label suggestions.
- Added test endpoint for LLM integration.
- Updated requirements.txt to include necessary dependencies and versions.
2025-07-13 11:44:19 +02:00

name: jira-llm-stack
services:
  # Service for the Ollama server
  ollama:
    image: ollama/ollama:latest
    # Map port 11434 from the container to the host machine
    # This allows you to access Ollama directly from your host if needed (e.g., via curl http://localhost:11434)
    ports:
      - "11434:11434"
    # Mount a volume to persist Ollama models and data
    # This prevents redownloading models every time the container restarts
    volumes:
      - ollama_data:/root/.ollama
    # Override the entrypoint with 'sh -c' so the server start and the model pull
    # can be combined in a single shell command. 'ollama serve' never exits on its
    # own, so it is started in the background, given a few seconds to come up,
    # the model is pulled, and 'wait' then keeps the server in the foreground so
    # the container stays alive.
    entrypoint: ["sh"]
    command: ["-c", "ollama serve & sleep 5; ollama pull phi4-mini:latest; wait"]
    # Restart the container if it exits unexpectedly
    restart: unless-stopped

  # Service for your FastAPI application
  app:
    # Build the Docker image for your app from the current directory (where the Dockerfile is located)
    build: .
    # Map port 8000 from the container to the host machine
    # This allows you to access your FastAPI app at http://localhost:8000
    ports:
      - "8000:8000"
    # Define environment variables for your FastAPI application
    # These will be read by pydantic-settings in your app
    environment:
      # Set the LLM mode to 'ollama'
      LLM_MODE: ollama
      # Point to the Ollama service within the Docker Compose network:
      # 'ollama' is the service name, which acts as a hostname within the network
      OLLAMA_BASE_URL: http://ollama:11434
      # Specify the model to use; it should match the model pulled by the ollama service
      OLLAMA_MODEL: phi4-mini:latest
      # If you have an OpenAI API key in your settings, but want to ensure it's not used
      # when LLM_MODE is ollama, you can explicitly set it to empty or omit it.
      # OPENAI_API_KEY: ""
      # OPENAI_MODEL: ""
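      # Langfuse is used for tracing (per the commit description). If the app reads
      # the standard Langfuse SDK variables from the environment (an assumption;
      # it may load them from a config file instead), they could be provided here:
      # LANGFUSE_PUBLIC_KEY: pk-lf-...
      # LANGFUSE_SECRET_KEY: sk-lf-...
      # LANGFUSE_HOST: https://cloud.langfuse.com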
    # Start the Ollama service before the app. Note that a plain depends_on only
    # waits for the ollama container to start, not for the server to be ready.
    depends_on:
      - ollama
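    # If the healthcheck sketched in the ollama service above were enabled, the
    # long form of depends_on could wait for it instead (sketch only):
    # depends_on:
    #   ollama:
    #     condition: service_healthy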
    # Restart the container if it exits unexpectedly
    restart: unless-stopped
    # Mount your current project directory into the container
    # This is useful for development, as changes to your code will be reflected
    # without rebuilding the image (if you're using a hot-reloading server like uvicorn --reload)
    # For production, you might remove this and rely solely on the Dockerfile copy.
    volumes:
      - .:/app
    # Command to run your FastAPI application using Uvicorn
    # --host 0.0.0.0 is crucial for the app to be accessible from outside the container
    # --reload is good for development; remove for production
    command: uvicorn jira-webhook-llm:app --host 0.0.0.0 --port 8000 --reload

# Define named volumes for persistent data
volumes:
  ollama_data:
    driver: local
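
# To bring the stack up (build the app image and start both services):
#   docker compose up --build -d
# The model download happens on the first start; later starts reuse the
# ollama_data volume, so the pull only verifies the existing model.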