feat: Implement Jira Webhook Handler with LLM Integration

- Added FastAPI application to handle Jira webhooks.
- Created Pydantic models for Jira payload and LLM output.
- Integrated LangChain with OpenAI and Ollama for LLM processing.
- Set up Langfuse for tracing and monitoring.
- Implemented analysis logic for Jira tickets, including sentiment analysis and label suggestions.
- Added test endpoint for LLM integration.
- Updated requirements.txt to include necessary dependencies and versions.
Ireneusz Bachanowicz 2025-07-13 11:44:19 +02:00
parent 9fdea59554
commit 0c468c0a69
9 changed files with 1632 additions and 0 deletions
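
For reference, a request like the following exercises the new /jira-webhook endpoint once the service is running. This is a minimal sketch with made-up issue data, assuming the default port 8000 used in the Dockerfile; it relies only on the Python standard library:

import json
import urllib.request

payload = {
    "issueKey": "ABC-123",            # hypothetical issue
    "summary": "Customer reports repeated outage",
    "description": "Service has failed three times this week.",
    "comment": "Customer asked for an update again today.",
    "labels": ["incident"],
    "status": "Open",
    "assignee": "jdoe",
    "updated": "2025-07-13T09:00:00Z",
}
req = urllib.request.Request(
    "http://localhost:8000/jira-webhook",
    data=json.dumps(payload).encode(),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp))            # {"status": "success", "analysis_flags": {...}}

The response mirrors the handler's return value: a status field plus the analysis_flags object produced by the LLM chain.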

.gitignore (vendored, new file, 49 lines)
# Operating System files
.DS_Store
.localized
Thumbs.db
Desktop.ini
# Python
__pycache__/
*.pyc
*.pyo
*.pyd
.Python
env/
venv/
*.egg
*.egg-info/
build/
dist/
# Editor files (e.g., Visual Studio Code, Sublime Text, Vim)
.vscode/
*.sublime-project
*.sublime-workspace
.idea/
*.swp
*.swo
# Logs and temporary files
log/
*.log
*.tmp
tmp/
# Package managers
node_modules/
yarn.lock
package-lock.json
# Dependencies for compiled languages (e.g., C++, Java)
bin/
obj/
*.exe
*.dll
*.jar
*.class
# Miscellaneous
.env
.DS_Store

Dockerfile (new file, 52 lines)
# syntax=docker/dockerfile:1.4
# --- Stage 1: Build Dependencies ---
# Using a specific, stable Python version on Alpine for a small final image.
FROM python:3.10-alpine3.18 AS builder
WORKDIR /app
# Install build dependencies for Python packages.
RUN apk add --no-cache --virtual .build-deps \
    build-base \
    gcc \
    musl-dev \
    python3-dev \
    linux-headers
# Copy only the requirements file first to leverage Docker's build cache.
COPY requirements.txt .
# Install Python dependencies.
RUN pip install --no-cache-dir -r requirements.txt
# Remove build dependencies to keep the final image lean.
RUN apk del .build-deps
# --- Stage 2: Runtime Environment ---
# Start fresh with a lean Alpine Python image.
FROM python:3.10-alpine3.18
WORKDIR /app
# Copy installed Python packages from the builder stage.
COPY --from=builder /usr/local/lib/python3.10/site-packages /usr/local/lib/python3.10/site-packages
COPY --from=builder /usr/local/bin /usr/local/bin
# Set environment variables for Python.
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1
# Copy the configuration directory first.
# If only code changes, this layer remains cached.
COPY config ./config
# Copy your application source code.
COPY jira-webhook-llm.py .
COPY config.py .
# Expose the port your application listens on.
EXPOSE 8000
# Define the command to run your application.
CMD ["uvicorn", "jira-webhook-llm:app", "--host", "0.0.0.0", "--port", "8000"]

config.py (new file, 75 lines)
import os
import sys
import yaml
from loguru import logger
from typing import Optional

# Define a custom exception for configuration errors
class AppConfigError(Exception):
    pass

class Settings:
    def __init__(self, config_path: str = "config/application.yml"):
        """
        Loads configuration from a YAML file and overrides with environment variables.
        """
        # --- Load from YAML file ---
        try:
            with open(config_path, 'r') as f:
                config = yaml.safe_load(f)
        except FileNotFoundError:
            raise AppConfigError(f"Configuration file not found at '{config_path}'.")
        except yaml.YAMLError as e:
            raise AppConfigError(f"Error parsing YAML file: {e}")

        # --- Read and Combine Settings (Environment variables take precedence) ---
        llm_config = config.get('llm', {})

        # General settings
        self.llm_mode: str = os.getenv("LLM_MODE", llm_config.get('mode', 'openai')).lower()

        # OpenAI settings
        openai_config = llm_config.get('openai', {})
        self.openai_api_key: Optional[str] = os.getenv("OPENAI_API_KEY", openai_config.get('api_key'))
        self.openai_api_base_url: Optional[str] = os.getenv("OPENAI_API_BASE_URL", openai_config.get('api_base_url'))
        self.openai_model: Optional[str] = os.getenv("OPENAI_MODEL", openai_config.get('model'))

        # Ollama settings
        ollama_config = llm_config.get('ollama', {})
        self.ollama_base_url: Optional[str] = os.getenv("OLLAMA_BASE_URL", ollama_config.get('base_url'))
        self.ollama_model: Optional[str] = os.getenv("OLLAMA_MODEL", ollama_config.get('model'))

        self._validate()

    def _validate(self):
        """
        Validates that required configuration variables are set.
        """
        logger.info(f"LLM mode set to: '{self.llm_mode}'")
        if self.llm_mode == 'openai':
            if not self.openai_api_key:
                raise AppConfigError("LLM mode is 'openai', but OPENAI_API_KEY is not set.")
            if not self.openai_api_base_url:
                raise AppConfigError("LLM mode is 'openai', but OPENAI_API_BASE_URL is not set.")
            if not self.openai_model:
                raise AppConfigError("LLM mode is 'openai', but OPENAI_MODEL is not set.")
        elif self.llm_mode == 'ollama':
            if not self.ollama_base_url:
                raise AppConfigError("LLM mode is 'ollama', but OLLAMA_BASE_URL is not set.")
            if not self.ollama_model:
                raise AppConfigError("LLM mode is 'ollama', but OLLAMA_MODEL is not set.")
        else:
            raise AppConfigError(f"Invalid LLM_MODE: '{self.llm_mode}'. Must be 'openai' or 'ollama'.")
        logger.info("Configuration validated successfully.")

# Create a single, validated instance of the settings to be imported by other modules.
try:
    settings = Settings()
except AppConfigError as e:
    logger.error(f"FATAL: {e}")
    logger.error("Application shutting down due to configuration error.")
    sys.exit(1)  # Exit the application if configuration is invalid
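
As the comments above note, environment variables take precedence over the values in application.yml. A minimal sketch of that override behaviour (hypothetical values; assumes config/application.yml is present, since Settings() is built at import time):

import os

# Environment variables win over the corresponding application.yml entries.
os.environ["LLM_MODE"] = "ollama"
os.environ["OLLAMA_BASE_URL"] = "http://localhost:11434"
os.environ["OLLAMA_MODEL"] = "smollm:360m"

from config import settings  # Settings() is constructed and validated here
print(settings.llm_mode, settings.ollama_model)  # -> ollama smollm:360m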

config/application.yml (new file, 30 lines)
# Default application configuration
llm:
  # The mode to run the application in.
  # Can be 'openai' or 'ollama'.
  # This can be overridden by the LLM_MODE environment variable.
  mode: ollama

  # Settings for OpenAI-compatible APIs (like OpenRouter)
  openai:
    # It's HIGHLY recommended to set this via an environment variable
    # instead of saving it in this file.
    # Can be overridden by OPENAI_API_KEY
    api_key: "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"
    # Can be overridden by OPENAI_API_BASE_URL
    api_base_url: "https://openrouter.ai/api/v1"
    # Can be overridden by OPENAI_MODEL
    model: "deepseek/deepseek-chat:free"

  # Settings for Ollama
  ollama:
    # Can be overridden by OLLAMA_BASE_URL
    base_url: "http://localhost:11434"
    # Can be overridden by OLLAMA_MODEL
    # model: "phi4-mini:latest"
    # model: "qwen3:1.7b"
    model: "smollm:360m"

custom payload JIRA.json (new file, 16 lines; diff suppressed because one or more lines are too long)

docker-compose.yml (new file, 65 lines)
name: jira-llm-stack

services:
  # Service for the Ollama server
  ollama:
    image: ollama/ollama:latest
    # Map port 11434 from the container to the host machine.
    # This allows you to access Ollama directly from your host if needed (e.g., via curl http://localhost:11434)
    ports:
      - "11434:11434"
    # Mount a volume to persist Ollama models and data.
    # This prevents redownloading models every time the container restarts.
    volumes:
      - ollama_data:/root/.ollama
    # Start the Ollama server in the background, pull the model once the server
    # is up, then wait on the server process so the container keeps running.
    entrypoint: ["sh"]
    command: ["-c", "ollama serve & sleep 5 && ollama pull phi4-mini:latest && wait"]
    # Restart the container if it exits unexpectedly
    restart: unless-stopped

  # Service for your FastAPI application
  app:
    # Build the Docker image for your app from the current directory (where the Dockerfile is located)
    build: .
    # Map port 8000 from the container to the host machine.
    # This allows you to access your FastAPI app at http://localhost:8000
    ports:
      - "8000:8000"
    # Define environment variables for your FastAPI application.
    # These are read by config.py at startup.
    environment:
      # Set the LLM mode to 'ollama'
      LLM_MODE: ollama
      # Point to an Ollama server. This currently targets a host on the local network;
      # to use the 'ollama' service above instead, the service name works as a hostname
      # within the Compose network (http://ollama:11434).
      OLLAMA_BASE_URL: http://192.168.0.122:11434
      # Specify the model to use
      OLLAMA_MODEL: gemma3:1b
      # If you have an OpenAI API key in your settings but want to ensure it's not used
      # when LLM_MODE is ollama, you can explicitly set it to empty or omit it.
      # OPENAI_API_KEY: ""
      # OPENAI_MODEL: ""
    # Start the Ollama service before the app (note: this does not wait for it to be healthy)
    depends_on:
      - ollama
    # Restart the container if it exits unexpectedly
    restart: unless-stopped
    # Mount your current project directory into the container.
    # This is useful for development, as changes to your code are reflected
    # without rebuilding the image (when using a hot-reloading server like uvicorn --reload).
    # For production, you might remove this and rely solely on the Dockerfile copy.
    volumes:
      - .:/app
    # Command to run your FastAPI application using Uvicorn.
    # --host 0.0.0.0 is crucial for the app to be accessible from outside the container.
    # --reload is good for development; remove for production.
    command: uvicorn jira-webhook-llm:app --host 0.0.0.0 --port 8000 --reload

# Define named volumes for persistent data
volumes:
  ollama_data:
    driver: local
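
Once the stack is up (docker compose up), a quick reachability check against the Ollama service can be run from the host. A sketch using only the standard library, assuming the default port mapping above:

import json
import urllib.request

# Lists the models the Ollama server currently has available (GET /api/tags).
with urllib.request.urlopen("http://localhost:11434/api/tags", timeout=5) as resp:
    models = json.load(resp).get("models", [])
print([m["name"] for m in models])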

full JIRA payload.json (new file, 1056 lines; diff suppressed because one or more lines are too long)

jira-webhook-llm.py (new file, 279 lines)
import sys
import json
from typing import Optional, List, Union

from fastapi import FastAPI, HTTPException
from pydantic import BaseModel, Field, ConfigDict, field_validator
from loguru import logger

# Import the validated settings object
from config import settings

# LangChain imports
from langchain_ollama import OllamaLLM
from langchain_openai import ChatOpenAI
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import JsonOutputParser

# Langfuse imports
from langfuse import Langfuse, get_client
from langfuse.langchain import CallbackHandler

# LANGFUSE_PUBLIC_KEY="pk_lf_..."
# LANGFUSE_SECRET_KEY="sk_lf_..."
# LANGFUSE_HOST="https://cloud.langfuse.com"  # Or "https://us.cloud.langfuse.com" for the US region, or your self-hosted instance
langfuse = Langfuse(
    secret_key="sk-lf-55d5fa70-e2d3-44d0-ae76-48181126d7ed",
    public_key="pk-lf-0f6178ee-e6aa-4cb7-a433-6c00c6512874",
    host="https://cloud.langfuse.com"
)

# Initialize the Langfuse client (optional, get_client() uses environment variables by default).
# It's good practice to initialize it early to ensure the connection works.
try:
    langfuse_client = get_client()
    if langfuse_client.auth_check():
        logger.info("Langfuse client authenticated successfully.")
    else:
        logger.warning("Langfuse authentication failed. Check your API keys and host.")
except Exception as e:
    logger.error(f"Failed to initialize Langfuse client: {e}")
    # Depending on your tolerance, you might want to exit or continue without tracing.
    # For now, we just log and continue, but traces won't be sent.
    langfuse_client = None

# --- Pydantic Models for Jira Payload and LLM Output ---

class JiraWebhookPayload(BaseModel):
    # Generate camelCase aliases for snake_case field names and allow population by field name.
    model_config = ConfigDict(
        alias_generator=lambda x: ''.join(word.capitalize() if i > 0 else word for i, word in enumerate(x.split('_'))),
        populate_by_name=True
    )

    issueKey: str
    summary: str
    description: Optional[str] = None
    comment: Optional[str] = None  # Assuming this is the *new* comment that triggered the webhook
    labels: Optional[Union[List[str], str]] = []
    status: Optional[str] = None
    assignee: Optional[str] = None
    updated: Optional[str] = None  # Timestamp string

    @field_validator('labels', mode='before')  # Pydantic v2: `pre=True` becomes `mode='before'`
    @classmethod  # V2 validators must be classmethods
    def convert_labels_to_list(cls, v):
        if isinstance(v, str):
            return [v]
        return v or []  # Return an empty list if v is None/empty

# Define the structure of the LLM's expected JSON output
class AnalysisFlags(BaseModel):
    hasMultipleEscalations: bool = Field(description="Is there evidence of multiple escalation attempts or channels?")
    requiresUrgentAttention: bool = Field(description="Does the issue convey a sense of urgency beyond standard priority?")
    customerSentiment: Optional[str] = Field(description="Overall customer sentiment (e.g., 'neutral', 'frustrated', 'calm').")
    suggestedLabels: List[str] = Field(description="List of suggested Jira labels, e.g., ['escalated', 'high-customer-impact'].")
    summaryOfConcerns: Optional[str] = Field(description="A concise summary of the key concerns or problems in the ticket.")
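
# Illustrative example (made-up values) of the JSON shape the parser below expects from the LLM:
# {
#   "hasMultipleEscalations": false,
#   "requiresUrgentAttention": true,
#   "customerSentiment": "frustrated",
#   "suggestedLabels": ["escalated", "high-customer-impact"],
#   "summaryOfConcerns": "Customer reports repeated outages despite earlier fixes."
# }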
# --- LLM Setup (Now dynamic based on config) ---
llm = None
if settings.llm_mode == 'openai':
    logger.info(f"Initializing ChatOpenAI with model: {settings.openai_model}")
    llm = ChatOpenAI(
        model=settings.openai_model,
        temperature=0.7,
        max_tokens=2000,
        api_key=settings.openai_api_key,
        base_url=settings.openai_api_base_url
    )
elif settings.llm_mode == 'ollama':
    logger.info(f"Initializing OllamaLLM with model: {settings.ollama_model} at {settings.ollama_base_url}")
    llm = OllamaLLM(
        model=settings.ollama_model,
        base_url=settings.ollama_base_url,
        streaming=False
    )

# This check is now redundant because config.py would have exited, but it's good for clarity.
if llm is None:
    logger.error("LLM could not be initialized. Exiting.")
    sys.exit(1)

app = FastAPI()

# Set up Output Parser for structured JSON
parser = JsonOutputParser(pydantic_object=AnalysisFlags)

# Prompt Template for LLM
prompt_template = PromptTemplate(
    template="""
You are an AI assistant designed to analyze Jira ticket details and extract key flags and sentiment.
Analyze the following Jira ticket information and provide your analysis in a JSON format.
Ensure the JSON strictly adheres to the specified schema.
Consider the overall context of the ticket and specifically the latest comment if provided.

Issue Key: {issueKey}
Summary: {summary}
Description: {description}
Status: {status}
Existing Labels: {labels}
Assignee: {assignee}
Last Updated: {updated}
Latest Comment (if applicable): {comment}

**Analysis Request:**
- Determine if there are signs of multiple escalation attempts in the descriptions or comments.
- Assess if the issue requires urgent attention based on language or context from the summary, description, or latest comment.
- Summarize the overall customer sentiment evident in the issue.
- Suggest relevant Jira labels that should be applied to this issue.
- Provide a concise summary of the key concerns or problems described in the ticket.
- Generate a concise, objective comment (max 2-3 sentences) suitable for directly adding to the Jira ticket, summarizing the AI's findings.

{format_instructions}
""",
    input_variables=[
        "issueKey", "summary", "description", "status", "labels",
        "assignee", "updated", "comment"
    ],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)

# Chain for LLM invocation
analysis_chain = prompt_template | llm | parser
# --- Webhook Endpoint ---
@app.post("/jira-webhook")
async def jira_webhook_handler(payload: JiraWebhookPayload):
    # Initialize a Langfuse CallbackHandler for this request.
    # This ensures each webhook invocation gets its own trace in Langfuse.
    langfuse_handler = CallbackHandler()
    try:
        logger.info(f"Received webhook for Jira issue: {payload.issueKey}")
        # Prepare payload for LangChain:
        # 1. Use the 'comment' field directly if it exists, as it's typically the trigger.
        # 2. Convert Optional fields to usable strings for the prompt.
        # This mapping handles potential None values in the payload.
        llm_input = {
            "issueKey": payload.issueKey,
            "summary": payload.summary,
            "description": payload.description if payload.description else "No description provided.",
            "status": payload.status if payload.status else "Unknown",
            "labels": ", ".join(payload.labels) if payload.labels else "None",
            "assignee": payload.assignee if payload.assignee else "Unassigned",
            "updated": payload.updated if payload.updated else "Unknown",
            "comment": payload.comment if payload.comment else "No new comment provided."
        }
        # Pass data to LangChain for analysis.
        # Using ainvoke for async execution, with the Langfuse handler in the call's config.
        analysis_result = await analysis_chain.ainvoke(
            llm_input,
            config={
                "callbacks": [langfuse_handler],
                "metadata": {
                    "trace_name": f"JiraWebhook-{payload.issueKey}"
                }
            }
        )
        logger.debug(f"LLM Analysis Result for {payload.issueKey}: {json.dumps(analysis_result, indent=2)}")
        return {"status": "success", "analysis_flags": analysis_result}
    except Exception as e:
        logger.error(f"Error processing webhook: {e}")
        import traceback
        traceback.print_exc()  # Print full traceback for debugging
        # In case of an error, record it on the Langfuse trace as well.
        if langfuse_handler.trace:  # Check whether the trace was started
            langfuse_handler.trace.update(
                status_message=f"Error: {str(e)}",
                level="ERROR"
            )
        raise HTTPException(status_code=500, detail=f"Internal Server Error: {str(e)}")
    finally:
        # Flush the Langfuse client to ensure all events are sent.
        # This is especially important in short-lived processes or serverless functions.
        # For a long-running FastAPI app the client's internal queue usually handles this,
        # but an explicit flush is useful for immediate visibility or during testing.
        if langfuse_client:
            langfuse_client.flush()

# To run this:
# 1. Set OPENAI_API_KEY, LANGFUSE_PUBLIC_KEY, LANGFUSE_SECRET_KEY, LANGFUSE_HOST environment variables
# 2. Start FastAPI: uvicorn jira-webhook-llm:app --host 0.0.0.0 --port 8000 --reload
@app.post("/test-llm")
async def test_llm():
"""Test endpoint for LLM integration"""
# Correctly initialize the Langfuse CallbackHandler.
# It inherits the client configuration from the global 'langfuse' instance.
# If you need to name the trace, you do so in the 'ainvoke' call's metadata.
langfuse_handler = CallbackHandler(
# The constructor does not take 'trace_name'.
# Remove it from here.
)
test_payload = {
"issueKey": "TEST-123",
"summary": "Test issue",
"description": "This is a test issue for LLM integration",
"comment": "Testing OpenAI integration with Langfuse",
"labels": ["test"],
"status": "Open",
"assignee": "Tester",
"updated": "2025-07-04T21:40:00Z"
}
try:
llm_input = {
"issueKey": test_payload["issueKey"],
"summary": test_payload["summary"],
"description": test_payload["description"],
"status": test_payload["status"],
"labels": ", ".join(test_payload["labels"]),
"assignee": test_payload["assignee"],
"updated": test_payload["updated"],
"comment": test_payload["comment"]
}
# To name the trace, you pass it in the config's metadata
result = await analysis_chain.ainvoke(
llm_input,
config={
"callbacks": [langfuse_handler],
"metadata": {
"trace_name": "TestLLM" # Correct way to name the trace
}
}
)
return {
"status": "success",
"result": result
}
except Exception as e:
if langfuse_handler.trace:
langfuse_handler.trace.update(
status_message=f"Error in test-llm: {str(e)}",
level="ERROR"
)
logger.error(f"Error in /test-llm: {e}")
return {
"status": "error",
"message": str(e)
}
finally:
if langfuse_client:
langfuse_client.flush()
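
The /test-llm endpoint above takes no request body, so a smoke test is just a bare POST. A sketch assuming the app runs locally on port 8000, using only the standard library:

import json
import urllib.request

req = urllib.request.Request("http://localhost:8000/test-llm", method="POST")
with urllib.request.urlopen(req) as resp:
    print(json.load(resp))  # {"status": "success", "result": {...}} on a healthy setup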

requirements.txt (new file, 10 lines)
fastapi==0.111.0
pydantic==2.9.0 # Changed from 2.7.4 to meet ollama's requirement
pydantic-settings==2.0.0
langchain-ollama==0.3.3
langchain-openai==0.3.27
langchain-core==0.3.68
uvicorn==0.30.1
python-multipart==0.0.9 # Good to include for FastAPI forms
loguru==0.7.3
langfuse==3.1.3