jira-webhook-llm/shared_store.py
Ireneusz Bachanowicz 8c1ab79eeb
Some checks are pending
CI/CD Pipeline / test (push) Waiting to run
Refactor LLM analysis chain and models; remove deprecated prompt files
- Updated `chains.py` to streamline imports and improve error handling for LLM initialization.
- Modified `models.py` to enhance the `AnalysisFlags` model with field aliases and added datetime import.
- Deleted outdated prompt files (`jira_analysis_v1.0.0.txt`, `jira_analysis_v1.1.0.txt`, `jira_analysis_v1.2.0.txt`) to clean up the repository.
- Introduced a new prompt file `jira_analysis_v1.2.0.txt` with updated instructions for analysis.
- Removed `logging_config.py` and test files to simplify the codebase.
- Updated webhook handler to improve error handling and logging.
- Added a new shared store for managing processing requests in a thread-safe manner.
2025-07-21 01:06:45 +02:00

91 lines
3.3 KiB
Python

import threading
from datetime import datetime, timezone
from enum import Enum
from queue import Empty
from typing import Dict, List, Optional
class RequestStatus(str, Enum):
    """Lifecycle states of a processing request.

    Subclasses ``str`` so members compare equal to their string values and
    serialize directly (e.g. into JSON) without an explicit ``.value``.
    """
    PENDING = "pending"        # created, not yet picked up
    PROCESSING = "processing"  # currently being handled
    COMPLETED = "completed"    # finished successfully
    FAILED = "failed"          # finished with an error
    TIMEOUT = "timeout"        # presumably: aborted after a time limit — TODO confirm against the worker code
# Thread-safe storage for requests and responses
from queue import Queue
from dataclasses import dataclass, field
@dataclass
class ProcessingRequest:
    """A single queued payload tracked through its processing lifecycle."""
    id: int                                        # unique ID assigned by RequestQueue._get_next_id
    payload: Dict                                  # the raw request payload to process
    status: RequestStatus = RequestStatus.PENDING  # current lifecycle state
    # Aware UTC creation timestamp; default_factory gives each instance its own value.
    created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    started_at: Optional[datetime] = None          # presumably set by the worker when processing starts — not set in this file
    completed_at: Optional[datetime] = None        # presumably set by the worker on completion — not set in this file
    error: Optional[str] = None                    # error detail, presumably when status is FAILED/TIMEOUT
    retry_count: int = 0                           # number of retries attempted so far
class RequestQueue:
    """Thread-safe store of ProcessingRequest objects.

    Combines a FIFO work queue (workers pull pending requests from it) with
    a registry list of every request ever added, so requests remain
    retrievable by ID even after they have been dequeued.
    """

    def __init__(self):
        self._queue: Queue[ProcessingRequest] = Queue()   # pending work, FIFO; Queue is internally thread-safe
        self._requests: List[ProcessingRequest] = []      # registry of all requests, for lookup after dequeue
        self._processing_lock = threading.Lock()          # guards _requests
        self._id_lock = threading.Lock()                  # guards _current_id
        self._current_id = 0

    def _get_next_id(self) -> int:
        """Generate and return the next available request ID."""
        with self._id_lock:
            self._current_id += 1
            return self._current_id

    def add_request(self, payload: Dict) -> int:
        """Create a ProcessingRequest for *payload*, enqueue it, and return its ID."""
        request = ProcessingRequest(id=self._get_next_id(), payload=payload)
        self._queue.put(request)
        with self._processing_lock:  # protect the registry list
            self._requests.append(request)
        return request.id

    def get_next_request(self) -> Optional[ProcessingRequest]:
        """Fetch the next pending request, or None if the queue is empty.

        Uses a non-blocking get (EAFP) rather than the previous
        ``empty()``-then-``get()`` pair, which could block forever if two
        callers raced past the ``empty()`` check for a single item.
        """
        try:
            return self._queue.get_nowait()
        except Empty:
            return None

    def get_all_requests(self) -> List[ProcessingRequest]:
        """Return a snapshot copy of all requests ever added."""
        with self._processing_lock:
            return list(self._requests)  # copy prevents external mutation of the registry

    def get_request_by_id(self, request_id: int) -> Optional[ProcessingRequest]:
        """Return the request with *request_id*, or None if unknown."""
        with self._processing_lock:
            return next((req for req in self._requests if req.id == request_id), None)

    def delete_request_by_id(self, request_id: int) -> bool:
        """Remove the request with *request_id* from the registry.

        Returns True if a request was removed. Note: this does NOT remove a
        still-pending request from the work queue; a worker may yet dequeue it.
        """
        with self._processing_lock:
            initial_length = len(self._requests)
            self._requests = [req for req in self._requests if req.id != request_id]
            return len(self._requests) < initial_length

    def clear_all_requests(self):
        """Clear the registry and drain any still-queued pending requests."""
        with self._processing_lock:
            self._requests.clear()
            # Drain the pending queue. Catch only queue.Empty — the one
            # exception get_nowait raises — instead of a blanket Exception,
            # and break rather than re-checking empty() each iteration.
            while True:
                try:
                    self._queue.get_nowait()
                except Empty:
                    break
                self._queue.task_done()  # balance the put() for each drained item

    def task_done(self):
        """Indicate that a formerly dequeued request has been fully processed."""
        self._queue.task_done()
requests_queue = RequestQueue()