- Simplified the FastAPI application structure and improved error handling with middleware (see the middleware sketch after this list).
- Introduced a retry decorator for asynchronous functions to enhance reliability (see the retry sketch below).
- Modularized the LLM initialization and prompt loading into separate functions for better maintainability (a sketch of the prompt helpers follows the test file below).
- Updated Pydantic models for the Jira webhook payload and analysis flags to ensure proper validation and structure (see the model sketch below).
- Implemented a structured logging configuration for better traceability and debugging (see the logging sketch below).
- Added comprehensive unit tests for prompt loading, response validation, and webhook handling.
- Established a CI/CD pipeline with GitHub Actions for automated testing and coverage reporting.
- Enhanced the prompt template for LLM analysis to include specific instructions for handling escalations.
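The commit message does not include the middleware itself, but a minimal sketch of what an error-handling middleware for a FastAPI app might look like is shown here. The handler name, log fields, and response body are assumptions, not the repository's actual code.

# Minimal sketch of an error-handling middleware, assuming a FastAPI app
# object named `app`; handler name and response shape are hypothetical.
import logging

from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse

app = FastAPI()
logger = logging.getLogger(__name__)

@app.middleware("http")
async def catch_unhandled_errors(request: Request, call_next):
    """Convert unhandled exceptions into a structured 500 response."""
    try:
        return await call_next(request)
    except Exception:
        logger.exception("Unhandled error on %s %s", request.method, request.url.path)
        return JSONResponse(status_code=500, content={"error": "internal_server_error"})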
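For the retry decorator, a hedged sketch of the standard pattern follows; the name async_retry and its parameters are assumptions about the repository's actual signature.

# Sketch of a retry decorator for async functions; names and defaults
# are illustrative, not taken from the repository.
import asyncio
import functools

def async_retry(attempts: int = 3, delay: float = 1.0):
    """Retry an async function with a fixed delay between attempts."""
    def decorator(func):
        @functools.wraps(func)
        async def wrapper(*args, **kwargs):
            last_exc = None
            for attempt in range(1, attempts + 1):
                try:
                    return await func(*args, **kwargs)
                except Exception as exc:
                    last_exc = exc
                    if attempt < attempts:
                        await asyncio.sleep(delay)
            raise last_exc
        return wrapper
    return decorator

@async_retry(attempts=3, delay=2.0)
async def call_llm(prompt: str) -> str:  # hypothetical caller
    ...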
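A sketch of the Pydantic models is included for context. The AnalysisFlags fields are inferred from the tests below; the JiraWebhookPayload fields are illustrative guesses at the webhook shape, not the repository's actual schema.

# AnalysisFlags fields inferred from the unit tests; JiraWebhookPayload
# fields are assumptions about the Jira webhook payload.
from pydantic import BaseModel, Field

class AnalysisFlags(BaseModel):
    hasMultipleEscalations: bool
    customerSentiment: str

class JiraWebhookPayload(BaseModel):
    webhookEvent: str
    issue: dict = Field(default_factory=dict)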
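Finally, a minimal sketch of a structured (JSON) logging configuration; the exact formatter and fields used in the repository are assumptions.

# Hedged sketch of a JSON logging setup; field names are illustrative.
import json
import logging

class JsonFormatter(logging.Formatter):
    def format(self, record: logging.LogRecord) -> str:
        return json.dumps({
            "level": record.levelname,
            "logger": record.name,
            "message": record.getMessage(),
        })

def configure_logging(level: int = logging.INFO) -> None:
    """Route all log records through a single JSON-formatted handler."""
    handler = logging.StreamHandler()
    handler.setFormatter(JsonFormatter())
    logging.basicConfig(level=level, handlers=[handler])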
import unittest

from llm.chains import load_prompt_template, validate_response
from llm.models import AnalysisFlags


class PromptTests(unittest.TestCase):
    def test_prompt_loading(self):
        """Test that prompt template loads correctly"""
        try:
            template = load_prompt_template()
            self.assertIsNotNone(template)
            self.assertIn("issueKey", template.input_variables)
        except Exception as e:
            self.fail(f"Prompt loading failed: {str(e)}")

    def test_response_validation(self):
        """Test response validation logic"""
        valid_response = {
            "hasMultipleEscalations": False,
            "customerSentiment": "neutral"
        }
        invalid_response = {
            "customerSentiment": "neutral"
        }

        self.assertTrue(validate_response(valid_response))
        self.assertFalse(validate_response(invalid_response))


if __name__ == "__main__":
    unittest.main()
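For context, here is a hedged sketch of the two helpers these tests exercise. The use of template.input_variables in the test suggests a LangChain PromptTemplate, and the required-keys check mirrors what the tests expect, but neither is necessarily the repository's exact implementation.

# Assumed shape of the helpers under test; the prompt text and the
# required-keys rule are inferred from the tests, not confirmed.
from langchain.prompts import PromptTemplate

REQUIRED_KEYS = {"hasMultipleEscalations", "customerSentiment"}

def load_prompt_template() -> PromptTemplate:
    """Build the analysis prompt with issueKey as an input variable."""
    return PromptTemplate(
        input_variables=["issueKey"],
        template="Analyze Jira issue {issueKey} for escalations.",
    )

def validate_response(response: dict) -> bool:
    """A response is valid only if every required flag is present."""
    return REQUIRED_KEYS.issubset(response)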