CV/my-app/utils/tests/test_resume_analysis.py
2025-03-14 00:59:09 +01:00

174 lines
6.3 KiB
Python

import os
import sys
import pytest
from unittest.mock import patch, MagicMock
import json
import logging
import argparse # Import argparse
from dotenv import load_dotenv
# Add the project root to the sys path to allow imports from the main package
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from resume_analysis import (
call_openai_api,
insert_processing_data,
load_mockup_response,
main,
get_mongo_collection
)
# Load environment variables for testing (e.g. credentials/connection strings
# read by resume_analysis — presumably via a .env file; verify against the app).
load_dotenv()

# Constants for Mocking: fixture files created next to this test module.
# Both are written and removed by the individual tests/fixtures below.
MOCKUP_FILE_PATH = os.path.join(os.path.dirname(__file__), 'mockup_response.json')
TEST_RESUME_PATH = os.path.join(os.path.dirname(__file__), 'test_resume.txt')

# Create a logger for this module so debug output is visible when running
# pytest with output capturing disabled (-s).
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Create a handler and set the formatter (timestamp - logger name - level - message)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
# Add the handler to the logger
logger.addHandler(ch)
# Mockup response data
# Mockup response data: a dict shaped like an OpenAI chat-completion payload
# (id/object/created/model/usage/choices). The assistant message content is a
# JSON string, mirroring how the real API returns structured output as text.
MOCKUP_RESPONSE_DATA = {
    "id": "chatcmpl-123",
    "object": "chat.completion",
    "created": 1677652288,
    "model": "gpt-3.5-turbo-0301",
    "usage": {
        "prompt_tokens": 100,
        "completion_tokens": 200,
        "total_tokens": 300
    },
    "choices": [
        {
            "message": {
                "role": "assistant",
                "content": '{"openai_stats": {"prompt_tokens": 100, "completion_tokens": 200, "total_tokens": 300}}'
            },
            "finish_reason": "stop",
            "index": 0
        }
    ]
}
# Fixtures
@pytest.fixture
def mock_openai_response():
    """Build a MagicMock mirroring an OpenAI chat-completion response object.

    Attribute values match MOCKUP_RESPONSE_DATA so both code paths (live API
    mock vs. mockup file) can be compared against the same payload.
    """
    response = MagicMock()
    response.id = "chatcmpl-123"
    response.object = "chat.completion"
    response.created = 1677652288
    response.model = "gpt-3.5-turbo-0301"
    response.usage = MagicMock(prompt_tokens=100, completion_tokens=200, total_tokens=300)
    message = MagicMock(
        role="assistant",
        content='{"openai_stats": {"prompt_tokens": 100, "completion_tokens": 200, "total_tokens": 300}}',
    )
    response.choices = [MagicMock(message=message, finish_reason="stop", index=0)]
    return response
@pytest.fixture
def test_resume_file():
    """Yield the path of a throwaway resume file; delete it on teardown."""
    with open(TEST_RESUME_PATH, 'w') as handle:
        handle.write("This is a test resume.")
    yield TEST_RESUME_PATH
    # Teardown: pytest resumes the generator here after the test finishes.
    os.remove(TEST_RESUME_PATH)
@pytest.fixture
def mock_mongo_collection():
    """Provide a MongoDB-collection stand-in that records the last insert.

    Tests inspect `.inserted_data` to check whether insert_one was called
    and with what document.
    """
    class _RecordingCollection:
        def __init__(self):
            self.inserted_data = None

        def insert_one(self, data):
            self.inserted_data = data

    return _RecordingCollection()
# Unit Tests
def test_load_mockup_response():
    """load_mockup_response should round-trip the JSON written to disk."""
    # Create a mockup response file for the function under test to read.
    with open(MOCKUP_FILE_PATH, 'w') as f:
        json.dump(MOCKUP_RESPONSE_DATA, f)
    # Fix: remove the file even when the assertion fails, so a failing run
    # does not leave a stale mockup file behind for later tests/runs.
    try:
        response = load_mockup_response(MOCKUP_FILE_PATH)
        assert response == MOCKUP_RESPONSE_DATA
    finally:
        os.remove(MOCKUP_FILE_PATH)
def test_load_mockup_response_file_not_found():
    """A missing mockup file must surface as FileNotFoundError."""
    missing_path = "non_existent_file.json"
    with pytest.raises(FileNotFoundError):
        load_mockup_response(missing_path)
@patch("resume_analysis.openai.chat.completions.create")
def test_call_openai_api_success(mock_openai_chat_completions_create, mock_openai_response):
    """On a successful API call, call_openai_api returns the response unchanged."""
    mock_openai_chat_completions_create.return_value = mock_openai_response
    result = call_openai_api("test resume text", False)
    assert result == mock_openai_response
@patch("resume_analysis.openai.chat.completions.create")
def test_call_openai_api_failure(mock_openai_chat_completions_create):
    """If the API raises, call_openai_api swallows the error and returns None."""
    mock_openai_chat_completions_create.side_effect = Exception("API error")
    result = call_openai_api("test resume text", False)
    assert result is None
def test_call_openai_api_mockup_mode():
    """With the mockup flag set, the canned file is returned (no API call)."""
    # Create a mockup response file for the mockup code path to read.
    with open(MOCKUP_FILE_PATH, 'w') as f:
        json.dump(MOCKUP_RESPONSE_DATA, f)
    # Fix: remove the file even when the assertion fails, so a failing run
    # does not leave a stale mockup file behind for later tests/runs.
    try:
        response = call_openai_api("test resume text", True)
        assert response == MOCKUP_RESPONSE_DATA
    finally:
        os.remove(MOCKUP_FILE_PATH)
def test_insert_processing_data_success(mock_openai_response, mock_mongo_collection):
    """A non-mockup insert stores a document and reports zero cost."""
    cli_args = argparse.Namespace(file="test.pdf")
    cost = insert_processing_data(
        "test resume text", {}, mock_openai_response, cli_args,
        "test_id", False, mock_mongo_collection,
    )
    assert mock_mongo_collection.inserted_data is not None
    assert cost == 0
def test_insert_processing_data_mockup_mode(mock_mongo_collection):
    """In mockup mode nothing is written to Mongo and the cost is zero."""
    cli_args = argparse.Namespace(file="test.pdf")
    cost = insert_processing_data(
        "test resume text", {}, MOCKUP_RESPONSE_DATA, cli_args,
        "test_id", True, mock_mongo_collection,
    )
    assert mock_mongo_collection.inserted_data is None
    assert cost == 0
@patch("resume_analysis.get_mongo_collection")
def test_main_success(mock_get_mongo_collection, test_resume_file, mock_openai_response):
    """End-to-end main() run: the API helper is called and the response written."""
    mock_get_mongo_collection.return_value.insert_one.return_value = None
    # Fix: patch sys.argv instead of assigning it — the original value is
    # restored on exit, so this test no longer leaks CLI state into others.
    with patch("resume_analysis.call_openai_api") as mock_call_openai_api, \
         patch("resume_analysis.write_openai_response") as mock_write_openai_response, \
         patch.object(sys, "argv", ["resume_analysis.py", "-f", test_resume_file]):
        mock_call_openai_api.return_value = mock_openai_response
        main()
    assert mock_call_openai_api.called
    assert mock_write_openai_response.called
@patch("resume_analysis.get_mongo_collection")
def test_main_mockup_mode(mock_get_mongo_collection, test_resume_file, mock_openai_response):
    """main() with the -m (mockup) flag still calls the helpers end to end."""
    mock_get_mongo_collection.return_value.insert_one.return_value = None
    # Fix: patch sys.argv instead of assigning it — the original value is
    # restored on exit, so this test no longer leaks CLI state into others.
    with patch("resume_analysis.call_openai_api") as mock_call_openai_api, \
         patch("resume_analysis.write_openai_response") as mock_write_openai_response, \
         patch.object(sys, "argv", ["resume_analysis.py", "-f", test_resume_file, "-m"]):
        mock_call_openai_api.return_value = mock_openai_response
        main()
    assert mock_call_openai_api.called
    assert mock_write_openai_response.called
def test_main_file_not_found():
    """main() must exit with status code 1 when the input file does not exist."""
    # Fix: patch sys.argv instead of assigning it — the original value is
    # restored on exit, so this test no longer leaks CLI state into others.
    with patch.object(sys, "argv", ["resume_analysis.py", "-f", "non_existent_file.pdf"]):
        with pytest.raises(SystemExit) as pytest_wrapped_e:
            main()
    assert pytest_wrapped_e.type == SystemExit
    assert pytest_wrapped_e.value.code == 1
def test_get_mongo_collection():
    # Test that the function returns a valid MongoDB collection object.
    # NOTE(review): this is an integration test — get_mongo_collection is not
    # mocked here, so it presumably needs a reachable MongoDB configured via
    # the environment loaded by load_dotenv(); it will fail without one.
    collection = get_mongo_collection()
    assert collection is not None