From e1483e0a292737d0f8c095da8700c248b025dbc3 Mon Sep 17 00:00:00 2001
From: Ireneusz Bachanowicz
Date: Sun, 2 Mar 2025 01:53:53 +0100
Subject: [PATCH] Prompt in external file

---
 my-app/utils/prompt.txt         | 1 +
 my-app/utils/resume_analysis.py | 9 +++++----
 visual-inspiration              | 1 -
 3 files changed, 6 insertions(+), 5 deletions(-)
 create mode 100644 my-app/utils/prompt.txt
 delete mode 160000 visual-inspiration

diff --git a/my-app/utils/prompt.txt b/my-app/utils/prompt.txt
new file mode 100644
index 0000000..294249d
--- /dev/null
+++ b/my-app/utils/prompt.txt
@@ -0,0 +1 @@
+Provide a concise summary of the resume, highlighting key skills and potential areas for improvement, in at least 5 sentences.
diff --git a/my-app/utils/resume_analysis.py b/my-app/utils/resume_analysis.py
index 6c8770f..0e0f8cf 100644
--- a/my-app/utils/resume_analysis.py
+++ b/my-app/utils/resume_analysis.py
@@ -2,24 +2,25 @@
 import sys
 import os
 import argparse
+import io
 from dotenv import load_dotenv
 from openai import OpenAI
 from pdfminer.high_level import extract_text
 
 # Load environment variables from .env file
-load_dotenv()
+load_dotenv(dotenv_path=os.path.join(os.path.dirname(__file__), '.env'))
 
 client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
 
 def analyze_resume(text):
     response = client.chat.completions.create(
-        model="gpt-4o-mini",
+        model=os.getenv("MODEL_NAME"),
         messages=[{
             "role": "system",
-            "content": "Provide a concise summary of the resume, highlighting key skills and potential areas for improvement, in a few sentences."
+            "content": open(os.path.join(os.path.dirname(__file__), "prompt.txt"), "r").read()
         },
         {"role": "user", "content": text}],
-        max_tokens=200  # Add a max_tokens parameter to limit the output length
+        max_tokens=int(os.getenv("MAX_TOKENS"))
     )
 
     return response
diff --git a/visual-inspiration b/visual-inspiration
deleted file mode 160000
index be751b7..0000000
--- a/visual-inspiration
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit be751b77fd71ac830d81090ad792091493040729