#!/usr/bin/env python3
"""Summarize a resume with the OpenAI chat API and report token usage and cost.

Usage:
    resume_analyzer.py -f resume.txt
    resume_analyzer.py -f resume.pdf      # PDF text extracted via pdfminer
    resume_analyzer.py "resume text..."   # pass the text directly
"""

import argparse
import os
import sys

from dotenv import load_dotenv
from openai import OpenAI
from pdfminer.high_level import extract_text

# Load environment variables (OPENAI_API_KEY) from a .env file, if present.
load_dotenv()

client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# gpt-4o-mini pricing in USD per token. Input and output tokens are billed at
# different rates, so a single flat per-token rate (as the original used)
# misstates the cost. NOTE(review): rates current as of writing — verify
# against the OpenAI pricing page.
_INPUT_COST_PER_TOKEN = 0.150 / 1_000_000
_OUTPUT_COST_PER_TOKEN = 0.600 / 1_000_000


def analyze_resume(text):
    """Ask the model for a short resume summary.

    Args:
        text: The raw resume text to summarize.

    Returns:
        The full ChatCompletion response object (caller reads
        ``.choices[0].message.content`` and ``.usage``).
    """
    return client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {
                "role": "system",
                "content": "Provide a concise summary of the resume, highlighting key skills and potential areas for improvement, in a few sentences.",
            },
            {"role": "user", "content": text},
        ],
        max_tokens=200,  # cap output length (and therefore cost)
    )


def _load_text(path):
    """Read resume text from *path*: PDFs via pdfminer, everything else as text.

    Raises:
        FileNotFoundError: if *path* does not exist.
    """
    if path.lower().endswith(".pdf"):
        # extract_text was imported but never called in the original script;
        # this wires up the evidently intended PDF support.
        return extract_text(path)
    # errors="replace" keeps the script usable on files with stray bytes
    # (the original used latin-1, which never fails but mis-decodes UTF-8).
    with open(path, "r", encoding="utf-8", errors="replace") as f:
        return f.read()


def main():
    """CLI entry point: parse arguments, summarize, print usage and cost."""
    parser = argparse.ArgumentParser(description="Analyze resume text using OpenAI.")
    parser.add_argument("-f", "--file", help="Path to the file containing the resume text.")
    # BUG FIX: the original fell back to sys.argv[1] for inline text, but
    # argparse exits with "unrecognized arguments" before that branch could
    # ever run. Declaring the positional makes inline text actually work.
    parser.add_argument("text", nargs="?", help="Resume text passed directly on the command line.")
    args = parser.parse_args()

    if args.file:
        try:
            text_content = _load_text(args.file)
        except FileNotFoundError:
            print(f"Error: File not found: {args.file}")
            sys.exit(1)
    elif args.text:
        text_content = args.text
    else:
        parser.print_help()
        sys.exit(1)

    response = analyze_resume(text_content)
    summary = response.choices[0].message.content

    input_tokens = response.usage.prompt_tokens
    output_tokens = response.usage.completion_tokens
    total_tokens = response.usage.total_tokens
    # Estimate cost from the per-direction rates rather than a flat rate.
    cost = input_tokens * _INPUT_COST_PER_TOKEN + output_tokens * _OUTPUT_COST_PER_TOKEN

    print(f"Summary: {summary}")
    print("\n--- Usage Information ---")
    print(f"Input tokens: {input_tokens}")
    print(f"Output tokens: {output_tokens}")
    print(f"Total tokens: {total_tokens}")
    print(f"Cost: ${cost:.6f}")  # rough estimate
    # (Dropped the original's trailing "--- Summary from OpenAI ---" section,
    # which only re-printed the token total shown two lines above.)


if __name__ == "__main__":
    main()