diff --git a/codex/README.md b/codex/README.md new file mode 100644 index 0000000..2892ed2 --- /dev/null +++ b/codex/README.md @@ -0,0 +1,27 @@ +# Resume Tailoring Skill (Codex Variant) + +This folder contains the Codex-specific version of the resume tailoring skill. + +- Root `SKILL.md` is kept for Claude Code. +- `codex/SKILL.md` is for Codex. + +## Install in Codex + +```bash +python3 ~/.codex/skills/.system/skill-installer/scripts/install-skill-from-github.py \ + --repo varunr89/resume-tailoring-skill \ + --ref master \ + --path codex \ + --name resume-tailoring +``` + +Restart Codex after installation. + +## Files + +- `SKILL.md` - Codex-only workflow +- `research-prompts.md` - company and role research templates +- `matching-strategies.md` - confidence scoring and mapping logic +- `branching-questions.md` - experience discovery patterns +- `multi-job-workflow.md` - batch workflow details +- `scripts/export_resume.py` - local DOCX/PDF export helper diff --git a/codex/SKILL.md b/codex/SKILL.md new file mode 100644 index 0000000..71d5a9a --- /dev/null +++ b/codex/SKILL.md @@ -0,0 +1,1323 @@ +--- +name: resume-tailoring +description: Use when creating tailored resumes for job applications - researches company/role, creates optimized templates, conducts branching experience discovery to surface undocumented skills, and generates professional multi-format resumes from user's resume library while maintaining factual integrity +--- + +# Resume Tailoring Skill + +## Overview + +Generates high-quality, tailored resumes optimized for specific job descriptions while maintaining factual integrity. Builds resumes around the holistic person by surfacing undocumented experiences through conversational discovery. + +This version is adapted for Codex environments (tool-agnostic research + local CLI document export). + +**Core Principle:** Truth-preserving optimization - maximize fit while maintaining factual integrity. 
Never fabricate experience, but intelligently reframe and emphasize relevant aspects. + +**Mission:** A person's ability to get a job should be based on their experiences and capabilities, not on their resume writing skills. + +## When to Use + +Use this skill when: +- User provides a job description and wants a tailored resume +- User has multiple existing resumes in markdown format +- User wants to optimize their application for a specific role/company +- User needs help surfacing and articulating undocumented experiences + +**DO NOT use for:** +- Generic resume writing from scratch (user needs existing resume library) +- Cover letters (different skill) +- LinkedIn profile optimization (different skill) + +## Quick Start + +**Required from user:** +1. Job description (text or URL) +2. Resume library location (defaults to `resumes/` in current directory) + +**Workflow:** +1. Build library from existing resumes +2. Research company/role +3. Create template (with user checkpoint) +4. Optional: Branching experience discovery +5. Match content with confidence scoring +6. Generate MD + DOCX + PDF + Report +7. 
User review → Optional library update + +## Implementation + +See supporting files: +- `research-prompts.md` - Structured prompts for company/role research +- `matching-strategies.md` - Content matching algorithms and scoring +- `branching-questions.md` - Experience discovery conversation patterns + +## Workflow Details + +### Multi-Job Detection + +**Triggers when user provides:** +- Multiple JD URLs (comma or newline separated) +- Phrases: "multiple jobs", "several positions", "batch", "3 jobs" +- List of companies/roles: "Microsoft PM, Google TPM, AWS PM" + +**Detection Logic:** + +```python +# Pseudo-code +def detect_multi_job(user_input): + indicators = [ + len(extract_urls(user_input)) > 1, + any(phrase in user_input.lower() for phrase in + ["multiple jobs", "several positions", "batch of", "3 jobs", "5 jobs"]), + count_company_mentions(user_input) > 1 + ] + return any(indicators) +``` + +**If detected:** +``` +"I see you have multiple job applications. Would you like to use +multi-job mode? + +BENEFITS: +- Shared experience discovery (faster - ask questions once for all jobs) +- Batch processing with progress tracking +- Incremental additions (add more jobs later) + +TIME COMPARISON (3 similar jobs): +- Sequential single-job: ~45 minutes (15 min × 3) +- Multi-job mode: ~40 minutes (15 min discovery + 8 min per job) + +Use multi-job mode? (Y/N)" +``` + +**If user confirms Y:** +- Use multi-job workflow (see multi-job-workflow.md) + +**If user confirms N or single job detected:** +- Use existing single-job workflow (Phase 0 onwards) + +**Backward Compatibility:** Single-job workflow completely unchanged. + +**Multi-Job Workflow:** + +When multi-job mode is activated, see `multi-job-workflow.md` for complete workflow. 
+ +**High-Level Multi-Job Process:** + +``` +┌─────────────────────────────────────────────────────────────┐ +│ PHASE 0: Intake & Batch Initialization │ +│ - Collect 3-5 job descriptions │ +│ - Initialize batch structure │ +│ - Run library initialization (once) │ +└─────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────┐ +│ PHASE 1: Aggregate Gap Analysis │ +│ - Extract requirements from all JDs │ +│ - Cross-reference against library │ +│ - Build unified gap map (deduplicate) │ +│ - Prioritize: Critical → Important → Job-specific │ +└─────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────┐ +│ PHASE 2: Shared Experience Discovery │ +│ - Single branching interview covering ALL gaps │ +│ - Multi-job context for each question │ +│ - Tag experiences with job relevance │ +│ - Enrich library with discoveries │ +└─────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────┐ +│ PHASE 3: Per-Job Processing (Sequential) │ +│ For each job: │ +│ ├─ Research (company + role benchmarking) │ +│ ├─ Template generation │ +│ ├─ Content matching (uses enriched library) │ +│ └─ Generation (MD + DOCX + Report) │ +│ Interactive or Express mode │ +└─────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────┐ +│ PHASE 4: Batch Finalization │ +│ - Generate batch summary │ +│ - User reviews all resumes together │ +│ - Approve/revise individual or batch │ +│ - Update library with approved resumes │ +└─────────────────────────────────────────────────────────────┘ +``` + +**Time Savings:** +- 3 jobs: ~40 min (vs 45 min sequential) = 11% savings +- 5 jobs: ~55 min (vs 75 min sequential) = 27% savings + +**Quality:** Same depth as single-job workflow (research, matching, generation) + +**See 
`multi-job-workflow.md` for complete implementation details.** + +### Phase 0: Library Initialization + +**Always runs first - builds fresh resume database** + +**Process:** + +1. **Locate resume directory:** + ``` + User provides path OR default to ./resumes/ + Validate directory exists + ``` + +2. **Scan for markdown files:** + ``` + Use shell/file tools to list markdown files (e.g., rg --files {resume_directory} | rg '\.md$') + Count files found + Announce: "Building resume library... found {N} resumes" + ``` + +3. **Parse each resume:** + For each resume file: + - Load content using available file-read tools + - Extract sections: roles, bullets, skills, education + - Identify patterns: bullet structure, length, formatting + +4. **Build experience database structure:** + ```json + { + "roles": [ + { + "role_id": "company_title_year", + "company": "Company Name", + "title": "Job Title", + "dates": "YYYY-YYYY", + "description": "Role summary", + "bullets": [ + { + "text": "Full bullet text", + "themes": ["leadership", "technical"], + "metrics": ["17x improvement", "$3M revenue"], + "keywords": ["cross-functional", "program"], + "source_resumes": ["resume1.md"] + } + ] + } + ], + "skills": { + "technical": ["Python", "Kusto", "AI/ML"], + "product": ["Roadmap", "Strategy"], + "leadership": ["Stakeholder mgmt"] + }, + "education": [...], + "user_preferences": { + "typical_length": "1-page|2-page", + "section_order": ["summary", "experience", "education"], + "bullet_style": "pattern" + } + } + ``` + +5. **Tag content automatically:** + - Themes: Scan for keywords (leadership, technical, analytics, etc.) 
+ - Metrics: Extract numbers, percentages, dollar amounts + - Keywords: Frequent technical terms, action verbs + +**Output:** In-memory database ready for matching + +**Code pattern:** +```python +# Pseudo-code for reference +library = { + "roles": [], + "skills": {}, + "education": [] +} + +for resume_file in glob("resumes/*.md"): + content = read(resume_file) + roles = extract_roles(content) + for role in roles: + role["bullets"] = tag_bullets(role["bullets"]) + library["roles"].append(role) + +return library +``` + +### Phase 1: Research Phase + +**Goal:** Build comprehensive "success profile" beyond just the job description + +**Inputs:** +- Job description (text or URL from user) +- Optional: Company name if not in JD + +**Process:** + +**1.1 Job Description Parsing:** +``` +Use research-prompts.md JD parsing template +Extract: requirements, keywords, implicit preferences, red flags, role archetype +``` + +**1.2 Company Research:** +``` +Use available web search/browsing tools: +- "{company} mission values culture" +- "{company} engineering blog" +- "{company} recent news" + +Synthesize: mission, values, business model, stage +``` + +**1.3 Role Benchmarking:** +``` +Search: "site:linkedin.com {job_title} {company}" +Open top 3-5 relevant profiles/pages and extract patterns +Analyze: common backgrounds, skills, terminology + +If sparse results, try similar companies +``` + +**1.4 Success Profile Synthesis:** +``` +Combine all research into structured profile (see research-prompts.md template) + +Include: +- Core requirements (must-have) +- Valued capabilities (nice-to-have) +- Cultural fit signals +- Narrative themes +- Terminology map (user's background → their language) +- Risk factors + mitigations +``` + +**Checkpoint:** +``` +Present success profile to user: + +"Based on my research, here's what makes candidates successful for this role: + +{SUCCESS_PROFILE_SUMMARY} + +Key findings: +- {Finding 1} +- {Finding 2} +- {Finding 3} + +Does this match your 
understanding? Any adjustments?" + +Wait for user confirmation before proceeding. +``` + +**Output:** Validated success profile document + +### Phase 2: Template Generation + +**Goal:** Create resume structure optimized for this specific role + +**Inputs:** +- Success profile (from Phase 1) +- User's resume library (from Phase 0) + +**Process:** + +**2.1 Analyze User's Resume Library:** +``` +Extract from library: +- All roles, titles, companies, date ranges +- Role archetypes (technical contributor, manager, researcher, specialist) +- Experience clusters (what domains/skills appear frequently) +- Career progression and narrative +``` + +**2.2 Role Consolidation Decision:** + +**When to consolidate:** +- Same company, similar responsibilities +- Target role values continuity over granular progression +- Combined narrative stronger than separate +- Page space constrained + +**When to keep separate:** +- Different companies (ALWAYS separate) +- Dramatically different responsibilities that both matter +- Target role values specific progression story +- One position has significantly more relevant experience + +**Decision template:** +``` +For {Company} with {N} positions: + +OPTION A (Consolidated): +Title: "{Combined_Title}" +Dates: "{First_Start} - {Last_End}" +Rationale: {Why consolidation makes sense} + +OPTION B (Separate): +Position 1: "{Title}" ({Dates}) +Position 2: "{Title}" ({Dates}) +Rationale: {Why separate makes sense} + +RECOMMENDED: Option {A/B} because {reasoning} +``` + +**2.3 Title Reframing Principles:** + +**Core rule:** Stay truthful to what you did, emphasize aspect most relevant to target + +**Strategies:** + +1. **Emphasize different aspects:** + - "Graduate Researcher" → "Research Software Engineer" (if coding-heavy) + - "Data Science Lead" → "Technical Program Manager" (if leadership) + +2. 
**Use industry-standard terminology:** + - "Scientist III" → "Senior Research Scientist" (clearer seniority) + - "Program Coordinator" → "Project Manager" (standard term) + +3. **Add specialization when truthful:** + - "Engineer" → "ML Engineer" (if ML work substantial) + - "Researcher" → "Computational Ecologist" (if computational methods) + +4. **Adjust seniority indicators:** + - "Lead" vs "Senior" vs "Staff" based on scope + +**Constraints:** +- NEVER claim work you didn't do +- NEVER inflate seniority beyond defensible +- Company name and dates MUST be exact +- Core responsibilities MUST be accurate + +**2.4 Generate Template Structure:** + +```markdown +## Professional Summary +[GUIDANCE: {X} sentences emphasizing {themes from success profile}] +[REQUIRED ELEMENTS: {keywords from JD}] + +## Key Skills +[STRUCTURE: {2-4 categories based on JD structure}] +[SOURCE: Extract from library matching success profile] + +## Professional Experience + +### [ROLE 1 - Most Recent/Relevant] +[CONSOLIDATION: {merge X positions OR keep separate}] +[TITLE OPTIONS: + A: {emphasize aspect 1} + B: {emphasize aspect 2} + Recommended: {option with rationale}] +[BULLET ALLOCATION: {N bullets based on relevance + recency}] +[GUIDANCE: Emphasize {themes}, look for {experience types}] + +Bullet 1: [SEEKING: {requirement type}] +Bullet 2: [SEEKING: {requirement type}] +... + +### [ROLE 2] +... + +## Education +[PLACEMENT: {top if required/recent, bottom if experience-heavy}] + +## [Optional Sections] +[INCLUDE IF: {criteria from success profile}] +``` + +**Checkpoint:** +``` +Present template to user: + +"Here's the optimized resume structure for this role: + +STRUCTURE: +{Section order and rationale} + +ROLE CONSOLIDATION: +{Decisions with options} + +TITLE REFRAMING: +{Proposed titles with alternatives} + +BULLET ALLOCATION: +Role 1: {N} bullets (most relevant) +Role 2: {N} bullets +... + +Does this structure work? Any adjustments to: +- Role consolidation? +- Title reframing? 
+- Bullet allocation?" + +Wait for user approval before proceeding. +``` + +**Output:** Approved template skeleton with guidance for each section + +### Phase 2.5: Experience Discovery (OPTIONAL) + +**Goal:** Surface undocumented experiences through conversational discovery + +**When to trigger:** +``` +After template approval, if gaps identified: + +"I've identified {N} gaps or areas where we have weak matches: +- {Gap 1}: {Current confidence} +- {Gap 2}: {Current confidence} +... + +Would you like to do a structured brainstorming session to surface +any experiences you haven't documented yet? + +This typically takes 10-15 minutes and often uncovers valuable content." + +User can accept or skip. +``` + +**Branching Interview Process:** + +**Approach:** Conversational with follow-up questions based on answers + +**For each gap, conduct branching dialogue (see branching-questions.md):** + +1. **Start with open probe:** + - Technical gap: "Have you worked with {skill}?" + - Soft skill gap: "Tell me about times you've {demonstrated_skill}" + - Recent work: "What have you worked on recently?" + +2. **Branch based on answer:** + - YES/Strong → Deep dive (scale, challenges, metrics) + - INDIRECT → Explore role and transferability + - ADJACENT → Explore related experience + - PERSONAL → Assess recency and substance + - NO → Try broader category or move on + +3. **Follow-up systematically:** + - Ask "what," "how," "why" to get details + - Quantify: "Any metrics?" + - Contextualize: "Was this production?" + - Validate: "Does this address the gap?" + +4. 
**Capture immediately:** + - Document experience as shared + - Ask clarifying questions (dates, scope, impact) + - Help articulate as resume bullet + - Tag which gap(s) it addresses + +**Capture Structure:** +```markdown +## Newly Discovered Experiences + +### Experience 1: {Brief description} +- Context: {Where/when} +- Scope: {Scale, duration, impact} +- Addresses: {Which gaps} +- Bullet draft: "{Achievement-focused bullet}" +- Confidence: {How well fills gap - percentage} + +### Experience 2: ... +``` + +**Integration Options:** + +After discovery session: +``` +"Great! I captured {N} new experiences. For each one: + +1. ADD TO CURRENT RESUME - Integrate now +2. ADD TO LIBRARY ONLY - Save for future, not needed here +3. REFINE FURTHER - Think more about articulation +4. DISCARD - Not relevant enough + +Let me know for each experience." +``` + +**Important Notes:** +- Keep truthfulness bar high - help articulate, NEVER fabricate +- Focus on gaps and weak matches, not strong areas +- Time-box if needed (10-15 minutes typical) +- User can skip entirely if confident in library +- Recognize when to move on - don't exhaust user + +**Output:** New experiences integrated into library, ready for matching + +### Phase 3: Assembly Phase + +**Goal:** Fill approved template with best-matching content, with transparent scoring + +**Inputs:** +- Approved template (from Phase 2) +- Resume library + discovered experiences (from Phase 0 + 2.5) +- Success profile (from Phase 1) + +**Process:** + +**3.1 For Each Template Slot:** + +1. **Extract all candidate bullets from library** + - All bullets from library database + - All newly discovered experiences + - Include source resume for each + +2. 
**Score each candidate** (see matching-strategies.md) + - Direct match (40%): Keywords, domain, technology, outcome + - Transferable (30%): Same capability, different context + - Adjacent (20%): Related tools, methods, problem space + - Impact (10%): Achievement type alignment + + Overall = (Direct × 0.4) + (Transfer × 0.3) + (Adjacent × 0.2) + (Impact × 0.1) + +3. **Rank candidates by score** + - Sort high to low + - Group by confidence band: + * 90-100%: DIRECT + * 75-89%: TRANSFERABLE + * 60-74%: ADJACENT + * <60%: WEAK/GAP + +4. **Present top 3 matches with analysis:** + ``` + TEMPLATE SLOT: {Role} - Bullet {N} + SEEKING: {Requirement description} + + MATCHES: + [DIRECT - 95%] "{bullet_text}" + ✓ Direct: {what matches directly} + ✓ Transferable: {what transfers} + ✓ Metrics: {quantified impact} + Source: {resume_name} + + [TRANSFERABLE - 78%] "{bullet_text}" + ✓ Transferable: {what transfers} + ✓ Adjacent: {what's adjacent} + ⚠ Gap: {what's missing} + Source: {resume_name} + + [ADJACENT - 62%] "{bullet_text}" + ✓ Adjacent: {what's related} + ⚠ Gap: {what's missing} + Source: {resume_name} + + RECOMMENDATION: Use DIRECT match (95%) + ALTERNATIVE: If avoiding repetition, use TRANSFERABLE (78%) with reframing + ``` + +5. **Handle gaps (confidence <60%):** + ``` + GAP IDENTIFIED: {Requirement} + + BEST AVAILABLE: {score}% - "{bullet_text}" + + REFRAME OPPORTUNITY: {If applicable} + Original: "{text}" + Reframed: "{adjusted_text}" (truthful because {reason}) + New confidence: {score}% + + OPTIONS: + 1. Use reframed version ({new_score}%) + 2. Acknowledge gap in cover letter + 3. Omit bullet slot (reduce allocation) + 4. 
Use best available with disclosure + + RECOMMENDATION: {Most appropriate option} + ``` + +**3.2 Content Reframing:** + +When good match (>60%) but terminology misaligned: + +**Apply strategies from matching-strategies.md:** +- Keyword alignment (preserve meaning, adjust terms) +- Emphasis shift (same facts, different focus) +- Abstraction level (adjust technical specificity) +- Scale emphasis (highlight relevant aspects) + +**Show before/after for transparency:** +``` +REFRAMING APPLIED: +Bullet: {template_slot} + +Original: "{original_bullet}" +Source: {resume_name} + +Reframed: "{reframed_bullet}" +Changes: {what changed and why} +Truthfulness: {why this is accurate} +``` + +**Checkpoint:** +``` +"I've matched content to your template. Here's the complete mapping: + +COVERAGE SUMMARY: +- Direct matches: {N} bullets ({percentage}%) +- Transferable: {N} bullets ({percentage}%) +- Adjacent: {N} bullets ({percentage}%) +- Gaps: {N} ({percentage}%) + +REFRAMINGS APPLIED: {N} +- {Example 1} +- {Example 2} + +GAPS IDENTIFIED: +- {Gap 1}: {Recommendation} +- {Gap 2}: {Recommendation} + +OVERALL JD COVERAGE: {percentage}% + +Review the detailed mapping below. Any adjustments to: +- Match selections? +- Reframings? +- Gap handling?" + +[Present full detailed mapping] + +Wait for user approval before generation. 
+```
+
+**Output:** Complete bullet-by-bullet mapping with confidence scores and reframings
+
+### Phase 4: Generation Phase
+
+**Goal:** Create professional multi-format outputs
+
+**Inputs:**
+- Approved content mapping (from Phase 3)
+- User's formatting preferences (from library analysis)
+- Target role information (from Phase 1)
+
+**Process:**
+
+**4.1 Markdown Generation:**
+
+**Compile mapped content into clean markdown:**
+
+```markdown
+# {User_Name}
+
+{Contact_Info}
+
+---
+
+## Professional Summary
+
+{Summary_from_template}
+
+---
+
+## Key Skills
+
+**{Category_1}:**
+- {Skills_from_library_matching_profile}
+
+**{Category_2}:**
+- {Skills_from_library_matching_profile}
+
+{Repeat for all categories}
+
+---
+
+## Professional Experience
+
+### {Job_Title}
+**{Company} | {Location} | {Dates}**
+
+{Role_summary_if_applicable}
+
+• {Bullet_1_from_mapping}
+• {Bullet_2_from_mapping}
+...
+
+### {Next_Role}
+...
+
+---
+
+## Education
+
+**{Degree}** | {Institution} ({Year})
+**{Degree}** | {Institution} ({Year})
+```
+
+**Use user's preferences:**
+- Formatting style from library analysis
+- Bullet structure pattern
+- Section ordering
+- Typical length (1-page vs 2-page)
+
+**Output:** `{Name}_{Company}_{Role}_Resume.md`
+
+**4.2 DOCX Generation:**
+
+Use local CLI conversion tools in this order:
+
+```
+Preferred:
+1) scripts/export_resume.py --input {resume.md} --formats docx
+2) pandoc {resume.md} -o {resume.docx}
+
+If no DOCX converter is available:
+- Continue with markdown output
+- Tell user DOCX export was skipped due to missing dependencies
+```
+
+Create Word document with:
+- Professional fonts (Calibri 11pt body, 12pt headers)
+- Proper spacing (single within sections, space between)
+- Clean bullet formatting (proper numbering config, NOT unicode)
+- Header with contact information
+- Appropriate margins (0.5-1 inch)
+- Bold/italic emphasis (company names, titles, dates)
+- Page breaks if 2-page resume
+
+**Output:** 
`{Name}_{Company}_{Role}_Resume.docx` + +**4.3 PDF Generation (Optional):** + +**If user requests PDF:** + +``` +Preferred: +1) scripts/export_resume.py --input {resume.md} --formats pdf +2) pandoc {resume.md} -o {resume.pdf} [with available PDF engine] +3) soffice --headless --convert-to pdf {resume.docx} + +Ensure formatting preservation +Professional appearance for direct submission +``` + +**Output:** `{Name}_{Company}_{Role}_Resume.pdf` + +**4.4 Generation Summary Report:** + +**Create metadata file:** + +```markdown +# Resume Generation Report +**{Role} at {Company}** + +**Date Generated:** {timestamp} + +## Target Role Summary +- Company: {Company} +- Position: {Role} +- IC Level: {If known} +- Focus Areas: {Key areas} + +## Success Profile Summary +- Key Requirements: {top 5} +- Cultural Fit Signals: {themes} +- Risk Factors Addressed: {mitigations} + +## Content Mapping Summary +- Total bullets: {N} +- Direct matches: {N} ({percentage}%) +- Transferable: {N} ({percentage}%) +- Adjacent: {N} ({percentage}%) +- Gaps identified: {list} + +## Reframing Applied +- {bullet}: {original} → {reframed} [Reason: {why}] +... + +## Source Resumes Used +- {resume1}: {N} bullets +- {resume2}: {N} bullets +... + +## Gaps Addressed + +### Before Experience Discovery: +{Gap analysis showing initial state} + +### After Experience Discovery: +{Gap analysis showing final state} + +### Remaining Gaps: +{Any unresolved gaps with recommendations} + +## Key Differentiators for This Role +{What makes user uniquely qualified} + +## Recommendations for Interview Prep +- Stories to prepare +- Questions to expect +- Gaps to address +``` + +**Output:** `{Name}_{Company}_{Role}_Resume_Report.md` + +**Present to user:** +``` +"Your tailored resume has been generated! 
+ +FILES CREATED: +- {Name}_{Company}_{Role}_Resume.md +- {Name}_{Company}_{Role}_Resume.docx +- {Name}_{Company}_{Role}_Resume_Report.md +{- {Name}_{Company}_{Role}_Resume.pdf (if requested)} + +QUALITY METRICS: +- JD Coverage: {percentage}% +- Direct Matches: {percentage}% +- Newly Discovered: {N} experiences + +Review the files and let me know: +1. Save to library (recommended) +2. Need revisions +3. Save but don't add to library" +``` + +### Phase 5: Library Update (CONDITIONAL) + +**Goal:** Optionally add successful resume to library for future use + +**When:** After user reviews and approves generated resume + +**Checkpoint Question:** +``` +"Are you satisfied with this resume? + +OPTIONS: +1. YES - Save to library + → Adds resume to permanent location + → Rebuilds library database + → Makes new content available for future resumes + +2. NO - Need revisions + → What would you like to adjust? + → Make changes and re-present + +3. SAVE BUT DON'T ADD TO LIBRARY + → Keep files in current location + → Don't enrich database + → Useful for experimental resumes + +Which option?" +``` + +**If Option 1 (YES - Save to library):** + +**Process:** + +1. **Move resume to library:** + ``` + Source: {current_directory}/{Name}_{Company}_{Role}_Resume.md + Destination: {resume_library}/{Name}_{Company}_{Role}_Resume.md + + Also move: + - .docx file + - .pdf file (if exists) + - _Report.md file + ``` + +2. **Rebuild library database:** + ``` + Re-run Phase 0 library initialization + Parse newly created resume + Add bullets to experience database + Update keyword/theme indices + Tag with metadata: + - target_company: {Company} + - target_role: {Role} + - generated_date: {timestamp} + - jd_coverage: {percentage} + - success_profile: {reference to profile} + ``` + +3. 
**Preserve generation metadata:** + ```json + { + "resume_id": "{Name}_{Company}_{Role}", + "generated": "{timestamp}", + "source_resumes": ["{resume1}", "{resume2}"], + "reframings": [ + { + "original": "{text}", + "reframed": "{text}", + "reason": "{why}" + } + ], + "match_scores": { + "bullet_1": 95, + "bullet_2": 87, + ... + }, + "newly_discovered": [ + { + "experience": "{description}", + "bullet": "{text}", + "addresses_gap": "{gap}" + } + ] + } + ``` + +4. **Announce completion:** + ``` + "Resume saved to library! + + Library updated: + - Total resumes: {N} + - New content variations: {N} + - Newly discovered experiences added: {N} + + This resume and its new content are now available for future tailoring sessions." + ``` + +**If Option 2 (NO - Need revisions):** + +``` +"What would you like to adjust?" + +[Collect user feedback] +[Make requested changes] +[Re-run relevant phases] +[Re-present for approval] + +[Repeat until satisfied or user cancels] +``` + +**If Option 3 (SAVE BUT DON'T ADD TO LIBRARY):** + +``` +"Resume files saved to current directory: +- {Name}_{Company}_{Role}_Resume.md +- {Name}_{Company}_{Role}_Resume.docx +- {Name}_{Company}_{Role}_Resume_Report.md + +Not added to library - you can manually move later if desired." +``` + +**Benefits of Library Update:** +- Grows library with each successful resume +- New bullet variations become available +- Reframings that work can be reused +- Discovered experiences permanently captured +- Future sessions start with richer library +- Self-improving system over time + +**Output:** Updated library database + metadata preservation (if Option 1) + +## Error Handling & Edge Cases + +**Edge Case 1: Insufficient Resume Library** +``` +SCENARIO: User has only 1-2 resumes, limited content + +HANDLING: +"⚠️ Limited resume library detected ({N} resumes). 
+ +This may result in: +- Fewer matching options +- More gaps in coverage +- Less variety in bullet phrasing + +RECOMMENDATIONS: +- Proceed with available content (I'll do my best!) +- Consider adding more resumes after this generation +- Experience Discovery phase will be especially valuable + +Continue? (Y/N)" +``` + +**Edge Case 2: No Good Matches (confidence <60% for critical requirement)** +``` +SCENARIO: Template slot requires experience user doesn't have + +HANDLING: +"❌ GAP: {Requirement} + +No matches found with confidence >60% + +OPTIONS: +1. Run Experience Discovery - might surface undocumented work +2. Reframe best available ({score}%) - I'll show you the reframing +3. Omit bullet slot - reduce template allocation +4. Note for cover letter - emphasize learning ability + +Which approach?" + +[Don't force matches - be transparent about gaps] +``` + +**Edge Case 3: Research Phase Failures** +``` +SCENARIO: Web research fails, LinkedIn unavailable, company info sparse + +HANDLING: +"⚠️ Limited company research available. + +What I found: +- {Available info} + +What's missing: +- {Missing areas} + +Falling back to job description-only analysis. +Result: Template will be optimized for JD but may miss cultural nuances. + +Do you have additional context about: +- Company culture? +- Team structure? +- Technologies used? + +(Optional - you can share or skip)" + +[Proceed with best-effort approach] +``` + +**Edge Case 4: Job Description Quality Issues** +``` +SCENARIO: Vague JD, missing requirements, poorly written + +HANDLING: +"⚠️ Job description lacks detail in areas: +- {Missing area 1} +- {Missing area 2} + +This may limit template optimization. 
+ +Do you have additional context about: +- {Question 1} +- {Question 2} + +(Optional - I'll work with what's available)" + +[Extract what's possible, proceed] +``` + +**Edge Case 5: Ambiguous Role Consolidation** +``` +SCENARIO: Unclear whether to merge roles or keep separate + +HANDLING: +"🤔 Ambiguous consolidation decision for {Company} + +OPTION A (Consolidated): {Rationale} +OPTION B (Separate): {Rationale} + +Both are defensible. Which do you prefer? + +(This becomes your preference for similar situations)" + +[Remember preference for future] +``` + +**Edge Case 6: Resume Length Constraints** +``` +SCENARIO: Too much good content, exceeds 2 pages + +HANDLING: +"⚠️ Content exceeds 2 pages (current: {N} bullets, ~{pages} pages) + +PRUNING SUGGESTIONS (ranked by relevance): +Remove: +- {Bullet X}: {score}% match (lowest) +- {Bullet Y}: {score}% match +... + +Keep all bullets and accept >2 pages? +OR +Remove {N} bullets to fit 2 pages? + +Your preference?" + +[User decides priority] +``` + +**Error Recovery:** +- All checkpoints allow going back to previous phase +- User can request adjustments at any checkpoint +- Generation failures (DOCX/PDF) fall back to markdown-only +- Progress saved between phases (can resume if interrupted) + +**Graceful Degradation:** +- Research limited → Fall back to JD-only analysis +- Library small → Work with available + emphasize discovery +- Matches weak → Transparent gap identification +- Generation fails → Provide markdown + error details + +## Usage Examples + +**Example 1: Internal Role (Same Company)** +``` +USER: "I want to apply for Principal PM role in 1ES team at Microsoft. + Here's the JD: {paste}" + +SKILL: +1. Library Build: Finds 29 resumes +2. Research: Microsoft 1ES team, internal culture, role benchmarking +3. Template: Features PM2 Azure Eng Systems role (most relevant) +4. Discovery: Surfaces VS Code extension, Bhavana AI side project +5. Assembly: 92% JD coverage, 75% direct matches +6. 
Generate: MD + DOCX + Report +7. User approves → Library updated with new resume + 6 discovered experiences + +RESULT: Highly competitive application leveraging internal experience +``` + +**Example 2: Career Transition (Different Domain)** +``` +USER: "I'm a TPM trying to transition to ecology PM role. JD: {paste}" + +SKILL: +1. Library Build: Finds existing TPM resumes +2. Research: Ecology sector, sustainability focus, cross-domain transfers +3. Template: Reframes "Technical Program Manager" → "Program Manager, + Environmental Systems" emphasizing systems thinking +4. Discovery: Surfaces volunteer conservation work, graduate research in + environmental modeling +5. Assembly: 65% JD coverage - flags gaps in domain-specific knowledge +6. Generate: Resume + gap analysis with cover letter recommendations + +RESULT: Bridges technical skills with environmental domain +``` + +**Example 3: Career Gap Handling** +``` +USER: "I have a 2-year gap while starting a company. JD: {paste}" + +SKILL: +1. Library Build: Finds pre-gap resumes +2. Research: Standard analysis +3. Template: Includes startup as legitimate role +4. Discovery: Surfaces skills developed during startup (fundraising, + product development, team building) +5. Assembly: Frames gap as entrepreneurial experience +6. Generate: Resume presenting gap as valuable experience + +RESULT: Gap becomes strength showing initiative and diverse skills +``` + +**Example 4: Multi-Job Batch (3 Similar Roles)** +``` +USER: "I want to apply for these 3 TPM roles: + 1. Microsoft 1ES Principal PM + 2. Google Cloud Senior TPM + 3. AWS Container Services Senior PM + Here are the JDs: {paste 3 JDs}" + +SKILL: +1. Multi-job detection: Triggered (3 JDs detected) +2. Intake: Collects all 3 JDs, initializes batch +3. Library Build: Finds 29 resumes (once) +4. Gap Analysis: Identifies 14 gaps, 8 unique after deduplication +5. 
Shared Discovery: 30-minute session surfaces 5 new experiences + - Kubernetes CI/CD for nonprofits + - Azure migration for university lab + - Cross-functional team leadership examples + - Recent hackathon project + - Open source contributions +6. Per-Job Processing (×3): + - Job 1 (Microsoft): 85% coverage, emphasizes Azure/1ES alignment + - Job 2 (Google): 88% coverage, emphasizes technical depth + - Job 3 (AWS): 78% coverage, addresses AWS gap in cover letter recs +7. Batch Finalization: All 3 resumes reviewed, approved, added to library + +RESULT: 3 high-quality resumes in 40 minutes vs 45 minutes sequential + 5 new experiences captured, available for future applications + Average coverage: 84%, all critical gaps resolved +``` + +**Example 5: Incremental Batch Addition** +``` +WEEK 1: +USER: "I want to apply for 3 jobs: {Microsoft, Google, AWS}" +SKILL: [Processes batch as above, completes in 40 min] + +WEEK 2: +USER: "I found 2 more jobs: Stripe and Meta. Add them to my batch?" +SKILL: +1. Load existing batch (includes 5 previously discovered experiences) +2. Intake: Adds Job 4 (Stripe), Job 5 (Meta) +3. Incremental Gap Analysis: Only 3 new gaps (vs 14 original) + - Payment systems (Stripe-specific) + - Social networking (Meta-specific) + - React/frontend (both) +4. Incremental Discovery: 10-minute session for new gaps only + - Surfaces payment processing side project + - React work from bootcamp + - Large-scale system design course +5. Per-Job Processing (×2): Jobs 4, 5 processed independently +6. 
Updated Batch Summary: Now 5 jobs total, 8 experiences discovered + +RESULT: 2 additional resumes in 20 minutes (vs 30 min if starting from scratch) + Time saved by not re-asking 8 previous gaps: ~20 minutes +``` + +## Testing Guidelines + +**Manual Testing Checklist:** + +**Test 1: Happy Path** +``` +- Provide JD with clear requirements +- Library with 10+ resumes +- Run all phases without skipping +- Verify generated files +- Check library update +PASS CRITERIA: +- All files generated correctly +- JD coverage >70% +- No errors in any phase +``` + +**Test 2: Minimal Library** +``` +- Provide only 2 resumes +- Run through workflow +- Verify gap handling +PASS CRITERIA: +- Graceful warning about limited library +- Still produces reasonable output +- Gaps clearly identified +``` + +**Test 3: Research Failures** +``` +- Use obscure company with minimal online presence +- Verify fallback to JD-only +PASS CRITERIA: +- Warning about limited research +- Proceeds with JD analysis +- Template still reasonable +``` + +**Test 4: Experience Discovery Value** +``` +- Run with deliberate gaps in library +- Conduct experience discovery +- Verify new experiences integrated +PASS CRITERIA: +- Discovers genuine undocumented experiences +- Integrates into final resume +- Improves JD coverage +``` + +**Test 5: Title Reframing** +``` +- Test various role transitions +- Verify title reframing suggestions +PASS CRITERIA: +- Multiple options provided +- Truthfulness maintained +- Rationales clear +``` + +**Test 6: Multi-format Generation** +``` +- Generate MD, DOCX, PDF, Report +- Verify formatting consistency +PASS CRITERIA: +- All formats readable +- Formatting professional +- Content identical across formats +``` + +**Regression Testing:** +``` +After any SKILL.md changes: +1. Re-run Test 1 (happy path) +2. Verify no functionality broken +3. 
Commit only if passes +``` diff --git a/codex/branching-questions.md b/codex/branching-questions.md new file mode 100644 index 0000000..f033641 --- /dev/null +++ b/codex/branching-questions.md @@ -0,0 +1,209 @@ +# Branching Experience Discovery Questions + +## Overview + +Conversational discovery with follow-up questions based on answers. NOT a static questionnaire - each answer informs the next question. + +## Multi-Job Context + +When running discovery for multiple jobs (multi-job mode), provide context about which jobs the gap appears in: + +**Template:** +``` +"{SKILL} experience appears in {N} of your target jobs ({Company1}, {Company2}, ...). + +This is a {HIGH/MEDIUM/LOW}-LEVERAGE gap - addressing it helps {N/some/one} application(s). + +Current best match: {X}% confidence ('{best_match_text}') + +{Standard branching question}" +``` + +**Leverage Classification:** +- HIGH-LEVERAGE: Appears in 3+ jobs (critical gaps) +- MEDIUM-LEVERAGE: Appears in 2 jobs (important gaps) +- LOW-LEVERAGE: Appears in 1 job (job-specific gaps) + +**Example:** + +``` +"Cross-functional leadership appears in 2 of your target jobs (Microsoft, Google). + +This is a MEDIUM-LEVERAGE gap - addressing it helps 2 applications. + +Current best match: 67% confidence ('Led team of 3 engineers on AI project') + +Tell me about times you've led or coordinated across multiple teams or functions." +``` + +After providing context, proceed with standard branching patterns below. + +## Technical Skill Gap Pattern + +**Template:** +``` +INITIAL PROBE: +"I noticed the job requires {SKILL}. Have you worked with {SKILL} or {RELATED_AREA}?" + +BRANCH A - If YES (Direct Experience): + → "Tell me more - what did you use it for?" + → "What scale? {Relevant metric}?" + → "Was this production or development/testing?" + → "What specific challenges did you solve?" + → "Any metrics on {performance/reliability/cost}?" 
+ → CAPTURE: Build detailed bullet + +BRANCH B - If INDIRECT: + → "What was your role in relation to the {SKILL} work?" + → "Did you {action1}, {action2}, or {action3}?" + → "What did you learn about {SKILL}?" + → ASSESS: Transferable experience? + → CAPTURE: Frame as support/enabling role if substantial + +BRANCH C - If ADJACENT: + → "Tell me about your {ADJACENT_TECH} experience" + → "Did you do {relevant_activity}?" + → ASSESS: Close enough to mention? + → CAPTURE: Frame as related expertise + +BRANCH D - If PERSONAL/LEARNING: + → "Any personal projects, courses, or self-learning?" + → "What did you build or deploy?" + → "How recent was this?" + → ASSESS: Strong enough if recent and substantive + → CAPTURE: Consider if gap critical + +BRANCH E - If COMPLETE NO: + → "Any other {broader_category} work?" + → If yes: Explore that + → If no: Move to next gap +``` + +## Soft Skill / Experience Gap Pattern + +**Template:** +``` +INITIAL PROBE: +"The role emphasizes {SOFT_SKILL}. Tell me about times you've {demonstrated_that_skill}." + +BRANCH A - If STRONG EXAMPLE: + → "What {entities} were involved?" + → "What was the challenge?" + → "How did you {drive_outcome}?" + → "What was the result? Metrics?" + → "Any {obstacle} you had to navigate?" + → CAPTURE: Detailed bullet with impact + +BRANCH B - If VAGUE/UNCERTAIN: + → "Let me ask differently - have you ever {reframed_question}?" + → "What was that situation?" + → "How many {stakeholders}?" + → "What made it challenging?" + → CAPTURE: Help articulate clearly + +BRANCH C - If PROJECT-SPECIFIC: + → "Tell me more about that project" + → "What was your role vs. others?" + → "Who did you coordinate with?" + → "How did you ensure alignment?" + → ASSESS: Enough depth? + → CAPTURE: Frame as leadership if substantial + +BRANCH D - If VOLUNTEER/SIDE WORK: + → "Interesting - tell me more" + → "What was scope and timeline?" + → "What skills relate to this job?" + → "Measurable outcomes?" + → ASSESS: Relevant enough? 
+ → CAPTURE: Include if demonstrates capability +``` + +## Recent Work Probe Pattern + +**Template:** +``` +INITIAL PROBE: +"What have you been working on in the last 6 months that isn't in your resumes yet?" + +BRANCH A - If DESCRIBES PROJECT: + → "Tell me more - what was your role?" + → "What technologies/methods?" + → "What problem were you solving?" + → "What was the impact?" + → CHECK: "Does this address {gap_area}?" + → CAPTURE: Create bullet if substantive + +BRANCH B - If MENTIONS MULTIPLE: + → "Let's explore each. Starting with {first}..." + → Go through systematically + → Prioritize by gap relevance + → CAPTURE: Multiple bullets if relevant + +BRANCH C - If "NOTHING NEW": + → "What about small improvements, process changes, learning?" + → "New tools or technologies?" + → "Mentoring, teaching, knowledge sharing?" + → "Anything that might seem too small?" + → CAPTURE: Even small items can fill gaps +``` + +## Key Principles + +**1. Start Broad, Go Narrow:** +- Initial question is open-ended +- Follow-ups drill into specifics based on what they share +- Don't ask about details they already covered + +**2. Listen and Branch Dynamically:** +- Promising answer → explore deeper +- "No" answer → try adjacent probe or move on +- Unclear → rephrase or provide examples +- Stuck → offer concrete scenarios + +**3. Adapt to Communication Style:** +- Verbose: "What's the key takeaway?" +- Terse: Provide examples, "Was it like X or Y?" +- Uncertain: Validate, offer prompts + +**4. Recognize When to Move On:** +- After 2-3 attempts with nothing → gracefully move on +- Don't exhaust user forcing content that doesn't exist +- Some gaps okay - flag for cover letter + +**5. Cross-Reference During Discovery:** +- "Earlier you mentioned {X}, does that relate here too?" +- "This sounds similar to {Y} - connected?" +- Build connections user might not see + +## Example Complete Flow + +``` +SKILL: "I see the job requires CI/CD experience. Have you worked with CI/CD pipelines?" 
+ +USER: "Yes, I set up GitHub Actions for my side project." + +SKILL: "Tell me more about that - what does the pipeline do?" [BRANCH: Depth] + +USER: "It runs tests and deploys to cloud when I push code." + +SKILL: "What testing framework do you use?" [BRANCH: Technical details] + +USER: "pytest for my Python code." + +SKILL: "Excellent. What's deployed - is this a production app?" [BRANCH: Impact/scale] + +USER: "Yeah, it's a tool used by 3 nonprofits right now." + +SKILL: "That's substantial! Have you dealt with any flaky tests or reliability issues?" + [BRANCH: Real-world challenges] + +USER: "Actually yes, I had to add retries for some API-dependent tests." + +SKILL: "Perfect - that shows practical troubleshooting. Any metrics on build time + or deployment frequency?" [BRANCH: Quantify] + +USER: "Deploys take about 3 minutes, and I deploy several times a week." + +[CAPTURED: Complete picture - hands-on CI/CD, pytest, flaky test handling, +production deployment. Directly fills gap with concrete details.] +``` diff --git a/codex/matching-strategies.md b/codex/matching-strategies.md new file mode 100644 index 0000000..131e8da --- /dev/null +++ b/codex/matching-strategies.md @@ -0,0 +1,162 @@ +# Content Matching Strategies + +## Overview + +Match experiences from library to template slots with transparent confidence scoring. + +## Matching Criteria (Weighted) + +**1. Direct Match (40%)** +- Keywords overlap with JD/success profile +- Same domain/technology mentioned +- Same type of outcome required +- Same scale or complexity level + +**Scoring:** +- 90-100%: Exact match (same skill, domain, context) +- 70-89%: Strong match (same skill, different domain) +- 50-69%: Good match (overlapping keywords, similar outcomes) +- <50%: Weak direct match + +**2. 
Transferable Skills (30%)** +- Same capability in different context +- Leadership in different domain +- Technical problem-solving in different stack +- Similar scale/complexity in different industry + +**Scoring:** +- 90-100%: Directly transferable (process, skill generic) +- 70-89%: Mostly transferable (some domain translation needed) +- 50-69%: Partially transferable (analogy required) +- <50%: Stretch to call transferable + +**3. Adjacent Experience (20%)** +- Touched on skill as secondary responsibility +- Used related tools/methodologies +- Worked in related problem space +- Supporting role in relevant area + +**Scoring:** +- 90-100%: Closely adjacent (just different framing) +- 70-89%: Clearly adjacent (related but distinct) +- 50-69%: Somewhat adjacent (requires explanation) +- <50%: Loosely adjacent + +**4. Impact Alignment (10%)** +- Achievement type matches what role values +- Quantitative metrics (if JD emphasizes data-driven) +- Team outcomes (if JD emphasizes collaboration) +- Innovation (if JD emphasizes creativity) +- Scale (if JD emphasizes hyperscale) + +**Scoring:** +- 90-100%: Perfect impact alignment +- 70-89%: Strong impact alignment +- 50-69%: Moderate impact alignment +- <50%: Weak impact alignment + +## Overall Confidence Score + +``` +Overall = (Direct × 0.4) + (Transferable × 0.3) + (Adjacent × 0.2) + (Impact × 0.1) +``` + +**Confidence Bands:** +- 90-100%: DIRECT - Use with confidence +- 75-89%: TRANSFERABLE - Strong candidate +- 60-74%: ADJACENT - Acceptable with reframing +- 45-59%: WEAK - Consider only if no better option +- <45%: GAP - Flag as unaddressed requirement + +## Content Reframing Strategies + +**When to reframe:** Good match (>60%) but language doesn't align with target terminology + +**Strategy 1: Keyword Alignment** +``` +Preserve meaning, adjust terminology + +Before: "Led experimental design and data analysis programs" +After: "Led data science programs combining experimental design and + statistical analysis" +Reason: 
Target role uses "data science" terminology +``` + +**Strategy 2: Emphasis Shift** +``` +Same facts, different focus + +Before: "Designed statistical experiments... saving millions in recall costs" +After: "Prevented millions in potential recall costs through predictive + risk detection using statistical modeling" +Reason: Target role values business outcomes over technical methods +``` + +**Strategy 3: Abstraction Level** +``` +Adjust technical specificity + +Before: "Built MATLAB-based automated system for evaluation" +After: "Developed automated evaluation system" +Reason: Target role is language-agnostic, emphasize outcome + +OR + +After: "Built automated evaluation system (MATLAB, Python integration)" +Reason: Target role values technical specificity +``` + +**Strategy 4: Scale Emphasis** +``` +Highlight relevant scale aspects + +Before: "Managed project with 3 stakeholders" +After: "Led cross-functional initiative coordinating 3 organizational units" +Reason: Emphasize cross-org complexity over headcount +``` + +## Gap Handling + +**When match confidence < 60%:** + +**Option 1: Reframe Adjacent Experience** +``` +Present reframing option: + +TEMPLATE SLOT: {Requirement} +BEST MATCH: {Experience} (Confidence: {score}%) + +REFRAME OPPORTUNITY: +Original: "{bullet_text}" +Reframed: "{adjusted_text}" +Justification: {why this is truthful} + +RECOMMENDATION: Use reframed version? Y/N +``` + +**Option 2: Flag as Gap** +``` +GAP IDENTIFIED: {Requirement} + +AVAILABLE OPTIONS: +None with confidence >60% + +RECOMMENDATIONS: +1. Address in cover letter - emphasize learning ability +2. Omit bullet slot - reduce template allocation +3. Include best available match ({score}%) with disclosure +4. Discover new experience through brainstorming + +User decides how to proceed. +``` + +**Option 3: Discover New Experience** +``` +If Experience Discovery not yet run: + +"This gap might be addressable through experience discovery. 
+Would you like to do a quick branching interview about {gap_area}?" + +If already run: +Accept gap, move forward. +``` diff --git a/codex/multi-job-workflow.md b/codex/multi-job-workflow.md new file mode 100644 index 0000000..730a34a --- /dev/null +++ b/codex/multi-job-workflow.md @@ -0,0 +1,1381 @@ +# Multi-Job Resume Tailoring Workflow + +## Overview + +Handles 3-5 similar jobs efficiently by consolidating experience discovery while maintaining per-job research depth. + +**Architecture:** Shared Discovery + Per-Job Tailoring + +**Target Use Case:** +- Small batches (3-5 jobs) +- Moderately similar roles (60%+ requirement overlap) +- Continuous workflow (add jobs incrementally) + +## Phase 0: Job Intake & Batch Initialization + +**Goal:** Collect all job descriptions and initialize batch structure + +**User Interaction:** + +``` +SKILL: "Let's set up your multi-job batch. How would you like to provide +the job descriptions? + +1. Paste them all now (recommended for efficiency) +2. Provide one at a time +3. Provide URLs to fetch + +For each job, I need: +- Job description (text or URL) +- Company name (if not in JD) +- Role title (if not in JD) +- Optional: Priority (high/medium/low) or notes" +``` + +**Data Collection Loop:** + +For each job (until user says "done"): +1. Collect JD text or URL +2. Collect company name (extract from JD if possible, else ask) +3. Collect role title (extract from JD if possible, else ask) +4. Ask: "Priority for this job? (high/medium/low, default: medium)" +5. Ask: "Any notes about this job? (optional, e.g., 'referral from X')" +6. Assign job_id: "job-1", "job-2", etc. +7. Set status: "pending" +8. 
Add to batch + +**Quick JD Parsing:** + +For each job, lightweight extraction (NOT full research yet): + +```python +# Pseudo-code +def quick_parse_jd(jd_text): + return { + "requirements_must_have": extract_requirements(jd_text, required=True), + "requirements_nice_to_have": extract_requirements(jd_text, required=False), + "technical_skills": extract_technical_keywords(jd_text), + "soft_skills": extract_soft_skills(jd_text), + "domain_areas": identify_domains(jd_text) + } +``` + +Purpose: Just enough to identify gaps for discovery phase. Full research happens per-job later. + +**Batch Initialization:** + +Create batch directory structure: + +``` +resumes/batches/batch-{YYYY-MM-DD}-{slug}/ +├── _batch_state.json # State tracking +├── _aggregate_gaps.md # Gap analysis (created in Phase 1) +├── _discovered_experiences.md # Discovery output (created in Phase 2) +└── (job directories created during per-job processing) +``` + +Initialize _batch_state.json: + +```json +{ + "batch_id": "batch-2025-11-04-job-search", + "created": "2025-11-04T10:30:00Z", + "current_phase": "intake", + "processing_mode": "interactive", + "jobs": [ + { + "job_id": "job-1", + "company": "Microsoft", + "role": "Principal PM - 1ES", + "jd_text": "...", + "jd_url": "https://...", + "priority": "high", + "notes": "Internal referral from Alice", + "status": "pending", + "requirements": ["Kubernetes", "CI/CD", "Leadership"], + "gaps": [] + } + ], + "discoveries": [], + "aggregate_gaps": {} +} +``` + +**Library Initialization:** + +Run standard Phase 0 from SKILL.md (library initialization) once for the entire batch. + +**Output:** + +``` +"Batch initialized with {N} jobs: +- Job 1: {Company} - {Role} (priority: {priority}) +- Job 2: {Company} - {Role} +... + +Next: Aggregate gap analysis across all jobs. +Continue? (Y/N)" +``` + +**Checkpoint:** User confirms batch is complete before proceeding. 
+ +## Phase 1: Aggregate Gap Analysis + +**Goal:** Build unified gap list across all jobs to guide single efficient discovery session + +**Process:** + +**1.1 Extract Requirements from All JDs:** + +For each job: +- Parse requirements (already done in Phase 0 quick parse) +- Categorize: must-have vs nice-to-have +- Extract keywords and skill areas + +Example output: +``` +Job 1 (Microsoft 1ES): Kubernetes, CI/CD, cross-functional leadership, Azure +Job 2 (Google Cloud): Kubernetes, GCP, distributed systems, team management +Job 3 (AWS): Container orchestration, AWS services, program management +``` + +**1.2 Match Against Resume Library:** + +For each requirement across ALL jobs: +1. Search library for matching experiences (using matching-strategies.md) +2. Score confidence (0-100%) +3. Flag as gap if confidence < 60% + +```python +# Pseudo-code +for job in batch.jobs: + for requirement in job.requirements: + matches = search_library(requirement) + best_score = max(match.score for match in matches) + if best_score < 60: + flag_as_gap(requirement, best_score, job.job_id) +``` + +**1.3 Build Aggregate Gap Map:** + +Deduplicate gaps across jobs and prioritize: + +```python +# Pseudo-code +def build_aggregate_gaps(all_gaps): + gap_map = {} + + for gap in all_gaps: + if gap.name not in gap_map: + gap_map[gap.name] = { + "appears_in_jobs": [], + "best_match": gap.confidence, + "priority": 0 + } + gap_map[gap.name]["appears_in_jobs"].append(gap.job_id) + + # Prioritize + for gap_name, gap_data in gap_map.items(): + job_count = len(gap_data["appears_in_jobs"]) + if job_count >= 3: + gap_data["priority"] = 3 # Critical + elif job_count == 2: + gap_data["priority"] = 2 # Important + else: + gap_data["priority"] = 1 # Job-specific + + return gap_map +``` + +**1.4 Create Gap Analysis Report:** + +Generate `_aggregate_gaps.md`: + +```markdown +# Aggregate Gap Analysis +**Batch:** batch-2025-11-04-job-search +**Generated:** 2025-11-04T11:00:00Z + +## Coverage Summary + +- Job 1 
(Microsoft): 68% coverage, 5 gaps +- Job 2 (Google): 72% coverage, 4 gaps +- Job 3 (AWS): 65% coverage, 6 gaps + +## Critical Gaps (appear in 3+ jobs) + +### Kubernetes at scale +- **Appears in:** Jobs 1, 2, 3 +- **Current best match:** 45% confidence +- **Match source:** "Deployed containerized app for nonprofit" (2023) +- **Gap:** No production Kubernetes management at scale + +### CI/CD pipeline management +- **Appears in:** Jobs 1, 2, 3 +- **Current best match:** 58% confidence +- **Match source:** "Set up GitHub Actions workflow" (2024) +- **Gap:** Limited enterprise CI/CD experience + +## Important Gaps (appear in 2 jobs) + +### Cloud-native architecture +- **Appears in:** Jobs 2, 3 +- **Current best match:** 52% confidence + +### Cross-functional team leadership +- **Appears in:** Jobs 1, 2 +- **Current best match:** 67% confidence (not a gap, but could improve) + +## Job-Specific Gaps + +### Azure-specific experience +- **Appears in:** Job 1 only +- **Current best match:** 40% confidence + +### GCP experience +- **Appears in:** Job 2 only +- **Current best match:** 35% confidence + +## Aggregate Statistics + +- **Total gaps:** 14 +- **Unique gaps:** 8 (after deduplication) +- **Critical gaps:** 3 +- **Important gaps:** 4 +- **Job-specific gaps:** 1 + +## Recommended Discovery Time + +- Critical gaps (3 gaps × 5-7 min): 15-20 minutes +- Important gaps (4 gaps × 3-5 min): 12-20 minutes +- Job-specific gaps (1 gap × 2-3 min): 2-3 minutes + +**Total estimated discovery time:** 30-40 minutes + +For 3 similar jobs, this replaces 3 × 15 min = 45 min of sequential discovery. +``` + +**1.5 Update Batch State:** + +```json +{ + "current_phase": "gap_analysis", + "aggregate_gaps": { + "critical_gaps": [ + { + "gap_name": "Kubernetes at scale", + "appears_in_jobs": ["job-1", "job-2", "job-3"], + "current_best_match": 45, + "priority": 3 + } + ], + "important_gaps": [...], + "job_specific_gaps": [...] + } +} +``` + +**Output to User:** + +``` +"Gap analysis complete! 
Here's what I found: + +COVERAGE SUMMARY: +- Job 1 (Microsoft): 68% coverage, 5 gaps +- Job 2 (Google): 72% coverage, 4 gaps +- Job 3 (AWS): 65% coverage, 6 gaps + +AGGREGATE GAPS (14 total, 8 unique after deduplication): +- 3 critical gaps (appear in all jobs) 🔴 +- 4 important gaps (appear in 2 jobs) 🟡 +- 1 job-specific gap 🔵 + +I recommend a 30-40 minute experience discovery session to address +these gaps. This will benefit all 3 applications. + +Would you like to: +1. START DISCOVERY - Address gaps through conversational discovery +2. SKIP DISCOVERY - Proceed with current library (not recommended) +3. REVIEW GAPS - See detailed gap analysis first + +Recommendation: Option 1 or 3 (review then start)" +``` + +**Checkpoint:** User chooses next action before proceeding. + +## Phase 2: Shared Experience Discovery + +**Goal:** Surface undocumented experiences across all gaps through single conversational session + +**Core Principle:** Same branching interview from branching-questions.md, but with multi-job context for each question. + +**Session Flow:** + +**2.1 Start with Highest-Leverage Gaps:** + +Process gaps in priority order: +1. Critical gaps (appear in 3+ jobs) - 5-7 min each +2. Important gaps (appear in 2 jobs) - 3-5 min each +3. Job-specific gaps - 2-3 min each + +**2.2 Multi-Job Contextualized Questions:** + +For each gap, provide multi-job context before branching interview: + +**Single-Job Version (from branching-questions.md):** +``` +"I noticed the job requires Kubernetes experience. Have you worked with Kubernetes?" +``` + +**Multi-Job Version (new):** +``` +"Kubernetes experience appears in 3 of your target jobs (Microsoft, Google, AWS). + +This is a HIGH-LEVERAGE gap - addressing it helps multiple applications. + +Current best match: 45% confidence ('Deployed containerized app for nonprofit') + +Have you worked with Kubernetes or container orchestration?" +``` + +**2.3 Conduct Branching Interview:** + +For each gap: +1. 
Initial probe with multi-job context (see above) +2. Branch based on answer using branching-questions.md patterns: + - YES → Deep dive (scale, challenges, metrics) + - INDIRECT → Explore role and transferability + - ADJACENT → Explore related experience + - PERSONAL → Assess recency and substance + - NO → Try broader category or move on + +3. Follow up systematically: + - "What," "how," "why" questions + - Quantify: "Any metrics?" + - Contextualize: "Was this production?" + - Validate: "This addresses {gap} for {jobs}" + +4. Capture immediately with job tags: + +**2.4 Capture Structure:** + +As experiences are discovered, capture to `_discovered_experiences.md`: + +```markdown +# Discovered Experiences +**Batch:** batch-2025-11-04-job-search +**Discovery Date:** 2025-11-04T11:30:00Z + +## Experience 1: Kubernetes CI/CD for nonprofit project + +**Context:** Side project, 2023-2024, production deployment + +**Scope:** +- GitHub Actions pipeline with Kubernetes deployments +- 3 nonprofit organizations using it +- Integrated pytest for testing +- Managed scaling and monitoring + +**Metrics:** +- 3 production deployments +- 99.9% uptime over 12 months +- Reduced deployment time from 2 hours to 15 minutes + +**Addresses gaps in:** +- Jobs 1, 2, 3: Kubernetes at scale +- Jobs 1, 2: CI/CD pipeline management + +**Confidence Improvement:** +- Kubernetes: 45% → 75% (+30%) +- CI/CD: 58% → 82% (+24%) + +**Bullet Draft:** +"Designed and implemented Kubernetes-based CI/CD pipeline using GitHub Actions +and pytest, supporting production deployments for 3 nonprofit organizations with +99.9% uptime and 87% reduction in deployment time" + +**Integration Decision:** [Pending user approval] + +--- + +## Experience 2: Azure migration for university lab + +**Context:** Graduate research, 2022-2023 + +**Scope:** +- Migrated on-premise compute to Azure VMs +- Set up Azure DevOps for lab +- Managed costs and resource allocation + +**Metrics:** +- Migrated 15 TB of data +- Reduced compute 
costs by 40% +- Supported 25 researchers + +**Addresses gaps in:** +- Job 1 only: Azure-specific experience + +**Confidence Improvement:** +- Azure: 40% → 70% (+30%) + +**Bullet Draft:** +"Led Azure cloud migration for university research lab, migrating 15 TB of data +and implementing Azure DevOps, reducing compute costs by 40% while supporting +25 researchers" + +**Integration Decision:** [Pending user approval] +``` + +**2.5 Track Coverage Improvement in Real-Time:** + +After each discovery, update user: + +``` +"Great! That addresses Kubernetes for all 3 jobs. + +UPDATED COVERAGE: +- Job 1 (Microsoft): 68% → 78% (+10%) +- Job 2 (Google): 72% → 82% (+10%) +- Job 3 (AWS): 65% → 75% (+10%) + +Remaining critical gaps: 2 (down from 3) + +Continue with next gap? (Y/N)" +``` + +**2.6 Integration Decision Per Experience:** + +After discovery session complete: + +``` +"Excellent! I captured 5 new experiences addressing gaps across your jobs. + +For each experience, how should I integrate it? + +--- +EXPERIENCE 1: Kubernetes CI/CD for nonprofit project +├─ Addresses: Jobs 1, 2, 3 +└─ Options: + 1. ADD TO LIBRARY FOR ALL JOBS - Integrate and use everywhere + 2. ADD TO LIBRARY, USE SELECTIVELY - User picks which jobs + 3. SKIP - Don't integrate + +Your choice for Experience 1? (1/2/3) + +--- +EXPERIENCE 2: Azure migration for university lab +├─ Addresses: Job 1 only +└─ Options: + 1. ADD TO LIBRARY - Integrate for Job 1 + 2. SKIP - Not needed + +Your choice for Experience 2? (1/2) +``` + +**2.7 Enrich Library:** + +For each approved experience: +1. Add to library database +2. 
Tag with metadata: + - discovered_date + - addressed_gaps + - used_in_jobs + - confidence_improvement + +**2.8 Update Batch State:** + +```json +{ + "current_phase": "discovery", + "discoveries": [ + { + "experience_id": "disc-1", + "text": "Kubernetes CI/CD for nonprofit project", + "context": "Side project, 2023-2024, production", + "scope": "GitHub Actions, 3 nonprofits, pytest, monitoring", + "addresses_jobs": ["job-1", "job-2", "job-3"], + "addresses_gaps": ["Kubernetes", "CI/CD"], + "confidence_improvement": { + "Kubernetes": {"before": 45, "after": 75}, + "CI/CD": {"before": 58, "after": 82} + }, + "integrated": true, + "bullet_draft": "Designed and implemented..." + } + ] +} +``` + +**Output:** + +``` +"Discovery complete! + +SUMMARY: +- New experiences captured: 5 +- Experiences integrated: 5 +- Average coverage improvement: +16% + +FINAL COVERAGE: +- Job 1 (Microsoft): 68% → 85% (+17%) +- Job 2 (Google): 72% → 88% (+16%) +- Job 3 (AWS): 65% → 78% (+13%) + +Remaining gaps: 5 (down from 14) +├─ 0 critical gaps ✓ +├─ 2 important gaps +└─ 3 job-specific gaps + +Ready to proceed with per-job processing? (Y/N)" +``` + +**Checkpoint:** User approves before moving to per-job processing. + +## Phase 3: Per-Job Processing + +**Goal:** Process each job independently through research/template/matching/generation + +**Key Insight:** Once discovery is complete, each job can be processed independently using enriched library. + +**Processing Modes:** + +Before starting, ask user: + +``` +"Discovery complete! Now processing each job individually. + +PROCESSING MODE: +1. INTERACTIVE (default) - I'll show you checkpoints for each job + (template approval, content mapping approval) + +2. EXPRESS - I'll auto-approve templates and matching using best judgment, + you review all final resumes together + +Recommendation: INTERACTIVE for first 1-2 jobs, then switch to EXPRESS +if you like the pattern. + +Which mode for Job 1? 
(1/2)" +``` + +**3.1 Per-Job Loop:** + +For each job in batch (job.status == "pending"): + +1. Set job.status = "in_progress" +2. Set job.current_phase = "research" +3. Create job directory: `resumes/batches/{batch_id}/job-{N}-{company-slug}/` +4. Process through phases (see below) +5. Set job.status = "completed" +6. Set job.files_generated = true +7. Move to next job + +**3.2 Phase 3A: Research (Per-Job)** + +**Same depth as single-job workflow (SKILL.md Phase 1):** + +``` +Job {N}/{total}: {Company} - {Role} +├─ Company research via WebSearch (mission, values, culture, news) +├─ Role benchmarking via LinkedIn (find 3-5 similar role holders) +├─ Success profile synthesis +└─ Checkpoint (if INTERACTIVE mode): Present success profile to user +``` + +Save to: `job-{N}-{company-slug}/success_profile.md` + +**INTERACTIVE Mode:** +``` +"Job 1: Microsoft - Principal PM + +Based on my research, here's what makes candidates successful for this role: + +{SUCCESS_PROFILE_SUMMARY} + +Key findings: +- {Finding 1} +- {Finding 2} +- {Finding 3} + +Does this match your understanding? Any adjustments? + +(Y to proceed / provide feedback)" +``` + +**EXPRESS Mode:** +- Generate success profile +- Save to file +- Proceed automatically (no checkpoint) + +**3.3 Phase 3B: Template Generation (Per-Job)** + +**Same process as single-job workflow (SKILL.md Phase 2):** + +``` +├─ Role consolidation decisions +├─ Title reframing options +├─ Bullet allocation +└─ Checkpoint (if INTERACTIVE): Approve template structure +``` + +Save to: `job-{N}-{company-slug}/template.md` + +**INTERACTIVE Mode:** +``` +"Here's the optimized resume structure for {Company} - {Role}: + +STRUCTURE: +{Section order and rationale} + +ROLE CONSOLIDATION: +{Decisions with options} + +TITLE REFRAMING: +{Proposed titles with alternatives} + +BULLET ALLOCATION: +{Allocation with rationale} + +Approve? 
(Y/N/adjust)"
```

**EXPRESS Mode:**
- Generate template using best judgment
- Save to file
- Proceed automatically

**3.4 Phase 3C: Content Matching (Per-Job)**

**Same process as single-job workflow (SKILL.md Phase 3):**

Uses enriched library (includes discovered experiences from Phase 2)

```
├─ Match content to template slots
├─ Confidence scoring (Direct/Transferable/Adjacent)
├─ Reframing suggestions
├─ Gap identification (should be minimal after discovery)
└─ Checkpoint (if INTERACTIVE): Approve content mapping
```

Save to: `job-{N}-{company-slug}/content_mapping.md`

**INTERACTIVE Mode:**
```
"Content matched for {Company} - {Role}:

COVERAGE SUMMARY:
- Direct matches: {N} bullets ({%}%)
- Transferable: {N} bullets ({%}%)
- Adjacent: {N} bullets ({%}%)
- Gaps: {N} ({%}%)

OVERALL JD COVERAGE: {%}%

[Show detailed mapping]

Approve? (Y/N/adjust)"
```

**EXPRESS Mode:**
- Generate mapping automatically
- Use highest confidence matches
- Save to file
- Proceed automatically

**3.5 Phase 3D: Generation (Per-Job)**

**Same process as single-job workflow (SKILL.md Phase 4):**

```
├─ Generate Markdown resume
├─ Generate DOCX resume (using document-skills:docx)
├─ Generate Report
└─ No checkpoint - just generate files
```

Output files:
- `{Name}_{Company}_{Role}_Resume.md`
- `{Name}_{Company}_{Role}_Resume.docx`
- `{Name}_{Company}_{Role}_Resume_Report.md`

All saved to: `job-{N}-{company-slug}/`

**3.6 Progress Tracking:**

After each job completes:

```
"✓ Job {N}/{total} complete: {Company} - {Role}

QUALITY METRICS:
- JD Coverage: {%}%
- Direct Matches: {%}%
- Files: ✓ MD ✓ DOCX ✓ Report

Jobs remaining: {total - N}
Estimated time: ~{(total - N) * 8} minutes

Continue to Job {N+1}? (Y/N/pause)"
```

**3.7 Pause/Resume Support:**

If user says "pause":
```
"Progress saved! 
+ +CURRENT STATE: +- Jobs completed: {N} +- Jobs remaining: {total - N} +- Next: Job {N+1} - {Company} - {Role} + +To resume later, say 'resume batch {batch_id}' or 'continue my batch'." +``` + +Save batch state with current progress. + +## Phase 4: Batch Finalization + +**Goal:** Present all resumes for review, handle batch-level actions, update library + +**4.1 Generate Batch Summary:** + +Create `_batch_summary.md`: + +```markdown +# Batch Summary +**Batch ID:** batch-2025-11-04-job-search +**Created:** 2025-11-04T10:30:00Z +**Completed:** 2025-11-04T14:15:00Z +**Total Time:** 3 hours 45 minutes + +## Job Summaries + +### Job 1: Principal PM - Microsoft 1ES +- **Status:** Completed ✓ +- **Coverage:** 85% +- **Direct Matches:** 78% +- **Key Strengths:** Azure infrastructure, cross-functional leadership, CI/CD +- **Remaining Gaps:** None critical +- **Files:** + - Varun_Ramesh_Microsoft_1ES_Principal_PM_Resume.md + - Varun_Ramesh_Microsoft_1ES_Principal_PM_Resume.docx + - Varun_Ramesh_Microsoft_1ES_Principal_PM_Resume_Report.md + +### Job 2: Senior TPM - Google Cloud Infrastructure +- **Status:** Completed ✓ +- **Coverage:** 88% +- **Direct Matches:** 72% +- **Key Strengths:** Kubernetes experience, distributed systems, technical depth +- **Remaining Gaps:** GCP-specific (low priority, addressed in summary) +- **Files:** + - Varun_Ramesh_Google_Cloud_Senior_TPM_Resume.md + - Varun_Ramesh_Google_Cloud_Senior_TPM_Resume.docx + - Varun_Ramesh_Google_Cloud_Senior_TPM_Resume_Report.md + +### Job 3: Senior PM - AWS Container Services +- **Status:** Completed ✓ +- **Coverage:** 78% +- **Direct Matches:** 68% +- **Key Strengths:** Container orchestration, program management, technical leadership +- **Remaining Gaps:** AWS-specific (noted in cover letter recommendations) +- **Files:** + - Varun_Ramesh_AWS_Container_Senior_PM_Resume.md + - Varun_Ramesh_AWS_Container_Senior_PM_Resume.docx + - Varun_Ramesh_AWS_Container_Senior_PM_Resume_Report.md + +## Batch Statistics + +### 
Discovery Impact +- **New experiences discovered:** 5 +- **Experiences integrated:** 5 +- **Average coverage improvement:** +16% +- **Time saved vs sequential:** ~15 minutes (shared discovery) + +### Coverage Metrics +- **Average JD coverage:** 84% +- **Average direct matches:** 73% +- **Total files created:** 9 (3 × MD + DOCX + Report) + +### Gap Resolution +- **Starting gaps:** 14 unique gaps +- **Gaps resolved through discovery:** 9 +- **Remaining gaps:** 5 + - 0 critical (100% critical gap resolution) + - 2 important (50% important gap resolution) + - 3 job-specific (handled in cover letters) + +## Files Location + +``` +resumes/batches/batch-2025-11-04-job-search/ +├── _batch_state.json +├── _aggregate_gaps.md +├── _discovered_experiences.md +├── _batch_summary.md (this file) +├── job-1-microsoft/ +│ ├── success_profile.md +│ ├── template.md +│ ├── content_mapping.md +│ ├── Varun_Ramesh_Microsoft_1ES_Principal_PM_Resume.md +│ ├── Varun_Ramesh_Microsoft_1ES_Principal_PM_Resume.docx +│ └── Varun_Ramesh_Microsoft_1ES_Principal_PM_Resume_Report.md +├── job-2-google/ +│ └── (same structure, 6 files) +└── job-3-aws/ + └── (same structure, 6 files) +``` + +## Recommendations + +### Interview Prep +- Prepare Kubernetes stories (appears in all 3 jobs) +- Emphasize cross-functional leadership +- Practice articulating CI/CD experience with metrics + +### Cover Letter Focus +- Job 1 (Microsoft): Emphasize internal Azure knowledge, 1ES mission alignment +- Job 2 (Google): Address GCP learning plan, highlight distributed systems thinking +- Job 3 (AWS): Address AWS learning plan, emphasize container orchestration transferability + +### Application Priority +Based on coverage scores and fit: +1. **Job 2 (Google):** Highest coverage (88%), strong technical fit +2. **Job 1 (Microsoft):** Strong coverage (85%), internal opportunity +3. **Job 3 (AWS):** Good coverage (78%), but more gaps to address in materials +``` + +**4.2 Present to User:** + +``` +"All 3 resumes generated! 
🎉 + +JOB SUMMARIES: +┌─────────────────────────────────────────────────────────────┐ +│ Job 1: Principal PM - Microsoft 1ES │ +│ Coverage: 85% | Direct: 78% | Files: ✓ MD ✓ DOCX ✓ Report │ +│ Key strengths: Azure infra, cross-functional leadership │ +│ Remaining gaps: None critical │ +└─────────────────────────────────────────────────────────────┘ + +┌─────────────────────────────────────────────────────────────┐ +│ Job 2: Senior TPM - Google Cloud Infrastructure │ +│ Coverage: 88% | Direct: 72% | Files: ✓ MD ✓ DOCX ✓ Report │ +│ Key strengths: Kubernetes, distributed systems │ +│ Remaining gaps: GCP-specific (low priority) │ +└─────────────────────────────────────────────────────────────┘ + +┌─────────────────────────────────────────────────────────────┐ +│ Job 3: Senior PM - AWS Container Services │ +│ Coverage: 78% | Direct: 68% | Files: ✓ MD ✓ DOCX ✓ Report │ +│ Key strengths: Container orchestration, program mgmt │ +│ Remaining gaps: AWS-specific (cover letter) │ +└─────────────────────────────────────────────────────────────┘ + +BATCH STATISTICS: +- New experiences discovered: 5 +- Average coverage improvement: +16% +- Total files: 9 (3 jobs × MD + DOCX + Report) +- Time saved vs sequential: ~15 minutes + +FILES: resumes/batches/batch-2025-11-04-job-search/ + +REVIEW OPTIONS: +1. APPROVE ALL - Save all resumes to library +2. REVIEW INDIVIDUALLY - Approve/revise each resume separately +3. REVISE BATCH - Make changes across multiple resumes +4. SAVE BUT DON'T UPDATE LIBRARY - Keep files, don't enrich library + +Which option? (1/2/3/4)" +``` + +**4.3 Handle Review Option 1 (APPROVE ALL):** + +``` +User chooses: 1 + +Process: +1. Copy all resume files to library directory +2. Add all discovered experiences to library database +3. Tag with metadata (batch_id, target_company, target_role, etc.) +4. Rebuild library indices +5. Update batch state to "completed" + +Output: +"✓ All resumes saved to library! 
+ +LIBRARY UPDATED: +- New resumes: 3 +- New experiences: 5 +- Total resumes in library: 32 + +These experiences are now available for future applications. + +Good luck with your applications! 🚀" +``` + +**4.4 Handle Review Option 2 (REVIEW INDIVIDUALLY):** + +``` +User chooses: 2 + +For each job: + Show JD requirements vs resume coverage + Highlight newly discovered experiences used + Ask: "Approve Job {N}? (Y/N/revise)" + + If Y: Add to library + If N: Don't add to library + If revise: Collect feedback, make changes, re-ask + +After all reviewed: +"Review complete! + +LIBRARY UPDATED: +- Approved resumes: {N} +- Skipped resumes: {M} +- Revised resumes: {K} + +Total resumes in library: {count}" +``` + +**4.5 Handle Review Option 3 (REVISE BATCH):** + +``` +User chooses: 3 + +Prompt: +"What would you like to change across the batch? + +COMMON BATCH REVISIONS: +- 'Make all summaries shorter' +- 'Emphasize leadership more in all resumes' +- 'Remove mentions of X technology from all' +- 'Use title \"Senior Technical Program Manager\" consistently' +- 'Add bullets about Y experience to all resumes' + +Your revision request:" + +Process: +1. Collect revision request +2. Determine which jobs affected +3. Re-run matching/generation for affected jobs +4. Present revised resumes +5. Ask for approval again + +Loop until user approves or cancels. +``` + +**4.6 Handle Review Option 4 (SAVE BUT DON'T UPDATE LIBRARY):** + +``` +User chooses: 4 + +Output: +"✓ Files saved to: resumes/batches/batch-2025-11-04-job-search/ + +Not added to library. You can manually move them later if desired. + +Batch state preserved for future reference." 
+``` + +**4.7 Update Final Batch State:** + +```json +{ + "batch_id": "batch-2025-11-04-job-search", + "current_phase": "completed", + "completed_at": "2025-11-04T14:15:00Z", + "jobs": [ + { + "job_id": "job-1", + "status": "completed", + "files_generated": true, + "added_to_library": true + } + ], + "statistics": { + "total_jobs": 3, + "completed_jobs": 3, + "new_experiences": 5, + "average_coverage": 84, + "total_time_minutes": 225 + } +} +``` + +## Incremental Batch Support + +**Goal:** Add new jobs to existing batches without re-doing completed work + +**Scenario:** User processes 3 jobs today, finds 2 more jobs next week + +**8.1 Detect Add Request:** + +User says: +- "Add another job to my batch" +- "I found 2 more jobs" +- "Resume batch {batch_id} and add jobs" + +**8.2 Load Existing Batch:** + +```python +# Pseudo-code +batch = load_batch_state(batch_id) + +if batch.current_phase == "completed": + print("Batch already completed. Creating extension...") + batch.current_phase = "intake" # Reopen for new jobs +``` + +**8.3 Intake New Jobs:** + +Same process as Phase 0, but: +- Append to existing batch.jobs list +- Assign new job_ids (continue numbering: job-4, job-5, etc.) + +``` +"Adding jobs to existing batch: {batch_id} + +CURRENT BATCH: +- Job 1: Microsoft - Principal PM (completed ✓) +- Job 2: Google - Senior TPM (completed ✓) +- Job 3: AWS - Senior PM (completed ✓) + +NEW JOBS TO ADD: + +Provide job description for Job 4: [user input] +[... collect JD, company, role, priority, notes ...] + +Add another job? (Y/N) +``` + +**8.4 Incremental Gap Analysis:** + +``` +"Running incremental gap analysis for new jobs... 
+ +NEW JOBS: +- Job 4 (Stripe): Payment Systems Engineer +- Job 5 (Meta): Senior TPM + +COVERAGE WITH EXISTING LIBRARY: +(Library now includes 5 experiences discovered in previous session) + +- Job 4 (Stripe): 82% coverage +- Job 5 (Meta): 75% coverage + +NEW GAPS (not covered by previous discoveries): +- Payment systems experience (Job 4 only) 🔵 +- Large-scale social networking (Job 5 only) 🔵 +- React/frontend (Jobs 4, 5) 🟡 + +ALREADY COVERED FROM PREVIOUS BATCH: +✓ Kubernetes (from previous batch) +✓ CI/CD (from previous batch) +✓ Cross-functional leadership (from previous batch) + +NEW GAP COUNT: 3 (vs 14 in original batch) +Estimated discovery time: 5-10 minutes (vs 30-40 for original batch) + +Ready for incremental discovery? (Y/N)" +``` + +**8.5 Incremental Discovery:** + +Only ask about NEW gaps: + +```python +# Pseudo-code +previous_gaps = set(batch.aggregate_gaps.all_gap_names()) +new_gaps = [] + +for job in new_jobs: + for gap in job.gaps: + if gap.name not in previous_gaps: + new_gaps.append(gap) + +# Run discovery ONLY for new_gaps +conduct_discovery(new_gaps) +``` + +**Important:** Don't re-ask questions already answered in previous session. + +**8.6 Process New Jobs:** + +Run Phase 3 (per-job processing) for new jobs only: +- Job 4: Research → Template → Matching → Generation +- Job 5: Research → Template → Matching → Generation + +**8.7 Update Batch Summary:** + +Add new jobs to `_batch_summary.md`: + +```markdown +## Incremental Addition (2025-11-11) + +Added 2 new jobs to batch after initial completion. + +### Job 4: Payment Systems Engineer - Stripe +... [same format as original jobs] + +### Job 5: Senior TPM - Meta +... [same format as original jobs] + +## Updated Statistics +- Total jobs: 5 (original 3 + added 2) +- New experiences discovered (incremental): 3 +- Total experiences discovered: 8 +``` + +**8.8 Final Output:** + +``` +"Incremental batch processing complete! 
+ +ORIGINAL BATCH (2025-11-04): +✓ Job 1: Microsoft +✓ Job 2: Google +✓ Job 3: AWS + +NEW JOBS (2025-11-11): +✓ Job 4: Stripe +✓ Job 5: Meta + +NEW DISCOVERIES: 3 +TIME SAVED: ~25 minutes (avoided re-asking 8 previous gaps) + +All 5 resumes available in: resumes/batches/{batch_id}/ + +Add to library? (Y/N)" +``` + +## Error Handling & Edge Cases + +### Edge Case 1: Jobs Are More Diverse Than Expected + +**Detection:** During gap analysis, <40% gap overlap between jobs + +**Scenario:** User provides 5 jobs, but they're quite different (TPM, Data Scientist, Product Marketing Manager) + +**Handling:** + +``` +"⚠️ I notice these jobs are quite diverse: + +JOB SIMILARITY ANALYSIS: +- Jobs 1, 2 (TPM roles): 85% requirement overlap ✓ +- Jobs 3, 4 (Data Science): 80% requirement overlap ✓ +- Job 5 (Marketing): Only 20% overlap with others ⚠️ + +RECOMMENDATION: +Split into batches for better efficiency: +- Batch A: Jobs 1, 2 (TPM-focused discovery) +- Batch B: Jobs 3, 4 (Data Science-focused discovery) +- Job 5: Process separately (single-job mode) + +OR proceed with unified discovery (will take longer, ~50 minutes) + +Your preference: +1. Split into batches (recommended) +2. Continue with unified discovery +3. Remove dissimilar jobs" +``` + +### Edge Case 2: Discovery Reveals Experience Relevant to Only 1 Job + +**Scenario:** During discovery for "multi-job gap," user's answer only addresses one job + +**Example:** + +``` +SKILL: "Kubernetes appears in all 3 jobs. Have you worked with it?" +USER: "Only in Azure (AKS), not GCP or AWS" + +HANDLING: +"Got it - this addresses the Microsoft job (Azure AKS) but not Google (GKE) +or AWS (EKS). + +I'll tag this as Microsoft-specific. + +Should I still explore Kubernetes more broadly? You might have experience +with container orchestration concepts that transfer across cloud providers. + +Options: +1. YES - Explore general container orchestration +2. NO - Move to next gap + +Your choice? 
(1/2)" + +[If 1, branch to adjacent probe for container orchestration concepts] +``` + +### Edge Case 3: One Job's Research Fails + +**Scenario:** Company is obscure startup, minimal online presence + +**Handling:** + +``` +"⚠️ Limited research data for Job 3 (StartupXYZ): +- No company website or information found +- No LinkedIn profiles for similar roles +- No news or press coverage + +FALLBACK: JD-only analysis for this job + +OPTIONS: +1. CONTINUE - Proceed with JD-only (recommended) +2. SKIP JOB 3 - Process others, come back later +3. PROVIDE CONTEXT - You share company info manually +4. REMOVE JOB 3 - Remove from batch entirely + +Other jobs (1, 2) have sufficient research data. + +Your preference? (1/2/3/4)" + +[Don't let one failure block the entire batch] +``` + +### Edge Case 4: User Wants to Add/Remove Jobs Mid-Process + +**Add Job Mid-Process:** + +``` +USER: "I want to add another job to the batch" + +SKILL: "Adding Job {N+1} to batch. + +CURRENT PHASE: {current_phase} + +Since we've already completed discovery: +- If Job {N+1} has NEW gaps: We'll do quick incremental discovery +- If existing discoveries cover it: Proceed directly to processing + +Let me collect the job details..." + +[Run quick gap check, incremental discovery if needed, then process] +``` + +**Remove Job Mid-Process:** + +``` +USER: "Remove Job 2 from the batch" + +SKILL: "Removing Job 2 from batch. + +ALREADY-DISCOVERED EXPERIENCES: +- Will remain in library +- Still available for other jobs in batch +- Available for future batches + +BATCH UPDATED: +- Jobs remaining: {N-1} +- Job 2 files will be moved to archive (not deleted) + +Continue with remaining jobs? (Y/N)" +``` + +### Edge Case 5: Library Update Conflicts + +**Scenario:** User approves Job 1 resume, rejects Job 2, wants to revise Job 3 + +**Handling:** + +``` +"You've approved some jobs but not others. + +LIBRARY UPDATE OPTIONS: + +1. 
INDIVIDUAL APPROVAL (recommended) + - Add Job 1 to library now ✓ + - Job 2 won't be added ✗ + - Job 3 pending revision (decide after changes) + +2. BATCH APPROVAL (wait) + - Update library only when all jobs finalized + - Ensures consistency across batch + +3. SELECTIVE APPROVAL + - Choose which jobs to add now + - Which experiences to integrate + +Recommendation: Option 1 (individual approval) + +Your preference? (1/2/3)" +``` + +### Edge Case 6: Batch Processing Interrupted + +**Scenario:** User stops mid-batch, connection drops, or session ends + +**Auto-Save Behavior:** + +After each major milestone, auto-save batch state: +- After each job completes +- After discovery phase +- After gap analysis +- After user checkpoints + +**Resume Instructions:** + +``` +"Your batch processing was interrupted. + +SAVED STATE: +- Batch ID: {batch_id} +- Completed: Jobs 1, 2 +- In Progress: Job 3 (template approved, matching not started) +- Pending: Jobs 4, 5 + +To resume: +- Say 'resume batch {batch_id}' +- Or 'continue my batch' +- Or provide batch ID when asked + +I'll pick up exactly where we left off." +``` + +### Edge Case 7: No Gaps Found + +**Scenario:** All jobs are well-covered by existing library (rare but possible) + +**Handling:** + +``` +"Gap analysis complete! + +COVERAGE SUMMARY: +- Job 1: 92% coverage +- Job 2: 89% coverage +- Job 3: 87% coverage + +ALL GAPS ADDRESSABLE WITH EXISTING LIBRARY ✓ + +No experience discovery needed - your library already covers these roles well. + +OPTIONS: +1. SKIP DISCOVERY - Proceed directly to per-job processing (recommended) +2. OPTIONAL DISCOVERY - Surface any additional experiences anyway +3. REVIEW GAPS - See what small gaps exist + +Your preference? (1/2/3)" +``` + +### Error Recovery Principles + +1. **Never lose progress:** Auto-save batch state frequently +2. **Partial success is success:** Some jobs completing is better than none +3. **Transparent failures:** Always explain what went wrong and options +4. 
**Graceful degradation:** Fall back to JD-only, single-job mode, or skip if needed +5. **User control:** Always provide options, never force a path + +### Graceful Degradation Paths + +``` +Research fails → Fall back to JD-only analysis +Library too small → Emphasize discovery phase +WebSearch unavailable → Use cached data or skip research +DOCX generation fails → Provide markdown only +One job fails → Continue with others, revisit failed job later +``` diff --git a/codex/research-prompts.md b/codex/research-prompts.md new file mode 100644 index 0000000..b4428bd --- /dev/null +++ b/codex/research-prompts.md @@ -0,0 +1,93 @@ +# Research Phase Prompts + +## Job Description Parsing + +**Prompt template:** +``` +Analyze this job description and extract: + +1. EXPLICIT REQUIREMENTS (must-have vs nice-to-have) +2. TECHNICAL KEYWORDS and domain terminology +3. IMPLICIT PREFERENCES (cultural signals, hidden requirements) +4. RED FLAGS (overqualification risks, mismatches) +5. ROLE ARCHETYPE (IC technical / people leadership / cross-functional) + +Job Description: +{JD_TEXT} + +Output as structured sections. +``` + +## Company Research + +**WebSearch queries:** +``` +1. "{company_name} mission values culture" +2. "{company_name} engineering blog" +3. "{company_name} recent news product launches" +4. "{company_name} team structure engineering" +``` + +**Synthesis prompt:** +``` +Based on these search results, summarize: + +1. Company mission and values +2. Cultural priorities +3. Business model and customer base +4. Team structure (if available) +5. Company stage (startup/growth/mature) and implications + +Search results: +{SEARCH_RESULTS} +``` + +## Role Benchmarking + +**WebSearch + WebFetch strategy:** +``` +1. Search: "site:linkedin.com {job_title} {company_name}" +2. Fetch: Top 3-5 profiles +3. 
Fallback: "site:linkedin.com {job_title} {similar_company}" +``` + +**Analysis prompt:** +``` +Analyze these LinkedIn profiles for people in similar roles: + +Extract patterns: +1. Common backgrounds and career paths +2. Emphasized skills and project types +3. Terminology they use to describe similar work +4. Notable accomplishments or themes + +Profiles: +{PROFILE_DATA} +``` + +## Success Profile Synthesis + +**Synthesis prompt:** +``` +Combine job description analysis, company research, and role benchmarking into: + +## Success Profile: {Role} at {Company} + +### Core Requirements (Must-Have) +- {Requirement}: {Evidence from JD/research} + +### Valued Capabilities (Nice-to-Have) +- {Capability}: {Why it matters in this context} + +### Cultural Fit Signals +- {Value}: {How to demonstrate} + +### Narrative Themes +- {Theme}: {Examples from similar role holders} + +### Terminology Map +Standard term → Company-preferred term + +### Risk Factors +- {Concern}: {Mitigation strategy} +``` diff --git a/codex/scripts/export_resume.py b/codex/scripts/export_resume.py new file mode 100755 index 0000000..fc8de00 --- /dev/null +++ b/codex/scripts/export_resume.py @@ -0,0 +1,162 @@ +#!/usr/bin/env python3 +"""Export resume markdown to DOCX and/or PDF using local CLI tools. 
+ +Usage: + python3 scripts/export_resume.py --input out/Resume.md --formats docx,pdf +""" + +from __future__ import annotations + +import argparse +import shutil +import subprocess +from pathlib import Path + + +def run(cmd: list[str]) -> tuple[int, str]: + try: + result = subprocess.run( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + check=False, + ) + except FileNotFoundError: + return 127, f"command not found: {cmd[0]}" + return result.returncode, (result.stdout or "").strip() + + +def has(binary: str) -> bool: + return shutil.which(binary) is not None + + +def export_docx(input_md: Path, output_docx: Path) -> bool: + if has("pandoc"): + code, out = run(["pandoc", str(input_md), "-o", str(output_docx)]) + if code == 0 and output_docx.exists(): + print(f"[ok] DOCX via pandoc -> {output_docx}") + return True + print(f"[warn] pandoc DOCX export failed: {out}") + + # textutil fallback may lose markdown structure but still produces a DOCX. + if has("textutil"): + code, out = run( + [ + "textutil", + "-convert", + "docx", + str(input_md), + "-output", + str(output_docx), + ] + ) + if code == 0 and output_docx.exists(): + print(f"[ok] DOCX via textutil fallback -> {output_docx}") + return True + print(f"[warn] textutil DOCX export failed: {out}") + + print("[error] Could not generate DOCX (missing/failed converters).") + return False + + +def export_pdf(input_md: Path, output_pdf: Path, docx_hint: Path | None) -> bool: + if has("pandoc"): + attempts = [ + ["pandoc", str(input_md), "-o", str(output_pdf)], + ["pandoc", str(input_md), "-o", str(output_pdf), "--pdf-engine=xelatex"], + ["pandoc", str(input_md), "-o", str(output_pdf), "--pdf-engine=pdflatex"], + ["pandoc", str(input_md), "-o", str(output_pdf), "--pdf-engine=wkhtmltopdf"], + ["pandoc", str(input_md), "-o", str(output_pdf), "--pdf-engine=weasyprint"], + ] + for cmd in attempts: + code, out = run(cmd) + if code == 0 and output_pdf.exists(): + print(f"[ok] PDF via {' 
'.join(cmd[:2])} -> {output_pdf}") + return True + if out: + print(f"[warn] pandoc PDF attempt failed: {out}") + + if has("soffice") and docx_hint and docx_hint.exists(): + outdir = str(output_pdf.parent) + code, out = run( + [ + "soffice", + "--headless", + "--convert-to", + "pdf", + "--outdir", + outdir, + str(docx_hint), + ] + ) + expected = output_pdf.parent / f"{docx_hint.stem}.pdf" + if code == 0 and expected.exists(): + if expected != output_pdf: + expected.replace(output_pdf) + print(f"[ok] PDF via soffice from DOCX -> {output_pdf}") + return True + print(f"[warn] soffice PDF export failed: {out}") + + print("[error] Could not generate PDF (missing/failed converters).") + return False + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Export resume markdown to DOCX/PDF using local tools." + ) + parser.add_argument("--input", required=True, help="Path to markdown resume") + parser.add_argument( + "--formats", + default="docx,pdf", + help="Comma-separated formats to generate (docx,pdf)", + ) + parser.add_argument( + "--output-dir", + default=None, + help="Optional output directory (default: input file directory)", + ) + return parser.parse_args() + + +def main() -> int: + args = parse_args() + input_md = Path(args.input).expanduser().resolve() + if not input_md.exists(): + print(f"[error] input file not found: {input_md}") + return 1 + + requested = {fmt.strip().lower() for fmt in args.formats.split(",") if fmt.strip()} + unknown = requested - {"docx", "pdf"} + if unknown: + print(f"[error] unsupported formats: {', '.join(sorted(unknown))}") + return 1 + + outdir = ( + Path(args.output_dir).expanduser().resolve() + if args.output_dir + else input_md.parent.resolve() + ) + outdir.mkdir(parents=True, exist_ok=True) + + base = input_md.stem + output_docx = outdir / f"{base}.docx" + output_pdf = outdir / f"{base}.pdf" + + ok = True + docx_generated = False + if "docx" in requested: + docx_generated = 
export_docx(input_md, output_docx) + ok = ok and docx_generated + + if "pdf" in requested: + docx_hint = output_docx if docx_generated or output_docx.exists() else None + pdf_generated = export_pdf(input_md, output_pdf, docx_hint) + ok = ok and pdf_generated + + return 0 if ok else 2 + + +if __name__ == "__main__": + raise SystemExit(main())