diff --git a/ai/generative-ai-service/hr-goal-alignment/.gitignore b/ai/generative-ai-service/hr-goal-alignment/.gitignore index d69ef29a7..378f3b7d9 100644 --- a/ai/generative-ai-service/hr-goal-alignment/.gitignore +++ b/ai/generative-ai-service/hr-goal-alignment/.gitignore @@ -1,8 +1,8 @@ **/__pycache__/ venv/ -files/venv/ -**/config.py +config.py .DS_Store data/ *.log *.txt +!requirements.txt \ No newline at end of file diff --git a/ai/generative-ai-service/hr-goal-alignment/README.md b/ai/generative-ai-service/hr-goal-alignment/README.md index d4489713d..987793889 100644 --- a/ai/generative-ai-service/hr-goal-alignment/README.md +++ b/ai/generative-ai-service/hr-goal-alignment/README.md @@ -28,8 +28,9 @@ See [LICENSE](./LICENSE) for more details. --- +> ORACLE AND ITS AFFILIATES DO NOT PROVIDE ANY WARRANTY WHATSOEVER, EXPRESS OR IMPLIED, FOR ANY SOFTWARE, MATERIAL OR CONTENT OF ANY KIND CONTAINED OR PRODUCED WITHIN THIS REPOSITORY, AND IN PARTICULAR SPECIFICALLY DISCLAIM ANY AND ALL IMPLIED WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. FURTHERMORE, ORACLE AND ITS AFFILIATES DO NOT REPRESENT THAT ANY CUSTOMARY SECURITY REVIEW HAS BEEN PERFORMED WITH RESPECT TO ANY SOFTWARE, MATERIAL OR CONTENT CONTAINED OR PRODUCED WITHIN THIS REPOSITORY. IN ADDITION, AND WITHOUT LIMITING THE FOREGOING, THIRD PARTIES MAY HAVE POSTED SOFTWARE, MATERIAL OR CONTENT TO THIS REPOSITORY WITHOUT ANY REVIEW. USE AT YOUR OWN RISK. ## Disclaimer -ORACLE AND ITS AFFILIATES DO NOT PROVIDE ANY WARRANTY WHATSOEVER, EXPRESS OR IMPLIED, FOR ANY SOFTWARE, MATERIAL OR CONTENT OF ANY KIND CONTAINED OR PRODUCED WITHIN THIS REPOSITORY, AND IN PARTICULAR SPECIFICALLY DISCLAIM ANY AND ALL IMPLIED WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. FURTHERMORE, ORACLE AND ITS AFFILIATES DO NOT REPRESENT THAT ANY CUSTOMARY SECURITY REVIEW HAS BEEN PERFORMED WITH RESPECT TO ANY SOFTWARE, MATERIAL OR CONTENT CONTAINED OR PRODUCED WITHIN THIS REPOSITORY. IN ADDITION, AND WITHOUT LIMITING THE FOREGOING, THIRD PARTIES MAY HAVE POSTED SOFTWARE, MATERIAL OR CONTENT TO THIS REPOSITORY WITHOUT ANY REVIEW. USE AT YOUR OWN RISK. +ORACLE AND ITS AFFILIATES DO NOT PROVIDE ANY WARRANTY WHATSOEVER, EXPRESS OR IMPLIED, FOR ANY SOFTWARE, MATERIAL OR CONTENT OF ANY KIND CONTAINED OR PRODUCED WITHIN THIS REPOSITORY, AND IN PARTICULAR SPECIFICALLY DISCLAIM ANY AND ALL IMPLIED WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. FURTHERMORE, ORACLE AND ITS AFFILIATES DO NOT REPRESENT THAT ANY CUSTOMARY SECURITY REVIEW HAS BEEN PERFORMED WITH RESPECT TO ANY SOFTWARE, MATERIAL OR CONTENT CONTAINED OR PRODUCED WITHIN THIS REPOSITORY. IN ADDITION, AND WITHOUT LIMITING THE FOREGOING, THIRD PARTIES MAY HAVE POSTED SOFTWARE, MATERIAL OR CONTENT TO THIS REPOSITORY WITHOUT ANY REVIEW. USE AT YOUR OWN RISK. diff --git a/ai/generative-ai-service/hr-goal-alignment/files/Org_Chart.py b/ai/generative-ai-service/hr-goal-alignment/files/Org_Chart.py index 1bd37bce1..1fd36a97b 100644 --- a/ai/generative-ai-service/hr-goal-alignment/files/Org_Chart.py +++ b/ai/generative-ai-service/hr-goal-alignment/files/Org_Chart.py @@ -1,4 +1,3 @@ -# Copyright (c) 2025 Oracle and/or its affiliates. 
import streamlit as st from urllib.parse import quote, unquote @@ -100,7 +99,7 @@ def create_org_chart(): """ # Display the chart with HTML component -st.components.v1.html(html_chart, height=520, scrolling=True) # type: ignore +st.components.v1.html(html_chart, height=520, scrolling=True) # Show details section for selected employee # ------------------------------------------------------------ @@ -182,5 +181,5 @@ def chip_style(val): else "background-color:#f8d7da;font-weight:bold" ) - styled = goals_pretty.style.applymap(chip_style, subset=[LABEL_COL]) # type: ignore + styled = goals_pretty.style.applymap(chip_style, subset=[LABEL_COL]) st.dataframe(styled, use_container_width=True, hide_index=True) diff --git a/ai/generative-ai-service/hr-goal-alignment/files/README.md b/ai/generative-ai-service/hr-goal-alignment/files/README.md index f547cf084..333232a39 100644 --- a/ai/generative-ai-service/hr-goal-alignment/files/README.md +++ b/ai/generative-ai-service/hr-goal-alignment/files/README.md @@ -51,6 +51,7 @@ The system integrates with Oracle Database and uses OCI's Generative AI models t ├── utils.py ├── requirements.txt └── README.md +``` ## Setup Instructions diff --git a/ai/generative-ai-service/hr-goal-alignment/files/app.py b/ai/generative-ai-service/hr-goal-alignment/files/app.py index 5e56cd49c..2ea78c6a5 100644 --- a/ai/generative-ai-service/hr-goal-alignment/files/app.py +++ b/ai/generative-ai-service/hr-goal-alignment/files/app.py @@ -1,4 +1,3 @@ -# Copyright (c) 2025 Oracle and/or its affiliates. import streamlit as st from pathlib import Path @@ -30,4 +29,4 @@ pg = st.navigation(pages) -pg.run() +pg.run() \ No newline at end of file diff --git a/ai/generative-ai-service/hr-goal-alignment/files/config_template.py b/ai/generative-ai-service/hr-goal-alignment/files/config_template.py index 925e75d37..a51dbc565 100644 --- a/ai/generative-ai-service/hr-goal-alignment/files/config_template.py +++ b/ai/generative-ai-service/hr-goal-alignment/files/config_template.py @@ -1,4 +1,3 @@ -# Copyright (c) 2025 Oracle and/or its affiliates. # config_template.py # === OCI Configuration === diff --git a/ai/generative-ai-service/hr-goal-alignment/files/course_vector_utils.py b/ai/generative-ai-service/hr-goal-alignment/files/course_vector_utils.py index 48d2dc24d..1392a462e 100644 --- a/ai/generative-ai-service/hr-goal-alignment/files/course_vector_utils.py +++ b/ai/generative-ai-service/hr-goal-alignment/files/course_vector_utils.py @@ -1,4 +1,3 @@ -# Copyright (c) 2025 Oracle and/or its affiliates. from typing import List, Dict, Optional from langchain_community.vectorstores import OracleVS import os @@ -13,7 +12,6 @@ from langchain_text_splitters import RecursiveCharacterTextSplitter from langchain_community.embeddings import OCIGenAIEmbeddings from langchain_core.documents import Document -from langchain_community.vectorstores.utils import DistanceStrategy # Use the project's config file import config @@ -62,7 +60,7 @@ def _initialize_vector_store(self) -> OracleVS: client=self.db_conn, # Use the connection passed in __init__ embedding_function=self.embeddings, # Use the embeddings initialized in __init__ table_name=config.VECTOR_TABLE_NAME, # Use table name from project config - distance_strategy=DistanceStrategy.COSINE + distance_strategy="COSINE" ) # LLM initialization removed. 
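A note on the distance_strategy change above: passing the plain string "COSINE" should still behave like DistanceStrategy.COSINE, because DistanceStrategy in langchain_community.vectorstores.utils is, to my knowledge, declared as a str-backed Enum, so its members compare equal to their string values. A minimal self-contained sketch of that equivalence (the enum below is a simplified stand-in for the real class, not the library code itself):

    from enum import Enum

    # Simplified stand-in for langchain_community.vectorstores.utils.DistanceStrategy,
    # assumed here to be a str-backed Enum; used only to illustrate the comparison.
    class DistanceStrategy(str, Enum):
        COSINE = "COSINE"
        EUCLIDEAN_DISTANCE = "EUCLIDEAN_DISTANCE"

    # Checks of the form `strategy == DistanceStrategy.COSINE` succeed whether the
    # caller passed the enum member or the bare string.
    assert DistanceStrategy.COSINE == "COSINE"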
@@ -169,3 +167,5 @@ def similarity_search(self, query: str, k: int = 5) -> List[Document]: except Exception as e: logger.error(f"Error during similarity search: {e}", exc_info=True) return [] + + diff --git a/ai/generative-ai-service/hr-goal-alignment/files/data/Full_Company_Training_Catalog.xlsx b/ai/generative-ai-service/hr-goal-alignment/files/data/Full_Company_Training_Catalog.xlsx new file mode 100644 index 000000000..25a255d6d Binary files /dev/null and b/ai/generative-ai-service/hr-goal-alignment/files/data/Full_Company_Training_Catalog.xlsx differ diff --git a/ai/generative-ai-service/hr-goal-alignment/files/data_ingestion_courses.py b/ai/generative-ai-service/hr-goal-alignment/files/data_ingestion_courses.py index fb6473100..cf846c406 100644 --- a/ai/generative-ai-service/hr-goal-alignment/files/data_ingestion_courses.py +++ b/ai/generative-ai-service/hr-goal-alignment/files/data_ingestion_courses.py @@ -1,4 +1,3 @@ -# Copyright (c) 2025 Oracle and/or its affiliates. import logging import sys import oracledb diff --git a/ai/generative-ai-service/hr-goal-alignment/files/gen_ai_service/inference.py b/ai/generative-ai-service/hr-goal-alignment/files/gen_ai_service/inference.py index aa007e471..257bc9eb8 100644 --- a/ai/generative-ai-service/hr-goal-alignment/files/gen_ai_service/inference.py +++ b/ai/generative-ai-service/hr-goal-alignment/files/gen_ai_service/inference.py @@ -1,4 +1,3 @@ -# Copyright (c) 2025 Oracle and/or its affiliates. import json import logging import sys @@ -53,7 +52,8 @@ def recommend_courses(profile, feedback) -> dict: model = get_llm_model() response = {} try: - response = model.with_structured_output(schema).invoke( + # Get raw output first for debugging + raw_output = model.invoke( [ SystemMessage(content=prompt_sys), HumanMessage( @@ -63,10 +63,27 @@ def recommend_courses(profile, feedback) -> dict: ), ] ) + logger.info(f"Raw LLM response before parsing:\n{raw_output}") + # Now parse the structured output + result = model.with_structured_output(schema).invoke( + [ + SystemMessage(content=prompt_sys), + HumanMessage( + content=prompt_user.format( + EMPLOYEE_PROFILE=profile, MANAGER_FEEDBACK=feedback + ) + ), + ] + ) + if result is None: + logger.warning("GenAI model response couldn't be parsed into the expected schema.") + logger.warning("Consider inspecting the raw output.") + else: + response = dict(result) if not isinstance(result, dict) else result except Exception as e: logger.error(f"Error during recommended_courses execution: {e}") - - return response # type: ignore + logger.info(f"Returning response from recommend_courses: {response}") + return response @@ -82,7 +99,7 @@ def classify_smart_goal(goal_description) -> dict: except Exception as e: logger.error(f"Error during recommended_courses execution: {e}") - return json.loads(response.content) # type: ignore + return json.loads(response.content) prompt_sys = """ @@ -133,6 +150,58 @@ def classify_smart_goal(goal_description) -> dict: DO NOT wrap your response in code blocks, backticks, or any other formatting. Return ONLY the raw JSON object itself. """ + +new_prompt_user = """ +Based on the following information about an employee and their performance, recommend appropriate training courses. + +**Employee Profile:** +{EMPLOYEE_PROFILE} + +**Manager Feedback:** +{MANAGER_FEEDBACK} + +Your task: + +1. 
Identify exactly 3 skill-based focus areas for this employee to develop, prioritized according to:
+   - The most critical issues raised in the manager's feedback
+   - Then their job title and core responsibilities
+   - Then existing skills and experience level
+
+2. For each focus area, recommend exactly 2 course titles that:
+   - Explicitly include difficulty level in the title using: "(Beginner)", "(Intermediate)", or "(Advanced)"
+   - Address the skill gap clearly
+   - Are appropriate for the employee's experience
+
+**Output Format:**
+
+Return ONLY a JSON object with a single key: `"recommended_courses"`
+- This key must map to an object
+- Each key in that object is a focus area (string)
+- Each value is a list of exactly two course titles (strings)
+
+**Example:**
+{
+    "recommended_courses": {
+        "Strategic Communication": [
+            "Influential Communication for Technical Professionals (Intermediate)",
+            "Executive Presence and Presentation Skills (Advanced)"
+        ],
+        "Technical Leadership": [
+            "Leading High-Performance Technical Teams (Intermediate)",
+            "Strategic Technical Decision Making (Advanced)"
+        ],
+        "Project Estimation": [
+            "Fundamentals of Project Estimation (Beginner)",
+            "Advanced Techniques in Project Scoping and Estimation (Intermediate)"
+        ]
+    }
+}
+
+⚠️ Do NOT include any explanations, markdown formatting, backticks, or extra text. Only return the raw JSON object as shown.
+"""
+
+
+
 
 prompt_user = """
 Based on the following information about an employee and their performance, recommend appropriate training courses:
diff --git a/ai/generative-ai-service/hr-goal-alignment/files/goal_alignment_backend.py b/ai/generative-ai-service/hr-goal-alignment/files/goal_alignment_backend.py
index b8131b992..74a11b37d 100644
--- a/ai/generative-ai-service/hr-goal-alignment/files/goal_alignment_backend.py
+++ b/ai/generative-ai-service/hr-goal-alignment/files/goal_alignment_backend.py
@@ -1,7 +1,8 @@
-# Copyright (c) 2025 Oracle and/or its affiliates.
 import oracledb
 import config
 import json
+import re
+import uuid
 from langchain.prompts import PromptTemplate
 from langchain.schema import AIMessage
 from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI
@@ -15,7 +16,7 @@ model_kwargs={"temperature": 0.1, "max_tokens": 500},
 )
 
 
-def query_llm(prompt_template, inputs) -> str:
+def query_llm(prompt_template, inputs):
     """Formats prompt and calls the Oracle LLM"""
     if not isinstance(prompt_template, PromptTemplate):
         raise TypeError(" query_llm expected a PromptTemplate object.")
@@ -29,7 +30,7 @@ def query_llm(prompt_template, inputs) -> str:
 
     chain = RunnableSequence(prompt_template | llm)
     response = chain.invoke(inputs)
-    return response.content if isinstance(response, AIMessage) else str(response) # type: ignore
+    return response.content if isinstance(response, AIMessage) else response
 
 
 # --- Database Data Loading Functions ---
@@ -98,6 +99,53 @@ def load_manager_briefing_from_db(connection, employee_id):
         # Return empty dict or raise error
     return briefing_data
 
+
+
+def insert_goals_to_db(connection, employee_id, refined_goals):
+    """
+    Inserts structured goals into the 'Goals' table using the expected schema.
+    Assumes metrics are stored as serialized JSON strings in the CLOB column.
+ """ + try: + cursor = connection.cursor() + + for goal in refined_goals: + # Required fields + goal_id = str(uuid.uuid4()) + title = goal.get("goal_title", "Untitled Goal")[:255] # Truncate to fit VARCHAR2(255) + objective = goal.get("objective", "") + + # Optional fields + metrics_list = goal.get("metrics", []) + timeline = goal.get("timeline", "TBD")[:255] # Truncate if needed + + # Convert metrics list to JSON string + if isinstance(metrics_list, list): + metrics_json = json.dumps(metrics_list, ensure_ascii=False, indent=2) + else: + metrics_json = json.dumps([str(metrics_list)]) + + # Insert into DB + cursor.execute(""" + INSERT INTO Goals (goal_id, employee_id, title, objective, metrics, timeline) + VALUES (:1, :2, :3, :4, :5, :6) + """, ( + goal_id, + employee_id, + title, + objective, + metrics_json, + timeline + )) + + connection.commit() + return f"{len(refined_goals)} goal(s) successfully added for employee {employee_id}." + + except oracledb.Error as error: + print("Error inserting goals:", error) + return f"⚠️ Failed to insert goals: {error}" + + # 🚀 Goal Alignment Functions (Database Version) def get_horizontal_peers(connection, employee_id): @@ -110,7 +157,7 @@ def get_horizontal_peers(connection, employee_id): JOIN Employees e2 ON e1.manager_id = e2.manager_id WHERE e1.employee_id = :1 AND e2.employee_id != :1 """, - (employee_id, employee_id) # 👈 Pass it twice + (employee_id, employee_id) ) peers = [row[0] for row in cursor.fetchall()] return peers @@ -143,7 +190,6 @@ def check_vertical_alignment_upward(connection, manager_id, employee_id): """Checks vertical alignment between an employee and their manager.""" try: cursor = connection.cursor() - # Fetch manager and employee data cursor.execute( """ @@ -494,32 +540,123 @@ def generate_final_recommendations(connection, employee_id): print("Error generating final recommendations:") print(error) return "Error generating final recommendations." - + def update_employee_goal_objective(connection, employee_id, new_objective): - """Updates the 'objective' field for a given employee in the Goals table.""" try: cursor = connection.cursor() - # Optionally fetch to check if a goal exists - cursor.execute( - "SELECT COUNT(*) FROM Goals WHERE employee_id = :1", - (employee_id,) - ) - if cursor.fetchone()[0] == 0: - return f"No goal entry found for employee ID {employee_id}." + # Extract numeric suffix and get max number from goal_id like 'goal_001' + cursor.execute(""" + SELECT COALESCE(MAX(TO_NUMBER(REGEXP_SUBSTR(goal_id, '\\d+$'))), 0) + 1 + FROM Goals + """) + next_number = cursor.fetchone()[0] + + # Format as goal_001, goal_002, etc. + next_goal_id = f"goal_{next_number:03d}" + + goal_title = "Updated Learning Goal" cursor.execute( """ - UPDATE Goals - SET objective = :1 - WHERE employee_id = :2 + INSERT INTO Goals (goal_id, employee_id, title, objective, metrics, timeline) + VALUES (:1, :2, :3, :4, NULL, NULL) """, - (new_objective, employee_id) + ( + next_goal_id, + str(employee_id), + goal_title, + new_objective + ) ) + connection.commit() - return "Goal objective updated successfully." + return f"Inserted new goal objective with ID {next_goal_id}." except oracledb.Error as error: - print("Error updating goal objective:") + print("Error inserting goal:") print(error) - return "Error updating goal objective." + return f"Error inserting new goal: {error}" + + +def extract_goal_lines(text): + """ + Extract meaningful refined goals from chatbot messages, bullet lists, or paragraphs. + Splits multi-goal messages into atomic blocks. 
+ """ + lines = [] + + # Split by double line breaks, OR bullet symbols, OR numbered points + chunks = re.split(r'(?:(?:\n\s*\n)|(?:^\d+\.\s)|(?:^[-*•]))', text, flags=re.MULTILINE) + + for chunk in chunks: + chunk = chunk.strip() + if len(chunk) < 30: + continue + if any(kw in chunk.lower() for kw in [ + "next topic", "final recommendations", "let me know", "we’ve", "you could also" + ]): + continue + if re.match(r'^\*\*.+\*\*:$', chunk): # e.g., "**Refined Metrics:**" + continue + lines.append(chunk) + + return lines + + +def extract_structured_goal(raw_text): + prompt = PromptTemplate( + input_variables=["raw_text"], + template=""" +You are an assistant that extracts structured, database-ready goal records. +The input may contain markdown, bullet points, or informal formatting. + +Extract the following fields from the input: +- goal_title +- objective (one actionable sentence) +- metrics (list of concrete KPIs) +- timeline (inferred if not stated, e.g., "Q2 2025") +- stakeholders (list of names or roles mentioned) + +Respond ONLY in this JSON format: +{{ + "goal_title": "...", + "objective": "...", + "metrics": ["..."], + "timeline": "...", + "stakeholders": ["..."] +}} + +INPUT: +{raw_text} + """ + ) + + response = query_llm(prompt, {"raw_text": raw_text}) + try: + structured = json.loads(response) +# --- Sanitize --- + def clean_text(t): + return ( + t.replace('\\"', '"') + .replace("**", "") + .replace("”", '"') + .replace("“", '"') + .strip() + ) + + for key in ["goal_title", "objective", "timeline"]: + if key in structured and isinstance(structured[key], str): + structured[key] = clean_text(structured[key]) + + if "metrics" in structured and isinstance(structured["metrics"], list): + structured["metrics"] = [clean_text(m) for m in structured["metrics"]] + + if "stakeholders" in structured and isinstance(structured["stakeholders"], list): + structured["stakeholders"] = [clean_text(s) for s in structured["stakeholders"]] + + return structured + + except json.JSONDecodeError: + print("LLM returned non-JSON:\n", response) + return None \ No newline at end of file diff --git a/ai/generative-ai-service/hr-goal-alignment/files/org_chart_backend.py b/ai/generative-ai-service/hr-goal-alignment/files/org_chart_backend.py index 8a8e2d558..aa5e224e7 100644 --- a/ai/generative-ai-service/hr-goal-alignment/files/org_chart_backend.py +++ b/ai/generative-ai-service/hr-goal-alignment/files/org_chart_backend.py @@ -1,4 +1,3 @@ -# Copyright (c) 2025 Oracle and/or its affiliates. 
import oracledb import pandas as pd @@ -6,24 +5,35 @@ from gen_ai_service.inference import classify_smart_goal from goal_alignment_backend import check_if_horizontal_aligned, check_if_vertical_aligned -connection = oracledb.connect( +try: + connection = oracledb.connect( **config.CONNECT_ARGS_VECTOR ) +except oracledb.Error as err: + print(f"Oracle connection error: {err}") + connection = None + + def mapping_all_employees() -> tuple[dict[str, list[str]], pd.DataFrame]: - query = ( - "SELECT employee_id, name, role, manager_id " - "FROM Employees" - ) - df_db = pd.DataFrame() # Initialize df_db + query = "SELECT employee_id, name, role, manager_id FROM Employees" try: - df_db = pd.read_sql(query, connection) # type: ignore + cursor = connection.cursor() + cursor.execute(query) + rows = cursor.fetchall() + + # Clean LOBs (just in case any column is a LOB) + clean_rows = [ + [col.read() if isinstance(col, oracledb.LOB) else col for col in row] + for row in rows + ] + + df_db = pd.DataFrame(clean_rows, columns=[desc[0] for desc in cursor.description]) + return mapping_from_df(df_db), df_db except Exception as err: print(f"Query failed: {err}") connection.close() - - # connection.close() - return mapping_from_df(df_db), df_db + return {}, pd.DataFrame() def build_label(df_row: pd.Series) -> str: @@ -56,21 +66,29 @@ def check_smart_goal(df_row: pd.Series) -> str: def fetch_goals_from_emp(df, emp_data) -> pd.DataFrame: - df_db = pd.DataFrame() # Initialize as empty DataFrame try: emp_id = search_employee(df, emp_data) - if emp_id: # Only proceed if emp_id is found - query = f"SELECT title, objective, metrics, timeline FROM Goals WHERE employee_id = '{emp_id}'" - df_db = pd.read_sql(query, connection) # type: ignore + query = f"SELECT title, objective, metrics, timeline FROM Goals WHERE employee_id = :1" + cursor = connection.cursor() + cursor.execute(query, (emp_id,)) + rows = cursor.fetchall() + + # Clean LOBs + clean_rows = [ + [col.read() if isinstance(col, oracledb.LOB) else col for col in row] + for row in rows + ] + + df_db = pd.DataFrame(clean_rows, columns=[desc[0] for desc in cursor.description]) + + # Check SMART criteria + df_db["smart"] = df_db.apply(check_smart_goal, axis=1) + + return df_db except oracledb.Error as err: print(f"Oracle connection error: {err}") - # df_db remains an empty DataFrame if an error occurs - - if not df_db.empty: # Check if DataFrame is not empty before applying - df_db["smart"] = df_db.apply(check_smart_goal, axis=1) - - return df_db + return pd.DataFrame() def search_employee(df, param): @@ -108,4 +126,4 @@ def check_goal_alignment(df, emp_data, manager_data): else: vertical = None - return vertical, horizontal + return vertical, horizontal \ No newline at end of file diff --git a/ai/generative-ai-service/hr-goal-alignment/files/pages/course_recommendation_chatbot.py b/ai/generative-ai-service/hr-goal-alignment/files/pages/course_recommendation_chatbot.py index 2ddc46567..dccaf819d 100644 --- a/ai/generative-ai-service/hr-goal-alignment/files/pages/course_recommendation_chatbot.py +++ b/ai/generative-ai-service/hr-goal-alignment/files/pages/course_recommendation_chatbot.py @@ -1,4 +1,3 @@ -# Copyright (c) 2025 Oracle and/or its affiliates. 
import streamlit as st import logging import oracledb diff --git a/ai/generative-ai-service/hr-goal-alignment/files/pages/goal_alignment_chatbot.py b/ai/generative-ai-service/hr-goal-alignment/files/pages/goal_alignment_chatbot.py index 122669bd6..f30ca4156 100644 --- a/ai/generative-ai-service/hr-goal-alignment/files/pages/goal_alignment_chatbot.py +++ b/ai/generative-ai-service/hr-goal-alignment/files/pages/goal_alignment_chatbot.py @@ -1,4 +1,3 @@ -# Copyright (c) 2025 Oracle and/or its affiliates. import streamlit as st import json import oracledb @@ -8,6 +7,7 @@ from utils import get_employee_id_by_name, generate_transcript_docx from langchain.prompts import PromptTemplate from goal_alignment_backend import ( + insert_goals_to_db, check_vertical_alignment_upward, check_horizontal_alignment, generate_final_recommendations, @@ -15,50 +15,339 @@ update_employee_goal_objective ) + + +@st.cache_resource +def get_db_connection(): + return oracledb.connect( + user=config.DB_USER, + password=config.DB_PASSWORD, + dsn=config.DB_DSN + ) +@st.cache_data(show_spinner=False) +def get_employee_and_manager_ids(employee_name): + connection = get_db_connection() + employee_id = get_employee_id_by_name(connection, employee_name) + if not employee_id: + return None, None + cursor = connection.cursor() + cursor.execute("SELECT manager_id FROM Employees WHERE employee_id = :1", (employee_id,)) + result = cursor.fetchone() + return employee_id, result[0] if result else None + +@st.cache_data(show_spinner=False) +def get_all_employees(): + connection = get_db_connection() + cursor = connection.cursor() + + # Use the confirmed schema: NAME and ROLE + cursor.execute("SELECT employee_id, name, role FROM Employees ORDER BY name") + results = cursor.fetchall() # list of tuples + + # Build label: "Alice Smith (HR Manager) — ID: 103" + employee_lookup = { + f"{name.strip()} ({role.strip()}) — ID: {emp_id}": emp_id + for emp_id, name, role in results + } + + return employee_lookup + + + # --- Helpers --- -def is_refinement_suggestion(text: str) -> bool: - keywords = [ - "i suggest refining", "consider adding a goal", "consider including", - "you could strengthen alignment by", "i recommend creating", "you could add", - "i suggest you add", "you could include" - ] - return any(kw in text.lower() for kw in keywords) + +# ────────────────────────────────────────────────────────────── +# Pull out individual goal sentences / bullet points +# ────────────────────────────────────────────────────────────── + +ACTION_VERBS = r'\b(conduct|collect|incorporate|achieve|align|improve|increase|add|ensure|expand|broaden)\b' +def extract_goal_lines(text: str) -> list[str]: + """ + Keep any line that has at least one digit/percent + and is longer than ~20 characters. + """ + cleaned = re.sub(r'[•–—►]', '-', text) + lines = [] + + for raw in cleaned.splitlines(): + line = raw.strip(" -*") + # skip narrative Insight/Alignment lines + if re.search(r'\b(insight|aligned|alignment|gap|mismatch|limit)\b', line, re.I): + # but keep it if it contains an action verb like add/achieve/conduct + if not re.search(r'\b(add|include|conduct|achieve|increase|improve|collect)\b', line, re.I): + continue + if len(line) < 20: + continue + if not (re.search(r'\d', line) or re.search(ACTION_VERBS, line, re.I)): + continue + lines.append(line) + + return lines + + +# ────────────────────────────────────────────────────────────── +# 3. 
Dumb structuring: title = first nouny chunk before colon +# ────────────────────────────────────────────────────────────── +TITLE_RE = re.compile(r'^[A-Z].{0,60}?[:\-–]') +TIMELINE_RE = re.compile( + r'\b(Q[1-4]\s*\d{2,4})\b' # Q4 2025 / Q1 26 + r'|(\b(?:H[12]|FY)\s*\d{2,4}\b)' # H1 2026 / FY 2025 + r'|(\b(?:end\s+of\s+)?Q[1-4]\s*\d{4}\b)', # End of Q4 2025 + flags=re.IGNORECASE +) + +def extract_structured_goal(line: str) -> dict | None: + """ + Returns a dict with clean title/metrics/timeline or None if no numbers present. + """ + if not re.search(r'\d', line): + return None + + # ① TIMELINE ----------------------------------------------------------- + m_time = TIMELINE_RE.search(line) + timeline = m_time.group(0).title().replace("End Of ", "") if m_time else "N/A" + + # strip timeline from line so its digits don't pollute metric list + core = line.replace(m_time.group(0), "") if m_time else line + # remove list markers like "1." / "2)" / "3 ." at the start + core = re.sub(r'^\s*\d+\s*[\.\)]\s*', '', core) + +# ② METRICS ---------------------------------------------------- + raw_metrics = re.findall( + r'''(?ix) + (? str: - match = re.search(r'your goal,.*?\"(.*?)\"', chat_message) - return match.group(1) if match else "Refined Goal" + # Try extracting quoted text after common phrases + match = re.search(r'["“](Achieve|Contribute|Align|Develop|Design|Support|Implement|Increase).*?["”]', chat_message, re.IGNORECASE) + if match: + return match.group(0).strip('“”" ') + + # Fallback: extract the first strong objective-like sentence + sentences = re.split(r'(?<=[.!?]) +', chat_message.strip()) + for sentence in sentences: + if re.search(r'\b(achieve|align|improve|contribute|support|implement|design|develop|increase|enhance)\b', sentence, re.IGNORECASE): + # Exclude generic vague patterns + if "your goal is" not in sentence.lower() and len(sentence.strip()) > 30: + return sentence.strip() + + # Final fallback + return "Achieve 75% employee participation in upskilling by Q2 2025." 
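For reference, here is a quick standalone check of how the quoted-goal pattern used in extract_possible_goal above behaves; the sample chatbot message and the printed result are invented purely for illustration:

    import re

    # Same shape as the pattern above: a quoted sentence that starts with an action verb.
    GOAL_QUOTE_RE = re.compile(
        r'["“](Achieve|Contribute|Align|Develop|Design|Support|Implement|Increase).*?["”]',
        re.IGNORECASE,
    )

    message = 'I suggest refining your goal to "Achieve a 15% reduction in onboarding time by Q3 2025".'
    match = GOAL_QUOTE_RE.search(message)
    if match:
        # Strip the surrounding straight/curly quotes, as extract_possible_goal does.
        print(match.group(0).strip('“”" '))
        # -> Achieve a 15% reduction in onboarding time by Q3 2025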
+ def add_goal_refinement(goal_title, refined_objective): - existing = next((g for g in st.session_state.ga_goal_refinements if g["goal_title"] == goal_title), None) - if not existing: - st.session_state.ga_goal_refinements.append({ - "goal_title": goal_title, - "refined_objective": refined_objective, - "timestamp": datetime.utcnow().isoformat(), - "applied": False - }) + # Prevent duplicates + for existing in st.session_state.ga_goal_refinements: + if existing["goal_title"] == goal_title and existing["refined_objective"] == refined_objective: + return # Already exists + st.session_state.ga_goal_refinements.append({ + "goal_title": goal_title, + "refined_objective": refined_objective, + "timestamp": datetime.utcnow().isoformat(), + "applied": False + }) def render_refinement_action(refinement, idx): - st.markdown(f"**Goal:** {refinement['goal_title']}") - st.text_area("Refined Objective", value=refinement["refined_objective"], height=120, key=f"refined_{idx}", disabled=True) - if not refinement["applied"]: - if st.button(f"✅ Apply to DB (Goal: {refinement['goal_title']})", key=f"apply_{idx}"): - connection = None + st.markdown(f"### 🎯 Goal #{idx + 1}: {refinement.get('goal_title', 'Untitled Goal')}") + + # Determine whether it's a structured or unstructured refinement + if "objective" in refinement: + # Structured refinement + st.markdown(f"**Objective:** {refinement.get('objective', 'N/A')}") + st.markdown(f"**Metrics:** {'; '.join(refinement.get('metrics', [])) if refinement.get('metrics') else 'N/A'}") + st.markdown(f"**Timeline:** {refinement.get('timeline', 'N/A')}") + st.markdown(f"**Stakeholders:** {', '.join(refinement.get('stakeholders', [])) if refinement.get('stakeholders') else 'None listed'}") + else: + # Fallback: old unstructured refinement + st.markdown("**Refined Objective (Unstructured):**") + st.text_area("Text", value=refinement.get("refined_objective", ""), height=120, key=f"refined_{idx}", disabled=True) + + if not refinement.get("applied", False): + if st.button(f"✅ Apply to DB", key=f"apply_{idx}"): try: - connection = oracledb.connect( - user=config.DB_USER, - password=config.DB_PASSWORD, - dsn=config.DB_DSN - ) + connection = get_db_connection() employee_id = get_employee_id_by_name(connection, st.session_state.ga_employee_name) - msg = update_employee_goal_objective(connection, employee_id, refinement["refined_objective"]) + + # Determine which function to call based on structure + if "objective" in refinement: + from goal_alignment_backend import insert_goals_to_db + insert_goals_to_db(connection, employee_id, [refinement]) + else: + from goal_alignment_backend import update_employee_goal_objective + update_employee_goal_objective(connection, employee_id, refinement["refined_objective"]) + refinement["applied"] = True - st.success(f"Updated: {msg}") + st.success("✅ Goal successfully saved to database.") except Exception as e: - st.error(f"Error updating DB: {e}") - finally: - if connection: - connection.close() + st.error(f"❌ Error saving goal: {e}") + + st.markdown(f"**Status:** {'✅ Applied' if refinement.get('applied') else '🕒 Not Applied'}") + st.markdown("---") + + + +def _finalise_conversation(): + """ + One-shot routine that: + • harvests refinements from the whole chat + • appends a farewell + • flips the ‘conversation complete’ flag + + Streamlit will auto-rerun after the callback; no explicit st.rerun(). 
+ """ + if st.session_state.ga_finalise_ran: + return # already executed in this session + + # ----- 2.1 harvest goal refinements from all chatbot messages + for msg in st.session_state.ga_chat_history: + bot_text = msg.get("Chatbot", "") + for line in extract_goal_lines(bot_text): + structured = extract_structured_goal(line) + if structured: + _deduped_append(structured) # helper below + else: + add_goal_refinement( + extract_possible_goal(line), # title + line # raw text + ) + + # ----- 2.2 add a single farewell once + st.session_state.ga_chat_history.append({ + "Chatbot": ( + "I hope you're feeling more confident in refining your goal sheet. " + "Best of luck as you continue developing your plan!" + ) + }) + + # ----- 2.3 mark complete & guard + st.session_state.ga_conversation_complete = True + st.session_state.ga_finalise_ran = True + +def _deduped_append(structured): + """Prevent duplicates in session_state.ga_goal_refinements""" + if not any( + r.get("objective") == structured.get("objective") and + r.get("goal_title") == structured.get("goal_title") + for r in st.session_state.ga_goal_refinements + ): + st.session_state.ga_goal_refinements.append({ + **structured, + "timestamp": datetime.utcnow().isoformat(), + "applied": False + }) + +def render_goal_review_tools(): + st.header("✍️ Apply Refined Goals to the Database") + + if not st.session_state.ga_goal_refinements: + st.info("No refined goals were detected in this session.") + return + + # Debug section (optional – remove in production) + with st.expander("Debug » Raw goal objects"): + st.json(st.session_state.ga_goal_refinements) + + # Per-goal UI + for idx, refinement in enumerate(st.session_state.ga_goal_refinements): + render_refinement_action(refinement, idx) + + # Bulk-apply button + unapplied = [r for r in st.session_state.ga_goal_refinements if not r["applied"]] + if unapplied and st.button("✅ Apply ALL refined goals"): + _apply_all_goals(unapplied) + +def _apply_all_goals(refinements): + """ + Bulk-save every un-applied goal object in *refinements*. + + Marks each refinement as applied and surfaces a Streamlit + success (or error) message per goal. 
+ """ + if not refinements: + st.info("Nothing to apply.") + return + + try: + connection = get_db_connection() + employee_id = get_employee_id_by_name( + connection, + st.session_state.ga_employee_name + ) + + structured = [r for r in refinements if "objective" in r] + legacy = [r for r in refinements if "refined_objective" in r] + + # --- structured goals ------------------------------------- + if structured: + insert_goals_to_db(connection, employee_id, structured) + for r in structured: + r["applied"] = True + st.success(f"Inserted structured goal: {r['goal_title']}") + + # --- legacy / unstructured goals -------------------------- + for r in legacy: + update_employee_goal_objective( + connection, + employee_id, + r["refined_objective"] + ) + r["applied"] = True + st.success(f"Saved legacy goal: {r['refined_objective'][:60]}…") + + except Exception as e: + st.error(f"Bulk update failed: {e}") + + finally: + pass # --- Main Chatbot --- def run_goal_alignment_chatbot(): @@ -67,54 +356,71 @@ def run_goal_alignment_chatbot(): st.session_state.setdefault("ga_goal_refinements", []) st.session_state.setdefault("ga_chat_history", []) st.session_state.setdefault("ga_conversation_complete", False) + st.session_state.setdefault("ga_finalise_ran", False) + st.session_state.setdefault("ga_topics_done", set()) + st.session_state.setdefault("ga_current_topic", None) + employee_lookup = get_all_employees() - if "ga_report" not in st.session_state: - employee_name = st.text_input("Enter Employee Name:", key="ga_employee_input") - - if st.button("Generate Report", key="ga_generate_report_btn"): - with st.spinner("Connecting and compiling alignment report..."): - connection = None - try: - connection = oracledb.connect( - user=config.DB_USER, - password=config.DB_PASSWORD, - dsn=config.DB_DSN - ) - employee_id = get_employee_id_by_name(connection, employee_name) - - if not employee_id: - st.error(f"Employee '{employee_name}' not found in the database.") - return - - cursor = connection.cursor() - cursor.execute("SELECT manager_id FROM Employees WHERE employee_id = :1", (employee_id,)) - result = cursor.fetchone() - if not result: - st.error(f"No manager found for employee '{employee_name}'") - return - manager_id = result[0] - - compiled_report = { - "vertical_alignment": check_vertical_alignment_upward(connection, manager_id, employee_id), - "horizontal_alignment": check_horizontal_alignment(connection, employee_id), - "final_recommendations": generate_final_recommendations(connection, employee_id) - } - - st.session_state.ga_report = compiled_report - st.session_state.ga_employee_name = employee_name - st.success(f"Report compiled for {employee_name}!") - - report_json = json.dumps(compiled_report, indent=2) - st.download_button( - label="📄 Download JSON Report", - data=report_json, - file_name=f"goal_alignment_report_{employee_name}.json", - mime="application/json" - ) + selected_label = st.selectbox("Select an Employee:", list(employee_lookup.keys()), key="ga_employee_select") + employee_id = employee_lookup[selected_label] + employee_name = selected_label.split(" (")[0] # Just the name + + # Reset session state if a new employee is selected + # This ensures we start fresh for each employee + if ( + "ga_selected_employee" not in st.session_state + or st.session_state.ga_selected_employee != employee_id + ): + # ▼ New employee selected → wipe per-employee data + st.session_state.ga_selected_employee = employee_id + st.session_state.ga_goal_refinements = [] + st.session_state.ga_chat_history = [] + 
st.session_state.ga_conversation_complete = False + st.session_state.ga_finalise_ran = False + + if st.button("Generate Report", key="ga_generate_report_btn"): + try: + with st.spinner("Looking up employee and manager..."): + employee_id, manager_id = get_employee_and_manager_ids(employee_name) + + if not employee_id: + st.error(f"Employee '{employee_name}' not found in the database.") + return + if not manager_id: + st.error(f"No manager found for employee '{employee_name}'.") + return + + with st.spinner("Checking vertical alignment..."): + vertical = check_vertical_alignment_upward(get_db_connection(), manager_id, employee_id) + + with st.spinner("Checking horizontal alignment..."): + horizontal = check_horizontal_alignment(get_db_connection(), employee_id) + + with st.spinner("Generating final recommendations..."): + recommendations = generate_final_recommendations(get_db_connection(), employee_id) + + compiled_report = { + "vertical_alignment": vertical, + "horizontal_alignment": horizontal, + "final_recommendations": recommendations + } + + st.session_state.ga_report = compiled_report + st.session_state.ga_employee_name = employee_name + st.success(f"Report compiled for {employee_name}!") - initial_prompt = PromptTemplate( - input_variables=["employee_name", "final_recommendations"], - template=""" + report_json = json.dumps(compiled_report, indent=2) + st.download_button( + label="📄 Download JSON Report", + data=report_json, + file_name=f"goal_alignment_report_{employee_name}.json", + mime="application/json" + ) + + # Initial message from the chatbot + initial_prompt = PromptTemplate( + input_variables=["employee_name", "final_recommendations"], + template=""" You are a career mentor guiding {employee_name}. **Initial Guidance:** @@ -125,25 +431,23 @@ def run_goal_alignment_chatbot(): - Highlight one key insight from the report about how the employee aligns (or not) with their manager in less than 100 words. - Suggest a concrete measure for {employee_name} to fix this, making reference to the text they have now in their own goal sheet and how it differs from that of their manager's. - Only use the information in {final_recommendations}. 
- """ - ) - initial_message = query_llm(initial_prompt, { - "employee_name": employee_name, - "final_recommendations": compiled_report["final_recommendations"] - }) - st.session_state.ga_chat_history = [{"Chatbot": initial_message}] + """ + ) + initial_message = query_llm(initial_prompt, { + "employee_name": employee_name, + "final_recommendations": compiled_report["final_recommendations"] + }) + st.session_state.ga_chat_history = [{"Chatbot": initial_message}] - except Exception as e: - st.error(f"Error: {e}") - finally: - if connection: - connection.close() + except Exception as e: + st.error(f"Error during report generation or LLM query: {e}") # --- Chat Interface --- if "ga_report" in st.session_state: st.subheader("Chat with your Career Mentor") employee_name = st.session_state.ga_employee_name + # ✅ Always show chat history for exchange in st.session_state.ga_chat_history: if "User" in exchange: with st.chat_message("user", avatar="🧑‍💼"): @@ -152,24 +456,36 @@ def run_goal_alignment_chatbot(): with st.chat_message("assistant", avatar="🤖"): st.markdown(exchange["Chatbot"]) + # ✅ If chat is still active, handle input if not st.session_state.ga_conversation_complete: - user_msg = st.chat_input("You:", key="ga_user_input") - if user_msg: + user_msg = st.chat_input("You:") + + # Step 1: Store input and rerun + if user_msg and "ga_pending_input" not in st.session_state: st.session_state.ga_chat_history.append({"User": user_msg}) + st.session_state.ga_pending_input = user_msg + st.rerun() - if any(phrase in user_msg.strip().lower() for phrase in [ + # Step 2: Process after rerun + if "ga_pending_input" in st.session_state: + user_msg = st.session_state.ga_pending_input + del st.session_state.ga_pending_input + + # Check for end phrases + if any(phrase in user_msg.lower().strip() for phrase in [ "no thanks", "no thank you", "i'm good", "im good", "no i'm good", "no im good", "that's all", "we're done", - "we are done", "stop", "all set"]): - closing = "I hope you're feeling more confident in refining your goal sheet. Best of luck as you continue developing your plan!" - st.session_state.ga_chat_history.append({"Chatbot": closing}) + "we are done", "stop", "all set" + ]): + st.session_state.ga_chat_history.append({ + "Chatbot": "I hope you're feeling more confident in refining your goal sheet. Best of luck as you continue developing your plan!" + }) st.session_state.ga_conversation_complete = True st.rerun() - - prompt = PromptTemplate( - input_variables=["employee_name", "chat_input", "chat_history", "full_report"], - template=""" -You are a helpful career mentor chatbot coaching {employee_name} on goal alignment. + else: + chat_prompt = PromptTemplate( + input_variables=["employee_name", "chat_input", "chat_history", "full_report"], + template="""You are a helpful career mentor chatbot coaching {employee_name} on goal alignment. Here is the previous chat history: {chat_history} @@ -188,43 +504,35 @@ def run_goal_alignment_chatbot(): - If there's a lack of agency in what they last said, suggest something to help them refine this goal but you're moving on regardless! - Don’t repeat topics already covered in chat history. - Don't ask open ended questions unless there's really no other option. -- - Give the impression that you're reducing the user's cognitive load, not adding to it. 
- """ - ) - response = query_llm(prompt, { - "employee_name": employee_name, - "chat_input": user_msg, - "chat_history": json.dumps(st.session_state.ga_chat_history, indent=2), - "full_report": json.dumps(st.session_state.ga_report, indent=2) - }) - - st.session_state.ga_chat_history.append({"Chatbot": response}) - if is_refinement_suggestion(response): - possible_goal = extract_possible_goal(response) - add_goal_refinement(possible_goal, response) - st.rerun() +- Give the impression that you're reducing the user's cognitive load, not adding to it. + """ + ) + response = query_llm(chat_prompt, { + "employee_name": employee_name, + "chat_input": user_msg, + "chat_history": json.dumps(st.session_state.ga_chat_history, indent=2), + "full_report": json.dumps(st.session_state.ga_report, indent=2) + }) - if st.button("Stop Conversation") and not st.session_state.ga_conversation_complete: - st.session_state.ga_chat_history.append({ - "Chatbot": "I hope you're feeling more confident in refining your goal sheet. Best of luck as you continue developing your plan!" - }) - st.session_state.ga_conversation_complete = True - st.rerun() + st.session_state.ga_chat_history.append({"Chatbot": response}) + + goal_lines = extract_goal_lines(response) + for line in goal_lines: + structured = extract_structured_goal(line) + if structured: + _deduped_append(structured) + st.rerun() + + st.button( + "🚦 End Conversation & Review Goals", + on_click=_finalise_conversation, + disabled=st.session_state.ga_conversation_complete, key="ga_end_conversation_btn" +) + + # ---------- POST-CHAT TOOLS ---------- + if st.session_state.ga_conversation_complete: + render_goal_review_tools() # see next snippet - if st.session_state.ga_conversation_complete and st.session_state.ga_chat_history: - transcript_file = generate_transcript_docx(st.session_state.ga_chat_history, employee_name) - st.download_button( - label="📥 Download Conversation Transcript", - data=transcript_file, - file_name=f"{employee_name}_GoalAlignmentChat_transcript.docx", - mime="application/vnd.openxmlformats-officedocument.wordprocessingml.document", - key="ga_download_btn" - ) - if st.session_state.ga_conversation_complete and st.session_state.ga_goal_refinements: - st.markdown("### ✍️ Apply Refined Goals to the Database") - st.info("Below are refined objectives suggested during your chat. You can apply them to the goal sheet.") - for idx, refinement in enumerate(st.session_state.ga_goal_refinements): - render_refinement_action(refinement, idx) -run_goal_alignment_chatbot() +run_goal_alignment_chatbot() \ No newline at end of file diff --git a/ai/generative-ai-service/hr-goal-alignment/files/pages/manager_meeting_chatbot.py b/ai/generative-ai-service/hr-goal-alignment/files/pages/manager_meeting_chatbot.py index 97ba6f14c..896f2d214 100644 --- a/ai/generative-ai-service/hr-goal-alignment/files/pages/manager_meeting_chatbot.py +++ b/ai/generative-ai-service/hr-goal-alignment/files/pages/manager_meeting_chatbot.py @@ -1,4 +1,3 @@ -# Copyright (c) 2025 Oracle and/or its affiliates. 
import streamlit as st import oracledb import config @@ -12,8 +11,6 @@ def run_meeting_preparation_chatbot(): employee_name = st.text_input("Enter Employee Name:", key="prep_employee_input") if employee_name: - connection = None - cursor = None try: with st.spinner("Connecting and fetching data..."): connection = oracledb.connect( @@ -129,9 +126,9 @@ def read_lob(lob): return lob.read() if lob else "N/A" except Exception as e: st.error(f"Unexpected error: {e}") finally: - if cursor: cursor.close() - if connection: connection.close() + if 'cursor' in locals(): cursor.close() + if 'connection' in locals(): connection.close() -run_meeting_preparation_chatbot() +run_meeting_preparation_chatbot() \ No newline at end of file diff --git a/ai/generative-ai-service/hr-goal-alignment/files/pages/self_assessment_chatbot.py b/ai/generative-ai-service/hr-goal-alignment/files/pages/self_assessment_chatbot.py index 34a5aa8f0..199deccac 100644 --- a/ai/generative-ai-service/hr-goal-alignment/files/pages/self_assessment_chatbot.py +++ b/ai/generative-ai-service/hr-goal-alignment/files/pages/self_assessment_chatbot.py @@ -1,4 +1,3 @@ -# Copyright (c) 2025 Oracle and/or its affiliates. import streamlit as st import json from langchain.prompts import PromptTemplate @@ -41,7 +40,6 @@ def run_self_assessment_chatbot(): st.warning("Please enter an employee name.") return - connection = None try: with st.spinner("Connecting to database and gathering data..."): connection = oracledb.connect( @@ -98,7 +96,7 @@ def run_self_assessment_chatbot(): except Exception as e: st.error(f"Error: {e}") finally: - if connection: + if 'connection' in locals(): connection.close() if st.session_state.sa_report: @@ -114,7 +112,7 @@ def run_self_assessment_chatbot(): if st.button("Stop Conversation", key="sa_stop_btn"): st.session_state.sa_conversation_complete = True - if not st.session_state.sa_chat_history[-1].get("Chatbot", "").startswith("Got it! Best of luck"): # type: ignore + if not st.session_state.sa_chat_history[-1].get("Chatbot", "").startswith("Got it! Best of luck"): st.session_state.sa_chat_history.append( {"Chatbot": "Okay, ending the conversation here. 
Best of luck!"} ) diff --git a/ai/generative-ai-service/hr-goal-alignment/files/requirements.txt b/ai/generative-ai-service/hr-goal-alignment/files/requirements.txt index f7c82ce80..a71e433db 100644 --- a/ai/generative-ai-service/hr-goal-alignment/files/requirements.txt +++ b/ai/generative-ai-service/hr-goal-alignment/files/requirements.txt @@ -18,6 +18,8 @@ circuitbreaker==1.4.0 click==8.1.8 cryptography==44.0.1 dataclasses-json==0.6.7 +docx +emoji==2.14.1 et_xmlfile==2.0.0 eval_type_backport==0.2.2 filetype==1.2.0 @@ -26,6 +28,7 @@ gitdb==4.0.12 GitPython==3.1.44 graphviz==0.20.3 h11==0.14.0 +html5lib==1.1 httpcore==1.0.7 httpx==0.28.1 httpx-sse==0.4.0 @@ -42,6 +45,7 @@ langchain-core==0.3.49 langchain-text-splitters==0.3.7 langdetect==1.0.9 langsmith==0.3.19 +lxml==5.3.2 MarkupSafe==3.0.2 marshmallow==3.26.1 multidict==6.2.0 @@ -55,6 +59,7 @@ oci==2.149.1 olefile==0.47 openpyxl==3.1.5 oracledb==3.0.0 +orjson==3.10.16 packaging==24.2 pandas==2.2.3 pillow==11.1.0 @@ -69,9 +74,13 @@ pydantic_core==2.33.1 pydantic-settings==2.8.1 pydeck==0.9.1 pyOpenSSL==24.3.0 +pypdf==5.4.0 python-dateutil==2.9.0.post0 +python-docx==1.1.2 python-dotenv==1.1.0 python-iso639==2025.2.18 +python-magic==0.4.27 +python-oxmsg==0.0.2 pytz==2025.2 PyYAML==6.0.2 RapidFuzz==3.13.0 @@ -95,8 +104,11 @@ typing_extensions==4.13.0 typing-inspect==0.9.0 typing-inspection==0.4.0 tzdata==2025.2 +unstructured==0.17.2 +unstructured-client==0.34.0 urllib3==2.3.0 webencodings==0.5.1 wrapt==1.17.2 xlrd==2.0.1 yarl==1.18.3 +zstandard==0.23.0 diff --git a/ai/generative-ai-service/hr-goal-alignment/files/scripts/create_tables.py b/ai/generative-ai-service/hr-goal-alignment/files/scripts/create_tables.py index ffac7c21b..b7479fe8b 100644 --- a/ai/generative-ai-service/hr-goal-alignment/files/scripts/create_tables.py +++ b/ai/generative-ai-service/hr-goal-alignment/files/scripts/create_tables.py @@ -1,13 +1,9 @@ -# Copyright (c) 2025 Oracle and/or its affiliates. import oracledb import sys import os sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) import config - -connection = None -cursor = None try: # Create a connection to the database connection = oracledb.connect( @@ -83,8 +79,8 @@ finally: # Close the cursor and connection - if cursor: + if 'cursor' in locals(): cursor.close() - if connection: + if 'connection' in locals(): connection.close() print("Connection closed") diff --git a/ai/generative-ai-service/hr-goal-alignment/files/scripts/populate_demo_data.py b/ai/generative-ai-service/hr-goal-alignment/files/scripts/populate_demo_data.py index 688a54080..103a71fcf 100644 --- a/ai/generative-ai-service/hr-goal-alignment/files/scripts/populate_demo_data.py +++ b/ai/generative-ai-service/hr-goal-alignment/files/scripts/populate_demo_data.py @@ -1,4 +1,3 @@ -# Copyright (c) 2025 Oracle and/or its affiliates. import oracledb import os import sys diff --git a/ai/generative-ai-service/hr-goal-alignment/files/utils.py b/ai/generative-ai-service/hr-goal-alignment/files/utils.py index f21dfdd54..89c127249 100644 --- a/ai/generative-ai-service/hr-goal-alignment/files/utils.py +++ b/ai/generative-ai-service/hr-goal-alignment/files/utils.py @@ -1,8 +1,8 @@ -# Copyright (c) 2025 Oracle and/or its affiliates. import streamlit as st from io import BytesIO from docx import Document import oracledb +from langchain.prompts import PromptTemplate # ----------------------- # Database Helpers