|
34 | 34 | sys.path.append(str(ROOT_DIR)) |
35 | 35 |
|
36 | 36 | from app.services.evaluator_service import EvaluatorService |
37 | | -from app.models.request_models import SynthesisRequest, EvaluationRequest, Export_synth, ModelParameters, CustomPromptRequest, JsonDataSize, RelativePath |
| 37 | +from app.models.request_models import SynthesisRequest, EvaluationRequest, Export_synth, ModelParameters, CustomPromptRequest, JsonDataSize, RelativePath, Technique |
38 | 38 | from app.services.synthesis_service import SynthesisService |
39 | 39 | from app.services.export_results import Export_Service |
40 | 40 |
|
@@ -935,6 +935,149 @@ async def get_model_parameters() -> Dict: |
935 | 935 |
|
936 | 936 |
|
937 | 937 |
|
def _read_first_input(path_list, input_key):
    """Load the first JSON input file and return the first value under *input_key*.

    Validates the path list, reads the file, and checks the JSON structure,
    raising an ``HTTPException`` with an appropriate status code (400/404) on
    any failure so the endpoint can surface it to the client unchanged.
    """
    # --- path validation (bad request data -> 400) ---
    if not path_list:
        raise HTTPException(status_code=400, detail="Invalid input_path: input_path must not be empty or None")
    if not isinstance(path_list, (list, tuple)):
        raise HTTPException(status_code=400, detail="Invalid input_path: input_path must be a list or tuple of paths")
    if not path_list[0]:
        raise HTTPException(status_code=400, detail="Invalid input_path: First path in input_path is empty")
    path = path_list[0]

    # --- file loading (missing -> 404, malformed -> 400) ---
    try:
        with open(path) as f:
            data = json.load(f)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail=f"Input file not found: {path}")
    except json.JSONDecodeError:
        raise HTTPException(status_code=400, detail=f"Invalid JSON in file: {path}")

    # --- structure validation: a list of dicts is expected ---
    if not isinstance(data, list):
        raise HTTPException(
            status_code=400,
            detail=f"Invalid data structure in {path}: expected JSON data to be a list, but got {type(data).__name__}",
        )

    inputs = [item.get(input_key, '') for item in data if isinstance(item, dict)]
    if not inputs:
        raise HTTPException(
            status_code=400,
            detail=(
                f"No data extracted from {path} using key '{input_key}'. "
                "The file might be empty, the key might not exist, or the JSON structure is unexpected."
            ),
        )
    return inputs[0]


@app.post("/complete_gen_prompt")
async def complete_gen_prompt(request: SynthesisRequest):
    """Allow users to see the whole prompt which finally goes into the LLM.

    Builds the generation prompt for the first topic of the request using the
    prompt builder matching ``request.technique`` and returns it verbatim, so
    clients can preview exactly what will be sent to the model.

    Returns:
        dict: ``{"complete_prompt": <rendered prompt string>}``

    Raises:
        HTTPException: 400 for invalid input data or unsupported technique,
            404 for a missing input file, 500 for unexpected errors.
    """
    try:
        topic = request.topics[0]
        # Preview uses at most 5 questions per batch, mirroring the generator.
        batch_size = min(request.num_questions, 5)
        omit_questions = []

        if request.technique == Technique.Freeform:
            prompt = PromptBuilder.build_freeform_prompt(
                model_id=request.model_id,
                use_case=request.use_case,
                topic=topic,
                num_questions=batch_size,
                omit_questions=omit_questions,
                example_custom=request.example_custom or [],
                example_path=request.example_path,
                custom_prompt=request.custom_prompt,
                schema=request.schema,
            )
        elif request.technique == Technique.Custom_Workflow:
            # Custom workflow seeds the prompt with the first record of the
            # user's input file rather than a topic.
            input_data = _read_first_input(request.input_path, request.input_key)
            prompt = PromptBuilder.build_generate_result_prompt(
                model_id=request.model_id,
                use_case=request.use_case,
                input=input_data,
                examples=request.examples or [],
                schema=request.schema,
                custom_prompt=request.custom_prompt,
            )
        elif request.technique == Technique.SFT:
            prompt = PromptBuilder.build_prompt(
                model_id=request.model_id,
                use_case=request.use_case,
                topic=topic,
                num_questions=batch_size,
                omit_questions=omit_questions,
                examples=request.examples or [],
                technique=request.technique,
                schema=request.schema,
                custom_prompt=request.custom_prompt,
            )
        else:
            # Without this branch `prompt` would be unbound and surface as an
            # opaque NameError/500 for an unrecognised technique.
            raise HTTPException(status_code=400, detail=f"Unsupported technique: {request.technique}")

        return {"complete_prompt": prompt}

    except HTTPException:
        # Re-raise as-is: the broad handler below must not mask the
        # deliberate 400/404 status codes raised during validation.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
@app.post("/complete_eval_prompt")
async def complete_eval_prompt(request: EvaluationRequest):
    """Allow users to see the whole prompt which finally goes into the LLM.

    Loads the first record from ``request.import_path`` and renders the
    evaluation prompt for it with the builder matching ``request.technique``.
    Renamed from ``complete_prompt`` to avoid redefining the generation
    endpoint's handler of the same name (F811).

    Returns:
        dict: ``{"complete_prompt": <rendered prompt string>}``

    Raises:
        HTTPException: 400 for empty/invalid data or unsupported technique,
            404 for a missing import file, 500 for unexpected errors.
    """
    try:
        supported = (Technique.Freeform, Technique.SFT, Technique.Custom_Workflow)
        # Validate the technique before touching the filesystem so an
        # unsupported technique yields 400 rather than a file error.
        if request.technique not in supported:
            raise HTTPException(status_code=400, detail=f"Unsupported technique: {request.technique}")

        try:
            with open(request.import_path, 'r') as file:
                data = json.load(file)
        except FileNotFoundError:
            raise HTTPException(status_code=404, detail=f"Input file not found: {request.import_path}")
        except json.JSONDecodeError:
            raise HTTPException(status_code=400, detail=f"Invalid JSON in file: {request.import_path}")

        if request.technique == Technique.Freeform:
            # Accept either a list of rows or a single row object.
            rows = data if isinstance(data, list) else [data]
            if not rows:
                raise HTTPException(status_code=400, detail=f"No rows found in {request.import_path}")
            prompt = PromptBuilder.build_freeform_eval_prompt(
                request.model_id,
                request.use_case,
                rows[0],
                request.examples,
                request.custom_prompt,
            )
        else:  # SFT or Custom_Workflow
            if not data:
                # Guard: indexing an empty list previously raised IndexError (500).
                raise HTTPException(status_code=400, detail=f"No records found in {request.import_path}")
            # Only the first record is needed for the preview — no need to
            # build the whole qa_pairs list as before.
            item = data[0]
            prompt = PromptBuilder.build_eval_prompt(
                request.model_id,
                request.use_case,
                item.get(request.output_key, ''),    # get() avoids KeyError on missing keys
                item.get(request.output_value, ''),
                request.examples,
                request.custom_prompt,
            )

        return {"complete_prompt": prompt}

    except HTTPException:
        # Preserve deliberate 400/404 responses instead of wrapping them in 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
| 1077 | + |
| 1078 | + |
| 1079 | + |
| 1080 | + |
938 | 1081 |
|
939 | 1082 | @app.get("/{use_case}/gen_prompt") |
940 | 1083 | async def customise_prompt(use_case: UseCase): |
|
0 commit comments