diff --git a/tests/test-progress-feature.cpp b/tests/test-progress-feature.cpp
new file mode 100644
index 0000000000000..74e7e05452ea7
--- /dev/null
+++ b/tests/test-progress-feature.cpp
@@ -0,0 +1,314 @@
+#include <iostream>
+#include <sstream>
+#include <string>
+#include <vector>
+#include <utility>
+#include <functional>
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#pragma comment(lib, "ws2_32.lib")
+// sockets on Windows are closed with closesocket() rather than close()
+#define close closesocket
+#else
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+#endif
+
+class ProgressFeatureTest {
+private:
+    std::string server_url;
+    int server_port;
+
+    std::string create_long_prompt() {
+        return R"(Please provide a comprehensive analysis of artificial intelligence and machine learning, including but not limited to:
+
+1. Historical Development: Trace the evolution of AI from its early beginnings in the 1950s through the various AI winters and recent breakthroughs. Discuss key milestones such as the Dartmouth Conference, expert systems, neural networks, and deep learning.
+
+2. Machine Learning Fundamentals: Explain the core concepts of supervised learning, unsupervised learning, and reinforcement learning. Describe different types of algorithms including decision trees, support vector machines, neural networks, and ensemble methods.
+
+3. Deep Learning Revolution: Detail the resurgence of neural networks through deep learning, including convolutional neural networks (CNNs) for computer vision, recurrent neural networks (RNNs) and transformers for natural language processing, and generative adversarial networks (GANs).
+
+4. Natural Language Processing: Discuss the evolution from rule-based systems to statistical methods to neural approaches. Cover topics like word embeddings, sequence-to-sequence models, attention mechanisms, and large language models like GPT, BERT, and their successors.
+
+5. Computer Vision: Explore the development of computer vision from traditional image processing to deep learning approaches. Discuss object detection, image segmentation, face recognition, and recent advances in vision transformers.
+
+6. Applications and Impact: Analyze how AI is transforming various industries including healthcare, finance, transportation, education, and entertainment. Discuss both the benefits and potential risks of AI deployment.
+
+7. Ethical Considerations: Address important ethical issues such as bias in AI systems, privacy concerns, job displacement, and the need for responsible AI development and deployment.
+
+8. Future Directions: Speculate on emerging trends in AI research, including multimodal AI, few-shot learning, explainable AI, and the pursuit of artificial general intelligence (AGI).
+
+Please provide detailed explanations with specific examples and technical details where appropriate. This should be a thorough, academic-level analysis suitable for someone with a background in computer science or related fields.)";
+    }
+
+    std::string create_completion_request(const std::string& prompt, bool return_progress = true) {
+        std::ostringstream oss;
+        oss << "POST /completion HTTP/1.1\r\n";
+        oss << "Host: localhost:" << server_port << "\r\n";
+        oss << "Content-Type: application/json\r\n";
+        oss << "Connection: close\r\n";
+
+        std::string json_body = "{"
+            "\"prompt\": \"" + escape_json_string(prompt) + "\","
+            "\"stream\": true,"
+            "\"return_progress\": " + (return_progress ? "true" : "false") + ","
+            "\"max_tokens\": 20,"
+            "\"temperature\": 0.7"
+            "}";
"true" : "false") + "," + "\"max_tokens\": 20," + "\"temperature\": 0.7" + "}"; + + oss << "Content-Length: " << json_body.length() << "\r\n"; + oss << "\r\n"; + oss << json_body; + + return oss.str(); + } + + std::string create_chat_completion_request(const std::string& prompt, bool return_progress = true) { + std::ostringstream oss; + oss << "POST /v1/chat/completions HTTP/1.1\r\n"; + oss << "Host: localhost:" << server_port << "\r\n"; + oss << "Content-Type: application/json\r\n"; + oss << "Connection: close\r\n"; + + std::string json_body = "{" + "\"model\": \"test\"," + "\"messages\": [{\"role\": \"user\", \"content\": \"" + escape_json_string(prompt) + "\"}]," + "\"stream\": true," + "\"return_progress\": " + (return_progress ? "true" : "false") + "," + "\"max_tokens\": 20," + "\"temperature\": 0.7" + "}"; + + oss << "Content-Length: " << json_body.length() << "\r\n"; + oss << "\r\n"; + oss << json_body; + + return oss.str(); + } + + std::string escape_json_string(const std::string& str) { + std::string result; + for (char c : str) { + if (c == '"' || c == '\\' || c == '\n' || c == '\r' || c == '\t') { + result += '\\'; + switch (c) { + case '"': result += '"'; break; + case '\\': result += '\\'; break; + case '\n': result += 'n'; break; + case '\r': result += 'r'; break; + case '\t': result += 't'; break; + } + } else { + result += c; + } + } + return result; + } + + bool send_http_request(const std::string& request, std::string& response) { + int sock = socket(AF_INET, SOCK_STREAM, 0); + if (sock < 0) { + std::cerr << "Failed to create socket" << std::endl; + return false; + } + + struct sockaddr_in server_addr; + server_addr.sin_family = AF_INET; + server_addr.sin_port = htons(server_port); + server_addr.sin_addr.s_addr = inet_addr("127.0.0.1"); + + if (connect(sock, (struct sockaddr*)&server_addr, sizeof(server_addr)) < 0) { + std::cerr << "Failed to connect to server" << std::endl; + close(sock); + return false; + } + + if (send(sock, request.c_str(), request.length(), 0) < 0) { + std::cerr << "Failed to send request" << std::endl; + close(sock); + return false; + } + + char buffer[4096]; + response.clear(); + + while (true) { + int bytes_received = recv(sock, buffer, sizeof(buffer) - 1, 0); + if (bytes_received <= 0) break; + + buffer[bytes_received] = '\0'; + response += buffer; + } + + close(sock); + return true; + } + + bool parse_progress_responses(const std::string& response, std::vector& progress_responses, std::vector& content_responses) { + std::istringstream iss(response); + std::string line; + + while (std::getline(iss, line)) { + if (line.substr(0, 6) == "data: ") { + std::string data = line.substr(6); + if (data.find("\"n_prompt_tokens_processed\"") != std::string::npos || data.find("\"progress\"") != std::string::npos) { + progress_responses.push_back(data); + } else if (data.find("\"content\"") != std::string::npos || data.find("\"choices\"") != std::string::npos) { + content_responses.push_back(data); + } + } + } + + return true; + } + + bool check_progress_completion(const std::vector& progress_responses) { + if (progress_responses.empty()) { + return false; + } + + // Check if the last progress response shows 100% completion + std::string last_response = progress_responses.back(); + return last_response.find("\"progress\":1.0") != std::string::npos; + } + +public: + ProgressFeatureTest(int port = 8081) : server_port(port) {} + + bool test_completion_endpoint_progress() { + std::cout << "\n=== Testing /completion endpoint progress ===" << std::endl; + + 
+
+        std::string prompt = create_long_prompt();
+        std::string request = create_completion_request(prompt, true);
+        std::string response;
+
+        if (!send_http_request(request, response)) {
+            std::cout << "Failed to send request" << std::endl;
+            return false;
+        }
+
+        std::vector<std::string> progress_responses, content_responses;
+        parse_progress_responses(response, progress_responses, content_responses);
+
+        std::cout << "Received " << progress_responses.size() << " progress responses" << std::endl;
+        std::cout << "Received " << content_responses.size() << " content responses" << std::endl;
+
+        if (check_progress_completion(progress_responses)) {
+            std::cout << "Progress reached 100% as expected" << std::endl;
+            return true;
+        } else {
+            std::cout << "Progress did not reach 100%" << std::endl;
+            return false;
+        }
+    }
+
+    bool test_chat_completion_endpoint_progress() {
+        std::cout << "\n=== Testing /v1/chat/completions endpoint progress ===" << std::endl;
+
+        std::string prompt = create_long_prompt();
+        std::string request = create_chat_completion_request(prompt, true);
+        std::string response;
+
+        if (!send_http_request(request, response)) {
+            std::cout << "Failed to send request" << std::endl;
+            return false;
+        }
+
+        std::vector<std::string> progress_responses, content_responses;
+        parse_progress_responses(response, progress_responses, content_responses);
+
+        std::cout << "Received " << progress_responses.size() << " progress responses" << std::endl;
+        std::cout << "Received " << content_responses.size() << " content responses" << std::endl;
+
+        if (check_progress_completion(progress_responses)) {
+            std::cout << "Progress reached 100% as expected" << std::endl;
+            return true;
+        } else {
+            std::cout << "Progress did not reach 100%" << std::endl;
+            return false;
+        }
+    }
+
+    bool test_progress_disabled() {
+        std::cout << "\n=== Testing progress disabled ===" << std::endl;
+
+        std::string prompt = create_long_prompt();
+        std::string request = create_completion_request(prompt, false);
+        std::string response;
+
+        if (!send_http_request(request, response)) {
+            std::cout << "Failed to send request" << std::endl;
+            return false;
+        }
+
+        std::vector<std::string> progress_responses, content_responses;
+        parse_progress_responses(response, progress_responses, content_responses);
+
+        std::cout << "Received " << progress_responses.size() << " progress responses (should be 0)" << std::endl;
+        std::cout << "Received " << content_responses.size() << " content responses" << std::endl;
+
+        if (progress_responses.empty()) {
+            std::cout << "No progress responses when disabled, as expected" << std::endl;
+            return true;
+        } else {
+            std::cout << "Progress responses received when disabled" << std::endl;
+            return false;
+        }
+    }
+
+    bool run_all_tests() {
+        std::cout << "Starting Progress Feature Tests" << std::endl;
+        std::cout << "==================================================" << std::endl;
+
+        std::vector<std::pair<std::string, std::function<bool()>>> tests = {
+            {"Completion endpoint progress", [this]() { return test_completion_endpoint_progress(); }},
+            {"Chat completion endpoint progress", [this]() { return test_chat_completion_endpoint_progress(); }},
+            {"Progress disabled", [this]() { return test_progress_disabled(); }},
+        };
+
+        int passed = 0;
+        int total = tests.size();
+
+        for (const auto& test : tests) {
+            std::cout << "\n==================== " << test.first << " ====================" << std::endl;
+            if (test.second()) {
+                std::cout << "PASSED" << std::endl;
+                passed++;
+            } else {
+                std::cout << "FAILED" << std::endl;
+            }
+        }
+
+        std::cout << "\n==================================================" << std::endl;
"\n==================================================" << std::endl; + std::cout << "Test Results: " << passed << "/" << total << " tests passed" << std::endl; + + return passed == total; + } +}; + +int main(int argc, char* argv[]) { + if (argc != 1) { + std::cout << "Usage: " << argv[0] << std::endl; + std::cout << "Make sure the server is running on localhost:8081" << std::endl; + return 1; + } + + ProgressFeatureTest tester; + bool success = tester.run_all_tests(); + + if (success) { + std::cout << "\nAll tests passed!" << std::endl; + return 0; + } else { + std::cout << "\nSome tests failed!" << std::endl; + return 1; + } +} \ No newline at end of file diff --git a/tests/test-progress-feature.py b/tests/test-progress-feature.py new file mode 100644 index 0000000000000..3a65da920ac4c --- /dev/null +++ b/tests/test-progress-feature.py @@ -0,0 +1,229 @@ +#!/usr/bin/env python3 + +import requests +import json +import sys +import time + +def create_long_prompt(): + """Create a very long prompt to ensure multiple batches are processed""" + # Create a much longer prompt that will definitely take multiple batches + # This will help us clearly see the progress effect + base_text = "This is a comprehensive test prompt designed to verify the progress functionality thoroughly. " * 200 + return base_text + +def test_completion_endpoint_progress(server_url): + """Test progress functionality on /completion endpoint with long prompt""" + print("\n=== Testing /completion endpoint progress ===") + print("Using a very long prompt to clearly demonstrate progress...") + + prompt = create_long_prompt() + print(f"Prompt length: {len(prompt)} characters") + + data = { + "prompt": prompt, + "stream": True, + "return_progress": True, + "max_tokens": 10, # Small number to focus on prompt processing + "temperature": 0.7 + } + + progress_responses = [] + content_responses = [] + + try: + print("Sending request...") + response = requests.post(f"{server_url}/completion", json=data, stream=True) + response.raise_for_status() + + print("Receiving streaming response...") + for line in response.iter_lines(): + if line: + line_str = line.decode('utf-8') + if line_str.startswith('data: '): + data_str = line_str[6:] # Remove 'data: ' prefix + if data_str.strip() == '[DONE]': + break + + try: + json_data = json.loads(data_str) + if 'prompt_processing' in json_data: + progress_responses.append(json_data['prompt_processing']) + progress = json_data['prompt_processing'] + percentage = progress.get('progress', 0) * 100 + print(f"Progress: {percentage:.1f}% ({progress.get('n_prompt_tokens_processed', 'N/A')}/{progress.get('n_prompt_tokens', 'N/A')})") + elif 'content' in json_data and json_data.get('content', ''): + content_responses.append(json_data) + except json.JSONDecodeError: + continue + + print(f"\nReceived {len(progress_responses)} progress responses") + print(f"Received {len(content_responses)} content responses") + + # Detailed analysis + if progress_responses: + print("\n=== Progress Analysis ===") + for i, progress in enumerate(progress_responses): + percentage = progress.get('progress', 0) * 100 + processed = progress.get('n_prompt_tokens_processed', 0) + total = progress.get('n_prompt_tokens', 0) + print(f" Progress {i+1}: {percentage:.1f}% ({processed}/{total})") + + # Check if we reached 100% + last_progress = progress_responses[-1].get('progress', 0) + if last_progress >= 0.99: # Allow for small floating point differences + print("✅ Progress reached 100% as expected") + return True + else: + print(f"❌ Progress 
+                return False
+        else:
+            print("❌ No progress responses received")
+            return False
+
+    except Exception as e:
+        print(f"Error: {e}")
+        return False
+
+def test_progress_disabled(server_url):
+    """Test that progress is not sent when return_progress is false"""
+    print("\n=== Testing progress disabled ===")
+
+    prompt = create_long_prompt()
+
+    data = {
+        "prompt": prompt,
+        "stream": True,
+        "return_progress": False,  # Disable progress
+        "max_tokens": 10,
+        "temperature": 0.7
+    }
+
+    progress_responses = []
+    content_responses = []
+
+    try:
+        print("Sending request with progress disabled...")
+        response = requests.post(f"{server_url}/completion", json=data, stream=True)
+        response.raise_for_status()
+
+        for line in response.iter_lines():
+            if line:
+                line_str = line.decode('utf-8')
+                if line_str.startswith('data: '):
+                    data_str = line_str[6:]  # Remove 'data: ' prefix
+                    if data_str.strip() == '[DONE]':
+                        break
+
+                    try:
+                        json_data = json.loads(data_str)
+                        if 'prompt_processing' in json_data:
+                            progress_responses.append(json_data['prompt_processing'])
+                        elif 'content' in json_data and json_data.get('content', ''):
+                            content_responses.append(json_data)
+                    except json.JSONDecodeError:
+                        continue
+
+        print(f"Received {len(progress_responses)} progress responses")
+        print(f"Received {len(content_responses)} content responses")
+
+        # Check that no progress responses were received
+        if len(progress_responses) == 0:
+            print("✅ No progress responses received when disabled (correct)")
+            return True
+        else:
+            print("❌ Progress responses received when disabled (incorrect)")
+            return False
+
+    except Exception as e:
+        print(f"Error: {e}")
+        return False
+
+def test_batch_size_effect(server_url):
+    """Test the effect of different batch sizes on progress reporting"""
+    print("\n=== Testing batch size effect ===")
+
+    prompt = create_long_prompt()
+
+    # Test with different batch sizes
+    batch_sizes = [16, 32, 64]
+
+    for batch_size in batch_sizes:
+        print(f"\nTesting with batch size: {batch_size}")
+
+        data = {
+            "prompt": prompt,
+            "stream": True,
+            "return_progress": True,
+            "max_tokens": 10,
+            "temperature": 0.7
+        }
+
+        progress_responses = []
+
+        try:
+            # Note: We can't directly set batch_size in the request, but we can observe the effect
+            # by counting progress responses - smaller batch sizes should result in more progress updates
+            response = requests.post(f"{server_url}/completion", json=data, stream=True)
+            response.raise_for_status()
+
+            for line in response.iter_lines():
+                if line:
+                    line_str = line.decode('utf-8')
+                    if line_str.startswith('data: '):
+                        data_str = line_str[6:]
+                        if data_str.strip() == '[DONE]':
+                            break
+
+                        try:
+                            json_data = json.loads(data_str)
+                            if 'prompt_processing' in json_data:
+                                progress_responses.append(json_data['prompt_processing'])
+                        except json.JSONDecodeError:
+                            continue
+
+            print(f"  Progress responses: {len(progress_responses)}")
+
+        except Exception as e:
+            print(f"  Error: {e}")
+            continue
+
+    print("✅ Batch size effect test completed")
+    return True
+
+def main():
+    if len(sys.argv) != 2:
+        print("Usage: python3 test-progress-feature.py <server_url>")
+        print("Example: python3 test-progress-feature.py http://localhost:8081")
+        sys.exit(1)
+
+    server_url = sys.argv[1]
+
+    print("Testing progress feature with comprehensive test cases...")
+    print(f"Server URL: {server_url}")
+    print("This test uses a very long prompt to clearly demonstrate progress functionality.")
+
+    # Wait a moment for server to be ready
+    time.sleep(2)
+
+    # Run tests
+    test1_passed = test_completion_endpoint_progress(server_url)
+    test2_passed = test_progress_disabled(server_url)
+    test3_passed = test_batch_size_effect(server_url)
+
+    # Summary
+    print("\n=== Test Summary ===")
+    print(f"Completion endpoint progress: {'✅ PASS' if test1_passed else '❌ FAIL'}")
+    print(f"Progress disabled: {'✅ PASS' if test2_passed else '❌ FAIL'}")
+    print(f"Batch size effect: {'✅ PASS' if test3_passed else '❌ FAIL'}")
+
+    if test1_passed and test2_passed and test3_passed:
+        print("\n🎉 All tests passed!")
+        print("The progress feature is working correctly with long prompts and small batch sizes.")
+        sys.exit(0)
+    else:
+        print("\n💥 Some tests failed!")
+        sys.exit(1)
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/tools/server/README.md b/tools/server/README.md
index e29511cb1b457..31ccceb843ca3 100644
--- a/tools/server/README.md
+++ b/tools/server/README.md
@@ -428,6 +428,8 @@ By default, this value is set to `0`, meaning no tokens are kept. Use `-1` to re
 
 `stream`: Allows receiving each predicted token in real-time instead of waiting for the completion to finish (uses a different response format). To enable this, set to `true`.
 
+`return_progress`: When `stream` is enabled, this option allows receiving prompt-processing progress information before text generation begins. Progress responses contain a `prompt_processing` field with details about the number of tokens processed and the overall progress. This is useful for long prompts, where users want to see evaluation progress instead of waiting silently. Default: `false` (only applies when `stream` is `true`).
+
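+For example, a progress chunk on the `/completion` endpoint may look roughly like the following (the field names are the ones introduced by this option; the numbers are illustrative):
+
+```json
+{
+  "content": "",
+  "prompt_processing": {
+    "n_past": 512,
+    "n_prompt_tokens": 2048,
+    "n_prompt_tokens_processed": 512,
+    "progress": 0.25
+  }
+}
+```
+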
 `stop`: Specify a JSON array of stopping strings. These words will not be included in the completion, so make sure to add them to the prompt for the next iteration. Default: `[]`
diff --git a/tools/server/server.cpp b/tools/server/server.cpp
index 0afe213af1e47..7e33f82e57d12 100644
--- a/tools/server/server.cpp
+++ b/tools/server/server.cpp
@@ -109,9 +109,10 @@ static bool server_task_type_need_logits(server_task_type task_type) {
 }
 
 struct slot_params {
-    bool stream        = true;
-    bool cache_prompt  = true;  // remember the prompt to avoid reprocessing all prompt
-    bool return_tokens = false;
+    bool stream          = true;
+    bool cache_prompt    = true;  // remember the prompt to avoid reprocessing all prompt
+    bool return_tokens   = false;
+    bool return_progress = false; // include prompt processing progress in streaming responses
 
     int32_t n_keep    = 0; // number of tokens to keep from initial prompt
     int32_t n_discard = 0; // number of tokens after n_keep that may be discarded when shifting context, 0 defaults to half
@@ -258,9 +259,10 @@ struct server_task {
         params.verbose           = params_base.verbosity > 9;
         params.timings_per_token = json_value(data, "timings_per_token", false);
 
-        params.stream        = json_value(data, "stream", false);
-        params.cache_prompt  = json_value(data, "cache_prompt", true);
-        params.return_tokens = json_value(data, "return_tokens", false);
+        params.stream          = json_value(data, "stream", false);
+        params.cache_prompt    = json_value(data, "cache_prompt", true);
+        params.return_tokens   = json_value(data, "return_tokens", false);
+        params.return_progress = json_value(data, "return_progress", false);
         params.n_predict       = json_value(data, "n_predict", json_value(data, "max_tokens", defaults.n_predict));
         params.n_indent        = json_value(data, "n_indent", defaults.n_indent);
         params.n_keep          = json_value(data, "n_keep", defaults.n_keep);
@@ -898,6 +900,12 @@ struct server_task_result_cmpl_partial : server_task_result {
     completion_token_output prob_output;
     result_timings timings;
 
+    // Progress fields (only populated when is_progress_response is true)
+    bool is_progress_response = false;
+    int32_t n_past = 0;
+    int32_t n_prompt_tokens_processed = 0;
+    float progress = 0.0f;
+
     // OAI-compat fields
     bool verbose = false;
     oaicompat_type oaicompat = OAICOMPAT_TYPE_NONE;
@@ -944,6 +952,15 @@
         if (!prob_output.probs.empty()) {
             res["completion_probabilities"] = completion_token_output::probs_vector_to_json({prob_output}, post_sampling_probs);
         }
+        // include prompt processing progress if this is a progress response
+        if (is_progress_response) {
+            res["prompt_processing"] = json {
+                {"n_past",                    n_past},
+                {"n_prompt_tokens",           n_prompt_tokens},
+                {"n_prompt_tokens_processed", n_prompt_tokens_processed},
+                {"progress",                  progress},
+            };
+        }
 
         return res;
     }
@@ -983,6 +1000,31 @@
     }
 
     json to_json_oaicompat_chat() {
+        // Handle progress responses for chat completions
+        if (is_progress_response) {
+            std::time_t t = std::time(0);
+            return json {
+                {"choices", json::array({
+                    json {
+                        {"finish_reason", nullptr},
+                        {"index", 0},
+                        {"delta", json::object()},
+                    },
+                })},
+                {"created", t},
+                {"id", oaicompat_cmpl_id},
+                {"model", oaicompat_model},
+                {"system_fingerprint", build_info},
+                {"object", "chat.completion.chunk"},
+                {"prompt_processing", json {
+                    {"n_past",                    n_past},
+                    {"n_prompt_tokens",           n_prompt_tokens},
+                    {"n_prompt_tokens_processed", n_prompt_tokens_processed},
+                    {"progress",                  progress},
+                }},
+            };
+        }
+
         bool first = n_decoded == 1;
         std::time_t t = std::time(0);
         json choices;
@@ -2515,6 +2557,41 @@ struct server_context {
         queue_results.send(std::move(res));
     }
 
+    void send_progress_response(server_slot & slot) {
+        // Only send progress if explicitly requested and streaming is enabled
+        if (!slot.params.return_progress || !slot.params.stream) {
+            return;
+        }
+
+        // Calculate current progress percentage
+        float current_progress = slot.n_prompt_tokens > 0 ?
+            (float) slot.n_prompt_tokens_processed / slot.n_prompt_tokens : 0.0f;
+
+
+        auto res = std::make_unique<server_task_result_cmpl_partial>();
+
+        res->id      = slot.id_task;
+        res->index   = slot.index;
+        res->content = ""; // empty content for progress responses
+        res->tokens  = {}; // empty tokens for progress responses
+
+        res->n_decoded       = 0; // no tokens decoded yet during prompt processing
+        res->n_prompt_tokens = slot.n_prompt_tokens;
+
+        // Progress-specific fields
+        res->is_progress_response      = true;
+        res->n_past                    = slot.n_past;
+        res->n_prompt_tokens_processed = slot.n_prompt_tokens_processed;
+        res->progress                  = current_progress;
+
+        res->verbose           = slot.params.verbose;
+        res->oaicompat         = slot.params.oaicompat;
+        res->oaicompat_model   = slot.params.oaicompat_model;
+        res->oaicompat_cmpl_id = slot.params.oaicompat_cmpl_id;
+
+        queue_results.send(std::move(res));
+    }
+
     void send_final_response(server_slot & slot) {
         auto res = std::make_unique<server_task_result_cmpl_final>();
         res->id = slot.id_task;
@@ -3340,8 +3417,12 @@ struct server_context {
                     SLT_INF(slot, "prompt processing progress, n_past = %d, n_tokens = %d, progress = %f\n", slot.n_past, batch.n_tokens, (float) slot.n_prompt_tokens_processed / slot.n_prompt_tokens);
 
+                    // Send progress response if requested
+                    send_progress_response(slot);
+
                     // entire prompt has been processed
                     if (slot.n_past == slot.n_prompt_tokens) {
+
                         slot.state = SLOT_STATE_DONE_PROMPT;
 
                         GGML_ASSERT(batch.n_tokens > 0);
diff --git a/tools/server/tests/unit/test_chat_completion.py b/tools/server/tests/unit/test_chat_completion.py
index 7ee9a1651400d..bec5c441f3395 100644
--- a/tools/server/tests/unit/test_chat_completion.py
+++ b/tools/server/tests/unit/test_chat_completion.py
@@ -8,6 +8,7 @@
 def create_server():
     global server
     server = ServerPreset.tinyllama2()
+    server.server_port = 8080
 
 
 @pytest.mark.parametrize(
@@ -351,3 +352,165 @@ def test_logprobs_stream():
         assert token.top_logprobs is not None
         assert len(token.top_logprobs) > 0
     assert aggregated_text == output_text
+
+
+def test_progress_feature_enabled():
+    """Test progress feature when return_progress is enabled"""
+    global server
+    server.start()
+
+    # Create a long prompt to ensure multiple batches are processed
+    long_prompt = "This is a comprehensive test prompt designed to verify the progress functionality thoroughly. " * 100
" * 100 + + res = server.make_stream_request("POST", "/chat/completions", data={ + "max_tokens": 10, + "messages": [ + {"role": "user", "content": long_prompt}, + ], + "stream": True, + "return_progress": True, + }) + + progress_responses = [] + content_responses = [] + + for data in res: + choice = data["choices"][0] + + # Check for progress responses (they can be at root level or in delta) + if "prompt_processing" in data: + progress_responses.append(data["prompt_processing"]) + elif "delta" in choice and "prompt_processing" in choice["delta"]: + progress_responses.append(choice["delta"]["prompt_processing"]) + elif "delta" in choice and "content" in choice["delta"] and choice["delta"]["content"]: + content_responses.append(data) + + # Verify we received progress responses + assert len(progress_responses) > 0, "No progress responses received" + + # Verify the last progress response shows 100% completion + last_progress = progress_responses[-1] + assert last_progress["progress"] >= 0.99, f"Progress did not reach 100% (last: {last_progress['progress']*100:.1f}%)" + + # Verify we received content responses + assert len(content_responses) > 0, "No content responses received" + + +def test_progress_feature_disabled(): + """Test that progress is not sent when return_progress is disabled""" + global server + server.start() + + # Create a long prompt + long_prompt = "This is a comprehensive test prompt designed to verify the progress functionality thoroughly. " * 100 + + res = server.make_stream_request("POST", "/chat/completions", data={ + "max_tokens": 10, + "messages": [ + {"role": "user", "content": long_prompt}, + ], + "stream": True, + "return_progress": False, # Disable progress + }) + + progress_responses = [] + content_responses = [] + + for data in res: + choice = data["choices"][0] + + # Check for progress responses (they can be at root level or in delta) + if "prompt_processing" in data: + progress_responses.append(data["prompt_processing"]) + elif "delta" in choice and "prompt_processing" in choice["delta"]: + progress_responses.append(choice["delta"]["prompt_processing"]) + elif "delta" in choice and "content" in choice["delta"] and choice["delta"]["content"]: + content_responses.append(data) + + # Verify no progress responses were received + assert len(progress_responses) == 0, f"Progress responses received when disabled: {len(progress_responses)}" + + # Verify we still received content responses + assert len(content_responses) > 0, "No content responses received" + + +def test_progress_feature_completion_endpoint(): + """Test progress feature on /completion endpoint""" + global server + server.start() + + # Create a long prompt + long_prompt = "This is a comprehensive test prompt designed to verify the progress functionality thoroughly. 
" * 100 + + res = server.make_stream_request("POST", "/completion", data={ + "prompt": long_prompt, + "stream": True, + "return_progress": True, + "max_tokens": 10, + }) + + progress_responses = [] + content_responses = [] + + for data in res: + # Check for progress responses in /completion format + if "prompt_processing" in data: + progress_responses.append(data["prompt_processing"]) + elif "content" in data and data["content"]: + content_responses.append(data) + + # Verify we received progress responses + assert len(progress_responses) > 0, "No progress responses received from /completion endpoint" + + # Verify the last progress response shows 100% completion + last_progress = progress_responses[-1] + assert last_progress["progress"] >= 0.99, f"Progress did not reach 100% (last: {last_progress['progress']*100:.1f}%)" + + # Verify we received content responses + assert len(content_responses) > 0, "No content responses received from /completion endpoint" + + +def test_progress_feature_with_different_batch_sizes(): + """Test progress feature behavior with different batch processing scenarios""" + global server + server.start() + + # Test with different prompt lengths to simulate different batch processing + test_cases = [ + ("Short prompt", "Short test prompt"), + ("Medium prompt", "This is a medium length test prompt designed to test progress functionality. " * 20), + ("Long prompt", "This is a comprehensive test prompt designed to verify the progress functionality thoroughly. " * 100), + ] + + for test_name, prompt in test_cases: + res = server.make_stream_request("POST", "/chat/completions", data={ + "max_tokens": 5, + "messages": [ + {"role": "user", "content": prompt}, + ], + "stream": True, + "return_progress": True, + }) + + progress_responses = [] + content_responses = [] + + for data in res: + choice = data["choices"][0] + + # Check for progress responses (they can be at root level or in delta) + if "prompt_processing" in data: + progress_responses.append(data["prompt_processing"]) + elif "delta" in choice and "prompt_processing" in choice["delta"]: + progress_responses.append(choice["delta"]["prompt_processing"]) + elif "delta" in choice and "content" in choice["delta"] and choice["delta"]["content"]: + content_responses.append(data) + + # Verify progress functionality works for all prompt lengths + assert len(progress_responses) > 0, f"No progress responses for {test_name}" + assert len(content_responses) > 0, f"No content responses for {test_name}" + + # Verify progress reaches 100% + if progress_responses: + last_progress = progress_responses[-1] + assert last_progress["progress"] >= 0.99, f"Progress did not reach 100% for {test_name} (last: {last_progress['progress']*100:.1f}%)"