diff --git a/AUDIO_FEATURES.md b/AUDIO_FEATURES.md new file mode 100644 index 00000000..445efeba --- /dev/null +++ b/AUDIO_FEATURES.md @@ -0,0 +1,51 @@ +# تحسينات التطبيق الصوتي 🎵 + +## الميزة الجديدة: تسلسل الصوت عند الاتصال 📞 + +تمت إضافة ميزة جديدة رائعة للتطبيق! الآن عندما تضغط على زر "اتصال" سيحدث التالي: + +### تسلسل الأحداث: + +1. **🔔 صوت رنين (3 ثوانٍ)** + - يتم تشغيل صوت رنين لمدة 3 ثوانٍ + - الزر يصبح أصفر ويظهر "جاري التحضير..." + +2. **🗣️ رسالة ترحيبية بالعربية** + - تسمع رسالة تقول: "الآن يمكنك التحدث باللغة العربية" + - تم إنتاج الصوت باستخدام تقنية Text-to-Speech + +3. **🎤 تمكين Realtime** + - بعد انتهاء الرسالة يتم تمكين نظام Realtime + - الزر يصبح أحمر ويظهر "إيقاف المحادثة" + - يمكنك الآن التحدث بحرية + +### الملفات الصوتية المُنشأة: + +- **`Ran.mp3`**: صوت رنين تم إنتاجه بـ Python باستخدام numpy +- **`Nancy.wav`**: رسالة عربية تم إنتاجها بـ Google Text-to-Speech + +### التقنيات المستخدمة: + +1. **NumPy**: لإنتاج صوت الرنين الرقمي +2. **gTTS (Google Text-to-Speech)**: للرسالة العربية +3. **React Hooks**: لإدارة حالة التسلسل الصوتي +4. **Web Audio API**: لتشغيل الأصوات في المتصفح + +### كيفية الاستخدام: + +1. افتح التطبيق على `http://localhost:8765` +2. اضغط على زر "اتصال" +3. استمع لصوت الرنين والرسالة +4. ابدأ التحدث بعد انتهاء الرسالة +5. اضغط "إيقاف المحادثة" للإنهاء + +### المكونات المُحدثة: + +- **useAudioSequence**: Hook جديد للتحكم في تسلسل الأصوات +- **App.tsx**: تحديث لواجهة المستخدم والمنطق +- **StatusMessage**: إضافة حالة جديدة للتحضير +- **audio_generator.py**: أداة إنتاج الأصوات + +--- + +**استمتع بالتجربة الصوتية المحسّنة! 
🎉** diff --git a/Voice Ran/Nancy.wav b/Voice Ran/Nancy.wav new file mode 100644 index 00000000..74bb218f Binary files /dev/null and b/Voice Ran/Nancy.wav differ diff --git a/Voice Ran/Ran.mp3 b/Voice Ran/Ran.mp3 new file mode 100644 index 00000000..ceca368e Binary files /dev/null and b/Voice Ran/Ran.mp3 differ diff --git a/Voice Ran/between.wav b/Voice Ran/between.wav new file mode 100644 index 00000000..def49ef2 Binary files /dev/null and b/Voice Ran/between.wav differ diff --git a/app/backend/app.py b/app/backend/app.py index 13a9d02d..d3fa835b 100644 --- a/app/backend/app.py +++ b/app/backend/app.py @@ -41,25 +41,81 @@ async def create_app(): voice_choice=os.environ.get("AZURE_OPENAI_REALTIME_VOICE_CHOICE") or "alloy" ) rtmt.system_message = """ - You are a helpful assistant. Only answer questions based on information you searched in the knowledge base, accessible with the 'search' tool. - The user is listening to answers with audio, so it's *super* important that answers are as short as possible, a single sentence if at all possible. - Never read file names or source names or keys out loud. - Always use the following step-by-step instructions to respond: - 1. Always use the 'search' tool to check the knowledge base before answering a question. - 2. Always use the 'report_grounding' tool to report the source of information from the knowledge base. - 3. Produce an answer that's as short as possible. If the answer isn't in the knowledge base, say you don't know. +You are an order-taking assistant at Circles Restaurant. +Always speak in Egyptian Arabic dialect (Masry ‘Aamiya) with a warm and friendly tone. +Keep responses short and focused. + +Important rules: + +Only answer questions based on information you searched in the knowledge base, accessible with the 'search' tool. + +The user is listening to answers with audio, so it's super important that answers are as short as possible, a single sentence if at all possible. + +Never switch to English. 
+ +Never speak in formal Arabic (Fusha). + +Never give long sentences. + +Stick strictly to the categories and rules below. + +Never read file names, source names, or keys out loud. + +If an item is not in the menu → say: "ليس عندي." + +If you don’t understand → say: "ممكن توضّح أكتر يا فندم؟" + +Always follow these step-by-step instructions when responding: + +Always use the 'search' tool to check the knowledge base before answering a question. + +Always follow the dialogue flow rules for ordering (see below). + +Produce an answer that is as short as possible, one sentence if possible. + +If the item or request is not in the menu, respond politely with "ليس عندي." + +If the request is unclear, ask for clarification with "ممكن توضّح أكتر يا فندم؟" + +Dialogue flow rules: + +Opening line (always start with): +"مساء النور يا فندم في مطعم سيركلز.. إزيّك؟ تحب تطلب إيه؟" + +Categories: Pizza, Burgers, Other Food, Drinks. + +Pizza ordering: + +Always ask for size (small, medium, large). + +Example: "تحبها حجم إيه؟" + +All other items (Burgers, Other Food, Drinks): + +Only one size available. + +Do not ask about size. + +After each order: + +Say: "تحب تزود حاجة تانية؟" + +If yes → say: "تحب تزود إيه؟" + +If no → calculate the total and say: +"الحساب [amount] جنيه.. والأوردر هيكون جاهز بعد نص ساعة." 
""".strip() attach_rag_tools(rtmt, credentials=search_credential, search_endpoint=os.environ.get("AZURE_SEARCH_ENDPOINT"), search_index=os.environ.get("AZURE_SEARCH_INDEX"), - semantic_configuration=os.environ.get("AZURE_SEARCH_SEMANTIC_CONFIGURATION") or None, - identifier_field=os.environ.get("AZURE_SEARCH_IDENTIFIER_FIELD") or "chunk_id", - content_field=os.environ.get("AZURE_SEARCH_CONTENT_FIELD") or "chunk", - embedding_field=os.environ.get("AZURE_SEARCH_EMBEDDING_FIELD") or "text_vector", - title_field=os.environ.get("AZURE_SEARCH_TITLE_FIELD") or "title", - use_vector_query=(os.getenv("AZURE_SEARCH_USE_VECTOR_QUERY", "true") == "true") + semantic_configuration=None, # لا نستخدم البحث الدلالي + identifier_field=os.environ.get("AZURE_SEARCH_IDENTIFIER_FIELD") or "ID", + content_field=os.environ.get("AZURE_SEARCH_CONTENT_FIELD") or "ingredients", + embedding_field="", # لا نستخدم الـ embedding + title_field=os.environ.get("AZURE_SEARCH_TITLE_FIELD") or "Name", + use_vector_query=False # إيقاف البحث الشعاعي ) rtmt.attach_to_app(app, "/realtime") @@ -67,6 +123,8 @@ async def create_app(): current_directory = Path(__file__).parent app.add_routes([web.get('/', lambda _: web.FileResponse(current_directory / 'static/index.html'))]) app.router.add_static('/', path=current_directory / 'static', name='static') + # إضافة route منفصل للملفات الصوتية + app.router.add_static('/audio', path=current_directory / 'static/audio', name='audio') return app diff --git a/app/backend/ragtools.py b/app/backend/ragtools.py index 6c790f9e..752dda91 100644 --- a/app/backend/ragtools.py +++ b/app/backend/ragtools.py @@ -58,21 +58,37 @@ async def _search_tool( use_vector_query: bool, args: Any) -> ToolResult: print(f"Searching for '{args['query']}' in the knowledge base.") - # Hybrid query using Azure AI Search with (optional) Semantic Ranker - vector_queries = [] - if use_vector_query: - vector_queries.append(VectorizableTextQuery(text=args['query'], k_nearest_neighbors=50, 
fields=embedding_field)) + print(f"Using fields: ID={identifier_field}, Content={content_field}") + + # Simple text search in Azure AI Search (no vector search, no semantic search) search_results = await search_client.search( search_text=args["query"], - query_type="semantic" if semantic_configuration else "simple", - semantic_configuration_name=semantic_configuration, + query_type="simple", # استخدام البحث النصي البسيط فقط top=5, - vector_queries=vector_queries, - select=", ".join([identifier_field, content_field]) + select=f"{identifier_field},Name,{content_field},Price" # تحديد الحقول المطلوبة ) + result = "" + result_count = 0 async for r in search_results: - result += f"[{r[identifier_field]}]: {r[content_field]}\n-----\n" + result_count += 1 + print(f"Search result {result_count}: {r}") # طباعة النتيجة الكاملة + + # استخدام الحقول الصحيحة + id_field = r.get(identifier_field, f"Item_{result_count}") + name_field = r.get('Name', "Unknown Item") + content_field_value = r.get(content_field, "No description available") + price = r.get('Price', 'سعر غير محدد') + + # عرض النتائج بصيغة: [ID]: Name - ingredients (Price جنيه) + result += f"[{id_field}]: {name_field} - {content_field_value} ({price} جنيه)\n-----\n" + + if result_count == 0: + print("No search results found!") + result = "ليس عندي." 
+ else: + print(f"Found {result_count} results") + return ToolResult(result, ToolResultDirection.TO_SERVER) KEY_PATTERN = re.compile(r'^[a-zA-Z0-9_=\-]+$') @@ -83,21 +99,16 @@ async def _report_grounding_tool(search_client: SearchClient, identifier_field: sources = [s for s in args["sources"] if KEY_PATTERN.match(s)] list = " OR ".join(sources) print(f"Grounding source: {list}") - # Use search instead of filter to align with how detailt integrated vectorization indexes - # are generated, where chunk_id is searchable with a keyword tokenizer, not filterable + # Use search instead of filter to align with how the index is structured search_results = await search_client.search(search_text=list, search_fields=[identifier_field], select=[identifier_field, title_field, content_field], top=len(sources), query_type="full") - # If your index has a key field that's filterable but not searchable and with the keyword analyzer, you can - # use a filter instead (and you can remove the regex check above, just ensure you escape single quotes) - # search_results = await search_client.search(filter=f"search.in(chunk_id, '{list}')", select=["chunk_id", "title", "chunk"]) - docs = [] async for r in search_results: - docs.append({"chunk_id": r[identifier_field], "title": r[title_field], "chunk": r[content_field]}) + docs.append({"ID": r[identifier_field], "Name": r[title_field], "ingredients": r[content_field]}) return ToolResult({"sources": docs}, ToolResultDirection.TO_CLIENT) def attach_rag_tools(rtmt: RTMiddleTier, diff --git a/app/backend/requirements.txt b/app/backend/requirements.txt index 460af937..17bf238e 100644 Binary files a/app/backend/requirements.txt and b/app/backend/requirements.txt differ diff --git a/app/backend/test_search.py b/app/backend/test_search.py new file mode 100644 index 00000000..7dd5a67d --- /dev/null +++ b/app/backend/test_search.py @@ -0,0 +1,79 @@ +import asyncio +import os +from dotenv import load_dotenv +from azure.search.documents.aio import 
SearchClient +from azure.core.credentials import AzureKeyCredential + +# تحديد مسار ملف .env +dotenv_path = os.path.join(os.path.dirname(__file__), '.env') +load_dotenv(dotenv_path) + +print(f"Loading .env from: {dotenv_path}") + +async def test_azure_search(): + # تحقق من المتغيرات + service_endpoint = os.getenv('AZURE_SEARCH_ENDPOINT') + api_key = os.getenv('AZURE_SEARCH_API_KEY') + index_name = os.getenv('AZURE_SEARCH_INDEX') + + print(f"Service endpoint: {service_endpoint}") + print(f"Index name: {index_name}") + print(f"API key: {'***' + api_key[-4:] if api_key else 'None'}") + + if not all([service_endpoint, api_key, index_name]): + print("❌ Missing required environment variables!") + return + + # إنشاء العميل + search_client = SearchClient( + endpoint=service_endpoint, + index_name=index_name, + credential=AzureKeyCredential(api_key) + ) + + try: + print("\n--- Testing searches ---") + + # اختبارات مختلفة + test_queries = [ + "بيتزا", + "pizza", + "برجر", + "burger", + "دجاج", + "chicken", + "*", # البحث عن جميع النتائج + ] + + for query in test_queries: + print(f"\n🔍 البحث عن: '{query}'") + + search_results = await search_client.search( + search_text=query, + query_type="simple", + top=3, + select="ID,Name,ingredients,Price" + ) + + result_count = 0 + async for result in search_results: + result_count += 1 + print(f" النتيجة {result_count}:") + print(f" ID: {result.get('ID', 'N/A')}") + print(f" Name: {result.get('Name', 'N/A')}") + print(f" ingredients: {result.get('ingredients', 'N/A')}") + print(f" Price: {result.get('Price', 'N/A')}") + print(" ---") + + if result_count == 0: + print(f" ❌ لا توجد نتائج للبحث عن '{query}'") + else: + print(f" ✅ وجدت {result_count} نتائج") + + except Exception as e: + print(f"❌ خطأ في البحث: {e}") + finally: + await search_client.close() + +if __name__ == "__main__": + asyncio.run(test_azure_search()) diff --git a/app/frontend/src/App.tsx b/app/frontend/src/App.tsx index ebfde279..f6f256a7 100644 --- 
a/app/frontend/src/App.tsx +++ b/app/frontend/src/App.tsx @@ -10,6 +10,7 @@ import StatusMessage from "@/components/ui/status-message"; import useRealTime from "@/hooks/useRealtime"; import useAudioRecorder from "@/hooks/useAudioRecorder"; import useAudioPlayer from "@/hooks/useAudioPlayer"; +import useAudioSequence from "@/hooks/useAudioSequence"; import { GroundingFile, ToolResult } from "./types"; @@ -17,6 +18,7 @@ import logo from "./assets/logo.svg"; function App() { const [isRecording, setIsRecording] = useState(false); + const [isPlayingSequence, setIsPlayingSequence] = useState(false); const [groundingFiles, setGroundingFiles] = useState([]); const [selectedFile, setSelectedFile] = useState(null); @@ -29,6 +31,7 @@ function App() { isRecording && playAudio(message.delta); }, onReceivedInputAudioBufferSpeechStarted: () => { + console.log("Speech started detected"); stopAudioPlayer(); }, onReceivedExtensionMiddleTierToolResponse: message => { @@ -45,18 +48,36 @@ function App() { const { reset: resetAudioPlayer, play: playAudio, stop: stopAudioPlayer } = useAudioPlayer(); const { start: startAudioRecording, stop: stopAudioRecording } = useAudioRecorder({ onAudioRecorded: addUserAudio }); - const onToggleListening = async () => { - if (!isRecording) { + // Hook للتسلسل الصوتي + const { playAudioSequence } = useAudioSequence({ + onSequenceComplete: async () => { + // بعد انتهاء تسلسل الأصوات بالكامل، بدء الريل تايم + console.log('Audio sequence completed, starting realtime...'); + setIsPlayingSequence(false); + setIsRecording(true); + + // بدء جلسة الريل تايم startSession(); + + // بدء تسجيل الصوت await startAudioRecording(); resetAudioPlayer(); + } + }); - setIsRecording(true); - } else { + const onToggleListening = async () => { + console.log('onToggleListening called. 
isRecording:', isRecording, 'isPlayingSequence:', isPlayingSequence); + + if (!isRecording && !isPlayingSequence) { + // بدء تسلسل الأصوات + console.log('Starting audio sequence...'); + setIsPlayingSequence(true); + playAudioSequence(); + } else if (isRecording) { + console.log('Stopping recording...'); await stopAudioRecording(); stopAudioPlayer(); inputAudioBufferClear(); - setIsRecording(false); } }; @@ -75,21 +96,40 @@ function App() {
- +
diff --git a/app/frontend/src/components/ui/status-message.tsx b/app/frontend/src/components/ui/status-message.tsx index 52a89089..225aad61 100644 --- a/app/frontend/src/components/ui/status-message.tsx +++ b/app/frontend/src/components/ui/status-message.tsx @@ -3,10 +3,16 @@ import { useTranslation } from "react-i18next"; type Properties = { isRecording: boolean; + isPlayingSequence?: boolean; }; -export default function StatusMessage({ isRecording }: Properties) { +export default function StatusMessage({ isRecording, isPlayingSequence = false }: Properties) { const { t } = useTranslation(); + + if (isPlayingSequence) { + return

جاري تحضير الاتصال... استمع للتعليمات

; + } + if (!isRecording) { return

{t("status.notRecordingMessage")}

; } diff --git a/app/frontend/src/hooks/useAudioSequence.tsx b/app/frontend/src/hooks/useAudioSequence.tsx new file mode 100644 index 00000000..628ab8d7 --- /dev/null +++ b/app/frontend/src/hooks/useAudioSequence.tsx @@ -0,0 +1,89 @@ +import { useCallback } from 'react'; + +interface UseAudioSequenceProps { + onSequenceComplete: () => void; +} + +const useAudioSequence = ({ onSequenceComplete }: UseAudioSequenceProps) => { + const playAudioSequence = useCallback(async () => { + try { + console.log('Starting audio sequence...'); + + // 1. تشغيل Ran.mp3 (الرنة) + console.log('Playing Ran.mp3...'); + const ranAudio = new Audio('/audio/Ran.mp3'); + + await new Promise((resolve, reject) => { + ranAudio.onended = () => { + console.log('Ran.mp3 ended'); + resolve(); + }; + ranAudio.onerror = (error) => { + console.error('Error playing Ran.mp3:', error); + reject(error); + }; + ranAudio.play().catch(reject); + }); + + // 2. انتظار قصير قبل تشغيل between.wav + console.log('Waiting before between.wav...'); + await new Promise(resolve => setTimeout(resolve, 500)); + + // 3. تشغيل between.wav + console.log('Playing between.wav...'); + const betweenAudio = new Audio('/audio/between.wav'); + + await new Promise((resolve, reject) => { + betweenAudio.onended = () => { + console.log('between.wav ended'); + resolve(); + }; + betweenAudio.onerror = (error) => { + console.error('Error playing between.wav:', error); + reject(error); + }; + betweenAudio.play().catch(reject); + }); + + // 4. انتظار قصير قبل تشغيل Nancy.wav + console.log('Waiting before Nancy.wav...'); + await new Promise(resolve => setTimeout(resolve, 500)); + + // 5. 
تشغيل Nancy.wav + console.log('Playing Nancy.wav...'); + const nancyAudio = new Audio('/audio/Nancy.wav'); + + await new Promise((resolve, reject) => { + nancyAudio.onended = () => { + console.log('Nancy.wav ended'); + resolve(); + }; + nancyAudio.onerror = (error) => { + console.error('Error playing Nancy.wav:', error); + reject(error); + }; + nancyAudio.play().catch(reject); + }); + + // 6. انتظار قصير قبل تمكين الـ Realtime + console.log('Waiting before enabling Realtime...'); + await new Promise(resolve => setTimeout(resolve, 500)); + + // 7. استدعاء الدالة للإشارة إلى انتهاء التسلسل + console.log('Audio sequence completed, calling onSequenceComplete'); + onSequenceComplete(); + + } catch (error) { + console.error('Error playing audio sequence:', error); + // في حالة حدوث خطأ، مازلنا نريد تمكين الـ Realtime + console.error('Audio sequence failed, calling onSequenceComplete anyway'); + onSequenceComplete(); + } + }, [onSequenceComplete]); + + return { + playAudioSequence + }; +}; + +export default useAudioSequence; diff --git a/app/frontend/src/hooks/useRealtime.tsx b/app/frontend/src/hooks/useRealtime.tsx index e2b37e2a..9538885b 100644 --- a/app/frontend/src/hooks/useRealtime.tsx +++ b/app/frontend/src/hooks/useRealtime.tsx @@ -56,14 +56,24 @@ export default function useRealTime({ : `/realtime`; const { sendJsonMessage } = useWebSocket(wsEndpoint, { - onOpen: () => onWebSocketOpen?.(), - onClose: () => onWebSocketClose?.(), - onError: event => onWebSocketError?.(event), + onOpen: () => { + console.log('WebSocket opened - but session not started yet'); + onWebSocketOpen?.(); + }, + onClose: () => { + console.log('WebSocket closed'); + onWebSocketClose?.(); + }, + onError: event => { + console.error('WebSocket error:', event); + onWebSocketError?.(event); + }, onMessage: event => onMessageReceived(event), - shouldReconnect: () => true + shouldReconnect: () => false // منع إعادة الاتصال التلقائي }); const startSession = () => { + console.log('Starting realtime 
session...'); const command: SessionUpdateCommand = { type: "session.update", session: { @@ -79,6 +89,7 @@ export default function useRealTime({ }; } + console.log('Sending session update command:', command); sendJsonMessage(command); }; diff --git a/test_app/app.py b/test_app/app.py new file mode 100644 index 00000000..3f4aa028 --- /dev/null +++ b/test_app/app.py @@ -0,0 +1,507 @@ +from aiohttp import web +import json +import os +import logging + +# إعداد التسجيل +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +async def index(request): + """صفحة رئيسية بسيطة""" + html = """ + + + + + + اختبار التسلسل الصوتي + + + +
+

🎵 اختبار التسلسل الصوتي

+ + + + + +
+ جاهز للاختبار +
+ +
+ 🎤 المايك نشط - مستوى الصوت: +
+
+
+
+
+ + + + + """ + return web.Response(text=html, content_type='text/html') + +async def static_handler(request): + """معالج الملفات الثابتة""" + file_path = request.match_info['path'] + static_dir = os.path.join(os.path.dirname(__file__), 'static') + file_full_path = os.path.join(static_dir, file_path) + + if not os.path.exists(file_full_path): + return web.Response(status=404, text="File not found") + + # تحديد نوع المحتوى + if file_path.endswith('.wav'): + content_type = 'audio/wav' + elif file_path.endswith('.mp3'): + content_type = 'audio/mpeg' + else: + content_type = 'application/octet-stream' + + with open(file_full_path, 'rb') as f: + return web.Response(body=f.read(), content_type=content_type) + +def create_app(): + """إنشاء التطبيق""" + app = web.Application() + + # إضافة المسارات + app.router.add_get('/', index) + app.router.add_get('/static/{path:.*}', static_handler) + + return app + +if __name__ == '__main__': + app = create_app() + + print("🚀 بدء تشغيل خادم اختبار التسلسل الصوتي...") + print("📍 الرابط: http://localhost:8080") + print("⏹️ للإيقاف: اضغط Ctrl+C") + + web.run_app(app, host='localhost', port=8080) diff --git a/test_search.py b/test_search.py new file mode 100644 index 00000000..f4c66135 --- /dev/null +++ b/test_search.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python3 +""" +سكريبت لاختبار Azure AI Search مباشرة +""" +import os +import asyncio +from azure.core.credentials import AzureKeyCredential +from azure.search.documents.aio import SearchClient +from dotenv import load_dotenv + +async def test_search(): + # تحميل الإعدادات + load_dotenv("app/backend/.env") + + # معلومات الاتصال + search_endpoint = os.environ.get("AZURE_SEARCH_ENDPOINT") + search_index = os.environ.get("AZURE_SEARCH_INDEX") + search_key = os.environ.get("AZURE_SEARCH_API_KEY") + + print(f"🔍 اختبار Azure Search:") + print(f" Endpoint: {search_endpoint}") + print(f" Index: {search_index}") + print(f" Key: {search_key[:10]}...") + + # إنشاء عميل البحث + search_client = 
SearchClient( + search_endpoint, + search_index, + AzureKeyCredential(search_key) + ) + + print("\n📊 اختبار البحث العام...") + try: + # بحث عام للحصول على جميع البيانات + search_results = await search_client.search( + search_text="*", # البحث عن كل شيء + top=10, + select="*" # جلب جميع الحقول + ) + + result_count = 0 + async for result in search_results: + result_count += 1 + print(f"\n--- النتيجة {result_count} ---") + for key, value in result.items(): + print(f" {key}: {value}") + + if result_count == 0: + print("❌ لا توجد بيانات في الفهرس!") + else: + print(f"\n✅ تم العثور على {result_count} سجل(ات)") + + except Exception as e: + print(f"❌ خطأ في البحث: {e}") + + print("\n🍕 اختبار البحث عن البيتزا...") + try: + # بحث محدد عن البيتزا + search_results = await search_client.search( + search_text="pizza", + top=5, + select="*" + ) + + result_count = 0 + async for result in search_results: + result_count += 1 + print(f"\n--- نتيجة البيتزا {result_count} ---") + for key, value in result.items(): + print(f" {key}: {value}") + + if result_count == 0: + print("❌ لم يتم العثور على بيتزا!") + else: + print(f"\n✅ تم العثور على {result_count} نوع(أ) من البيتزا") + + except Exception as e: + print(f"❌ خطأ في البحث عن البيتزا: {e}") + + await search_client.close() + +if __name__ == "__main__": + asyncio.run(test_search())