From 13392b227197995f5c525afd19c3b5abd1fd7ade Mon Sep 17 00:00:00 2001 From: Shamit Surana Date: Thu, 13 Mar 2025 14:20:49 -0700 Subject: [PATCH 1/2] New documentation --- README.md | 370 ++++++++---------------------------------------------- 1 file changed, 51 insertions(+), 319 deletions(-) diff --git a/README.md b/README.md index f0372c3..0789a9f 100644 --- a/README.md +++ b/README.md @@ -24,341 +24,73 @@ Feedbridge is using the [Spezi](https://github.com/StanfordSpezi/Spezi) ecosyste ## Overview -Feedbridge is +Physiologic poor feeding in newborns, leading to poor weight gain, jaundice, and hospital readmissions, is a significant public health concern. FeedBridge aims to provide personalized, data-driven guidance to parents and physicians, enabling timely intervention and improved newborn outcomes. | Screenshot displaying the Dashboard interface of Feedbridge. | Screenshot displaying the Entry Adding interface of Feedbridge. | Screenshot displaying the Settings interface of Feedbridge. | | :----------------------------------------------------------: | :----------------------------------------------------------: | :----------------------------------------------------------: | | `Dashboard View` | `Add Entry View` | `Settings` | +## Setup Instructions -## Feedbridge Features - -*Provide a comprehensive description of your application, including figures showing the application. You can learn more on how to structure a README in the [Stanford Spezi Documentation Guide](https://swiftpackageindex.com/stanfordspezi/spezi/documentation/spezi/documentation-guide)* - -> [!NOTE] -> Do you want to learn more about the Stanford Spezi Template Application and how to use, extend, and modify this application? Check out the [Stanford Spezi Template Application documentation](https://stanfordspezi.github.io/SpeziTemplateApplication) - - -## Contributing - -Contributions to this project are welcome. 
Please make sure to read the [contribution guidelines](https://github.com/StanfordSpezi/.github/blob/main/CONTRIBUTING.md) and the [contributor covenant code of conduct](https://github.com/StanfordSpezi/.github/blob/main/CODE_OF_CONDUCT.md) first. - - -## License - -This project is licensed under the MIT License. See [Licenses](LICENSES) for more information. - -![Spezi Footer](https://raw.githubusercontent.com/StanfordSpezi/.github/main/assets/FooterLight.png#gh-light-mode-only) -![Spezi Footer](https://raw.githubusercontent.com/StanfordSpezi/.github/main/assets/FooterDark.png#gh-dark-mode-only) - - - - - - - -# Spezi LLM - -[![Build and Test](https://github.com/StanfordSpezi/SpeziLLM/actions/workflows/build-and-test.yml/badge.svg)](https://github.com/StanfordSpezi/SpeziLLM/actions/workflows/build-and-test.yml) -[![codecov](https://codecov.io/gh/StanfordSpezi/SpeziLLM/branch/main/graph/badge.svg?token=pptLyqtoNR)](https://codecov.io/gh/StanfordSpezi/SpeziLLM) -[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.7954213.svg)](https://doi.org/10.5281/zenodo.7954213) -[![](https://img.shields.io/endpoint?url=https%3A%2F%2Fswiftpackageindex.com%2Fapi%2Fpackages%2FStanfordSpezi%2FSpeziLLM%2Fbadge%3Ftype%3Dswift-versions)](https://swiftpackageindex.com/StanfordSpezi/SpeziLLM) -[![](https://img.shields.io/endpoint?url=https%3A%2F%2Fswiftpackageindex.com%2Fapi%2Fpackages%2FStanfordSpezi%2FSpeziLLM%2Fbadge%3Ftype%3Dplatforms)](https://swiftpackageindex.com/StanfordSpezi/SpeziLLM) - - -## Overview - -The Spezi LLM Swift Package includes modules that are helpful to integrate LLM-related functionality in your application. -The package provides all necessary tools for local LLM execution, the usage of remote OpenAI-based LLMs, as well as LLMs running on Fog node resources within the local network. - -| Screenshot displaying the Chat View utilizing the OpenAI API from SpeziLLMOpenAI. | Screenshot displaying the Local LLM Download View from SpeziLLMLocalDownload. 
| Screenshot displaying the Chat View utilizing a locally executed LLM via SpeziLLMLocal. | -| :----------------------------------------------------------: | :----------------------------------------------------------: | :----------------------------------------------------------: | -| `OpenAI LLM Chat View` | `Language Model Download` | `Local LLM Chat View` | +You can build and run the application using [Xcode](https://developer.apple.com/xcode/) by opening up the **Feedbridge.xcodeproj**. -## Setup +The application provides a [Firebase Firestore](https://firebase.google.com/docs/firestore)-based data upload and [Firebase Authentication](https://firebase.google.com/docs/auth) login & sign-up. +It is required to have the [Firebase Emulator Suite](https://firebase.google.com/docs/emulator-suite) to be up and running to use these features to build and test the application locally. Please follow the [installation instructions](https://firebase.google.com/docs/emulator-suite/install_and_configure). -### 1. Add Spezi LLM as a Dependency +You do not have to make any modifications to the Firebase configuration, login into the `firebase` CLI using your Google account, or create a project in firebase to run, build, and test the application! -You need to add the SpeziLLM Swift package to -[your app in Xcode](https://developer.apple.com/documentation/xcode/adding-package-dependencies-to-your-app#) or -[Swift package](https://developer.apple.com/documentation/xcode/creating-a-standalone-swift-package-with-xcode#Add-a-dependency-on-another-Swift-package). - -> [!IMPORTANT] -> If your application is not yet configured to use Spezi, follow the [Spezi setup article](https://swiftpackageindex.com/stanfordspezi/spezi/documentation/spezi/initial-setup) to set up the core Spezi infrastructure. - -### 2. 
Follow the setup steps of the individual targets - -As Spezi LLM contains a variety of different targets for specific LLM functionalities, please follow the additional setup guide in the respective target section of this README. - -## Targets - -Spezi LLM provides a number of targets to help developers integrate LLMs in their Spezi-based applications: -- [SpeziLLM](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillm): Base infrastructure of LLM execution in the Spezi ecosystem. -- [SpeziLLMLocal](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillmlocal): Local LLM execution capabilities directly on-device. Enables running open-source LLMs from Hugging Face like [Meta's Llama2](https://ai.meta.com/llama/), [Microsoft's Phi](https://azure.microsoft.com/en-us/products/phi), [Google's Gemma](https://ai.google.dev/gemma), or [DeepSeek-R1](https://huggingface.co/deepseek-ai/DeepSeek-R1), among others. See [LLMLocalModel](https://swiftpackageindex.com/stanfordspezi/spezillm/main/documentation/spezillmlocal/llmlocalmodel) for a list of models tested with SpeziLLM. -- [SpeziLLMLocalDownload](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillmlocaldownload): Download and storage manager of local Language Models, including onboarding views. -- [SpeziLLMOpenAI](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillmopenai): Integration with OpenAI's GPT models via using OpenAI's API service. -- [SpeziLLMFog](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillmfog): Discover and dispatch LLM inference jobs to Fog node resources within the local network. 
- -The section below highlights the setup and basic use of the [SpeziLLMLocal](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillmlocal), [SpeziLLMOpenAI](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillmopenai), and [SpeziLLMFog](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillmfog) targets in order to integrate Language Models in a Spezi-based application. - -> [!NOTE] -> To learn more about the usage of the individual targets, please refer to the [DocC documentation of the package](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation). - -### Spezi LLM Local - -The target enables developers to easily execute medium-size Language Models (LLMs) locally on-device. The module allows you to interact with the locally run LLM via purely Swift-based APIs, no interaction with low-level code is necessary, building on top of the infrastructure of the [SpeziLLM target](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillm). - -> [!IMPORTANT] -> Spezi LLM Local is not compatible with simulators. The underlying [`mlx-swift`](https://github.com/ml-explore/mlx-swift) requires a modern Metal MTLGPUFamily and the simulator does not provide that. - -> [!IMPORTANT] -> Important: To use the LLM local target, some LLMs require adding the *Increase Memory Limit* entitlement to the project. - -#### Setup - -You can configure the Spezi Local LLM execution within the typical `SpeziAppDelegate`. -In the example below, the `LLMRunner` from the [SpeziLLM](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillm) target which is responsible for providing LLM functionality within the Spezi ecosystem is configured with the `LLMLocalPlatform` from the [SpeziLLMLocal](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillmlocal) target. This prepares the `LLMRunner` to locally execute Language Models. 
- -```swift -class TestAppDelegate: SpeziAppDelegate { - override var configuration: Configuration { - Configuration { - LLMRunner { - LLMLocalPlatform() - } - } - } -} -``` - -[SpeziLLMLocalDownload](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillmlocaldownload) can be used to download an LLM from [HuggingFace](https://huggingface.co/) and save it on the device for execution. The `LLMLocalDownloadView` provides an out-of-the-box onboarding view for downloading models locally. - -```swift -struct LLMLocalOnboardingDownloadView: View { - var body: some View { - LLMLocalDownloadView( - model: .llama3_8B_4bit, - downloadDescription: "The Llama3 8B model will be downloaded", - ) { - // Action to perform after the model is downloaded and the user presses the next button. - } - } -} +Startup the [Firebase Emulator Suite](https://firebase.google.com/docs/emulator-suite) using ``` - -> [!TIP] -> The `LLMLocalDownloadView` view can be included in your onboarding process using SpeziOnboarding as [demonstrated in this example](https://swiftpackageindex.com/stanfordspezi/spezillm/main/documentation/spezillmlocaldownload/llmlocaldownloadview#overview). - - -#### Usage - -The code example below showcases the interaction with local LLMs through the the [SpeziLLM](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillm) [`LLMRunner`](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillm/llmrunner), which is injected into the SwiftUI `Environment` via the `Configuration` shown above. - -The `LLMLocalSchema` defines the type and configurations of the to-be-executed `LLMLocalSession`. This transformation is done via the [`LLMRunner`](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillm/llmrunner) that uses the `LLMLocalPlatform`. The inference via `LLMLocalSession/generate()` returns an `AsyncThrowingStream` that yields all generated `String` pieces. 
- -```swift -struct LLMLocalDemoView: View { - @Environment(LLMRunner.self) var runner - @State var responseText = "" - - var body: some View { - Text(responseText) - .task { - // Instantiate the `LLMLocalSchema` to an `LLMLocalSession` via the `LLMRunner`. - let llmSession: LLMLocalSession = runner( - with: LLMLocalSchema( - model: .llama3_8B_4bit, - ) - ) - - do { - for try await token in try await llmSession.generate() { - responseText.append(token) - } - } catch { - // Handle errors here. E.g., you can use `ViewState` and `viewStateAlert` from SpeziViews. - } - } - } -} +$ firebase emulators:start ``` -The [`LLMChatViewSchema`](https://swiftpackageindex.com/stanfordspezi/spezillm/main/documentation/spezillm/llmchatviewschema) can be used to easily create a conversational chat interface for your chatbot application with a local LLM. - -```swift -struct LLMLocalChatView: View { - var body: some View { - LLMChatViewSchema( - with: LLMLocalSchema( - model: .llama3_8B_4bit - ) - ) - } -} -``` - -> [!NOTE] -> To learn more about the usage of SpeziLLMLocal, please refer to the comprehensive [DocC documentation](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillmlocal). - -### Spezi LLM Open AI - -A module that allows you to interact with GPT-based Large Language Models (LLMs) from OpenAI within your Spezi application. -`SpeziLLMOpenAI` provides a pure Swift-based API for interacting with the OpenAI GPT API, building on top of the infrastructure of the [SpeziLLM target](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillm). -In addition, `SpeziLLMOpenAI` provides developers with a declarative Domain Specific Language to utilize OpenAI function calling mechanism. This enables a structured, bidirectional, and reliable communication between the OpenAI LLMs and external tools, such as the Spezi ecosystem. 
- -#### Setup - -In order to use OpenAI LLMs within the Spezi ecosystem, the [SpeziLLM](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillm) [`LLMRunner`](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillm/llmrunner) needs to be initialized in the Spezi `Configuration` with the `LLMOpenAIPlatform`. Only after, the `LLMRunner` can be used for inference of OpenAI LLMs. -See the [SpeziLLM documentation](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillm) for more details. +After the emulators have started up, you can run the application in your simulator to build, test, and run the application. -```swift -import Spezi -import SpeziLLM -import SpeziLLMOpenAI -class LLMOpenAIAppDelegate: SpeziAppDelegate { - override var configuration: Configuration { - Configuration { - LLMRunner { - LLMOpenAIPlatform() - } - } - } -} -``` +## Feedbridge Features -> [!IMPORTANT] -> If using `SpeziLLMOpenAI` on macOS, ensure to add the *`Keychain Access Groups` entitlement* to the enclosing Xcode project via *PROJECT_NAME > Signing&Capabilities > + Capability*. The array of keychain groups can be left empty, only the base entitlement is required. - -#### Usage - -The code example below showcases the interaction with an OpenAI LLM through the the [SpeziLLM](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillm) [`LLMRunner`](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillm/llmrunner), which is injected into the SwiftUI `Environment` via the `Configuration` shown above. - -The `LLMOpenAISchema` defines the type and configurations of the to-be-executed `LLMOpenAISession`. This transformation is done via the [`LLMRunner`](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillm/llmrunner) that uses the `LLMOpenAIPlatform`. The inference via `LLMOpenAISession/generate()` returns an `AsyncThrowingStream` that yields all generated `String` pieces. 
- -```swift -import SpeziLLM -import SpeziLLMOpenAI -import SwiftUI - -struct LLMOpenAIDemoView: View { - @Environment(LLMRunner.self) var runner - @State var responseText = "" - - var body: some View { - Text(responseText) - .task { - // Instantiate the `LLMOpenAISchema` to an `LLMOpenAISession` via the `LLMRunner`. - let llmSession: LLMOpenAISession = runner( - with: LLMOpenAISchema( - parameters: .init( - modelType: .gpt4_o, - systemPrompt: "You're a helpful assistant that answers questions from users.", - overwritingToken: "abc123" - ) - ) - ) - - do { - for try await token in try await llmSession.generate() { - responseText.append(token) - } - } catch { - // Handle errors here. E.g., you can use `ViewState` and `viewStateAlert` from SpeziViews. - } - } - } -} -``` +**Manual Data Entry** + +User interface that allows parents to enter and store the following data points: +- Feed Data: + - Feed date, time, and type (direct breastfeeding vs bottle feeding) + - Milk type (breastmilk vs formula) + - Feed time (if type is direct breastfeeding) or feed volume (if type is bottle feeding) +- Wet Diaper Entry: + - Volume: light, medium, heavy + - Color: yellow, pink, or red-tinged + - If pink or red-tinged, alert the parent to seek medical care (may indicate dehydration) +- Stool Entry: + - Volume: light, medium, heavy + - Color: Black, dark green, green, brown, yellow, or beige + - If beige is selected, display an alert to seek medical care (could be a sign of liver failure) +- Dehydration Assessment: + - Skin elasticity: assess if the skin over the abdomen is stretchy (use visual aids from Dr. Sankar) + - Dry mucous membranes: check for dry lips and tongue (use visual aids from Dr. Sankar) + - Alert the parent to seek medical care if either indicator is observed +- Weight Entry: + - Accepts input in grams, kilograms, or pounds and ounces + +**Data Visualization** + +The data below are visualized in a graph and timeline format. 
+- Feeds
+  - Trend feed duration/volume and milk type over time.
+- Wet Diapers
+  - Display diaper quantity and quality (light, medium, heavy, and color) over time.
+- Stools
+  - Display stool volume and color over time.
+- Weights
+  - Display one weight point per day (average if multiple weight entries exist in one day).
+  - Determine the color of the weight dot based on the risk nomogram from [newbornweight.org](https://newbornweight.org/).
+  - For high-risk patients, suggest courses of action at home (e.g., triple feeding using visual aids; course mentors can assist in creating this content).
+  - Additionally advise high-risk patients to seek medical care.
+- Dehydration
+  - Displays whether the baby is dehydrated and gives appropriate warnings

> [!NOTE]
-> To learn more about the usage of SpeziLLMOpenAI, please refer to the [DocC documentation](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillmopenai).
-
-### Spezi LLM Fog
-
-The `SpeziLLMFog` target enables you to use LLMs running on [Fog node](https://en.wikipedia.org/wiki/Fog_computing) computing resources within the local network. The fog nodes advertise their services via [mDNS](https://en.wikipedia.org/wiki/Multicast_DNS), enabling clients to discover all fog nodes serving a specific host within the local network.
-`SpeziLLMFog` then dispatches LLM inference jobs dynamically to a random fog node within the local network and streams the response to surface it to the user.
-
-> [!IMPORTANT]
-> `SpeziLLMFog` requires a `SpeziLLMFogNode` within the local network hosted on some computing resource that actually performs the inference requests. `SpeziLLMFog` provides the `SpeziLLMFogNode` Docker-based package that enables an easy setup of these fog nodes. See the `FogNode` directory on the root level of the SPM package as well as the respective `README.md` for more details.
- -#### Setup - -In order to use Fog LLMs within the Spezi ecosystem, the [SpeziLLM](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillm) [`LLMRunner`](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillm/llmrunner) needs to be initialized in the Spezi `Configuration` with the `LLMFogPlatform`. Only after, the `LLMRunner` can be used for inference with Fog LLMs. See the [SpeziLLM documentation](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillm) for more details. -The `LLMFogPlatform` needs to be initialized with the custom root CA certificate that was used to sign the fog node web service certificate (see the `FogNode/README.md` documentation for more information). Copy the root CA certificate from the fog node as resource to the application using `SpeziLLMFog` and use it to initialize the `LLMFogPlatform` within the Spezi `Configuration`. - -```swift -class LLMFogAppDelegate: SpeziAppDelegate { - private nonisolated static var caCertificateUrl: URL { - // Return local file URL of root CA certificate in the `.crt` format - } - - override var configuration: Configuration { - Configuration { - LLMRunner { - // Set up the Fog platform with the custom CA certificate - LLMRunner { - LLMFogPlatform(configuration: .init(caCertificate: Self.caCertificateUrl)) - } - } - } - } -} -``` +> Do you want to learn more about the Stanford Spezi Template Application and how to use, extend, and modify the Feedbridge application? 
Check out the [Stanford Spezi Template Application documentation](https://stanfordspezi.github.io/SpeziTemplateApplication) -#### Usage - -The code example below showcases the interaction with a Fog LLM through the the [SpeziLLM](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillm) [`LLMRunner`](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillm/llmrunner), which is injected into the SwiftUI `Environment` via the `Configuration` shown above. - -The `LLMFogSchema` defines the type and configurations of the to-be-executed `LLMFogSession`. This transformation is done via the [`LLMRunner`](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillm/llmrunner) that uses the `LLMFogPlatform`. The inference via `LLMFogSession/generate()` returns an `AsyncThrowingStream` that yields all generated `String` pieces. -The `LLMFogSession` automatically discovers all available LLM fog nodes within the local network upon setup and the dispatches the LLM inference jobs to the fog computing resource, streaming back the response and surfaces it to the user. - -> [!IMPORTANT] -> The `LLMFogSchema` accepts a closure that returns an authorization token that is passed with every request to the Fog node in the `Bearer` HTTP field via the `LLMFogParameters/init(modelType:systemPrompt:authToken:)`. The token is created via the closure upon every LLM inference request, as the `LLMFogSession` may be long lasting and the token could therefore expire. Ensure that the closure appropriately caches the token in order to prevent unnecessary token refresh roundtrips to external systems. - -```swift -struct LLMFogDemoView: View { - @Environment(LLMRunner.self) var runner - @State var responseText = "" - - var body: some View { - Text(responseText) - .task { - // Instantiate the `LLMFogSchema` to an `LLMFogSession` via the `LLMRunner`. 
- let llmSession: LLMFogSession = runner( - with: LLMFogSchema( - parameters: .init( - modelType: .llama7B, - systemPrompt: "You're a helpful assistant that answers questions from users.", - authToken: { - // Return authorization token as `String` or `nil` if no token is required by the Fog node. - } - ) - ) - ) - - do { - for try await token in try await llmSession.generate() { - responseText.append(token) - } - } catch { - // Handle errors here. E.g., you can use `ViewState` and `viewStateAlert` from SpeziViews. - } - } - } -} -``` - -> [!NOTE] -> To learn more about the usage of SpeziLLMFog, please refer to the [DocC documentation](https://swiftpackageindex.com/stanfordspezi/spezillm/documentation/spezillmfog). ## Contributing @@ -367,7 +99,7 @@ Contributions to this project are welcome. Please make sure to read the [contrib ## License -This project is licensed under the MIT License. See [Licenses](https://github.com/StanfordSpezi/SpeziLLM/tree/main/LICENSES) for more information. +This project is licensed under the MIT License. See [Licenses](LICENSES) for more information. 
![Spezi Footer](https://raw.githubusercontent.com/StanfordSpezi/.github/main/assets/FooterLight.png#gh-light-mode-only) -![Spezi Footer](https://raw.githubusercontent.com/StanfordSpezi/.github/main/assets/FooterDark.png#gh-dark-mode-only) +![Spezi Footer](https://raw.githubusercontent.com/StanfordSpezi/.github/main/assets/FooterDark.png#gh-dark-mode-only) \ No newline at end of file From 8221031c8e4e0d7a047d968261fccfcd6481226c Mon Sep 17 00:00:00 2001 From: "Pinlin [Calvin] Xu" Date: Thu, 13 Mar 2025 15:19:14 -0700 Subject: [PATCH 2/2] UI testing tweaks --- FeedbridgeUITests/AddBabyTests.swift | 40 ++++++++++----------------- FeedbridgeUITests/AddEntryTests.swift | 14 +++++----- 2 files changed, 22 insertions(+), 32 deletions(-) diff --git a/FeedbridgeUITests/AddBabyTests.swift b/FeedbridgeUITests/AddBabyTests.swift index 63e474b..a5863e4 100644 --- a/FeedbridgeUITests/AddBabyTests.swift +++ b/FeedbridgeUITests/AddBabyTests.swift @@ -47,14 +47,14 @@ class AddBabyTests: XCTestCase { app.buttons["Settings"].tap() // Verify initial state: No baby should be selected - XCTAssertTrue(app.staticTexts["Select Baby"].exists, "Select baby dropdown should be visible") - XCTAssertTrue(app.staticTexts["No baby selected"].exists, "No babies should exist") + XCTAssertTrue(app.staticTexts["Select Baby"].waitForExistence(timeout: 5), "Select baby dropdown should be visible") + XCTAssertTrue(app.staticTexts["No baby selected"].waitForExistence(timeout: 5), "No babies should exist") // Open the dropdown menu and select "Add New Baby" let dropdown = app.buttons["Baby icon, Select Baby, Menu dropdown"] dropdown.tap() let addNew = app.buttons["Add New Baby"] - XCTAssertTrue(addNew.exists, "Should be an option to add a baby") + XCTAssertTrue(addNew.waitForExistence(timeout: 5), "Should be an option to add a baby") addNew.tap() // Ensure that the Save button is initially disabled @@ -67,7 +67,7 @@ class AddBabyTests: XCTestCase { nameField.typeText("Benjamin") // Verify that a duplicate 
name warning is not displayed - XCTAssertFalse(app.staticTexts["This name is already taken"].exists, "Duplicate name warning should not appear") + XCTAssertFalse(app.staticTexts["This name is already taken"].waitForExistence(timeout: 5), "Duplicate name warning should not appear") // Date Picker: Select March 2025 (valid past date) let datePickersQuery = app.datePickers.firstMatch @@ -82,24 +82,14 @@ class AddBabyTests: XCTestCase { saveButton.tap() // Verify that the new baby is correctly added and displayed in the UI - XCTAssertTrue(app.staticTexts["Benjamin"].exists, "Baby's name should be displayed") - XCTAssertTrue(app.buttons["Baby icon, Benjamin, Menu dropdown"].exists, "Baby dropdown should show new baby") - XCTAssertTrue(app.buttons["Delete Baby, Delete Baby"].exists, "Delete button should be displayed for the new baby") - XCTAssertTrue(app.staticTexts["Use Kilograms"].exists, "The 'Use Kilograms' text should be displayed") - } - - func testNavigateToHealthDetails() { - let app = XCUIApplication() - // app.buttons["Settings"].tap() - - // Ensure at least one baby - if app.staticTexts["No baby selected"].exists { - testAddBaby() - } - + XCTAssertTrue(app.staticTexts["Benjamin"].waitForExistence(timeout: 5), "Baby's name should be displayed") + XCTAssertTrue(app.buttons["Baby icon, Benjamin, Menu dropdown"].waitForExistence(timeout: 5), "Baby dropdown should show new baby") + XCTAssertTrue(app.buttons["Delete Baby, Delete Baby"].waitForExistence(timeout: 5), "Delete button should be displayed for the new baby") + XCTAssertTrue(app.staticTexts["Use Kilograms"].waitForExistence(timeout: 5), "The 'Use Kilograms' text should be displayed") + let healthDetailsCell = app.staticTexts["Health Details"] XCTAssertTrue( - healthDetailsCell.exists, + healthDetailsCell.waitForExistence(timeout: 5), "Health Details navigation link not found." 
) healthDetailsCell.tap() @@ -112,10 +102,10 @@ class AddBabyTests: XCTestCase { print("DEBUG: Current UI for 'AddEntryView':\n\(app.debugDescription)") - XCTAssertTrue(app.staticTexts["FEED ENTRIES"].exists, "Feed Entries exists") - XCTAssertTrue(app.staticTexts["WEIGHT ENTRIES"].exists, "Weight Entries exists") - XCTAssertTrue(app.staticTexts["STOOL ENTRIES"].exists, "Stool Entries exists") - XCTAssertTrue(app.staticTexts["VOID ENTRIES"].exists, "Void Entries exists") - XCTAssertTrue(app.staticTexts["DEHYDRATION CHECKS"].exists, "Dehydration Checks exists") + XCTAssertTrue(app.staticTexts["FEED ENTRIES"].waitForExistence(timeout: 5), "Feed Entries exists") + XCTAssertTrue(app.staticTexts["WEIGHT ENTRIES"].waitForExistence(timeout: 5), "Weight Entries exists") + XCTAssertTrue(app.staticTexts["STOOL ENTRIES"].waitForExistence(timeout: 5), "Stool Entries exists") + XCTAssertTrue(app.staticTexts["VOID ENTRIES"].waitForExistence(timeout: 5), "Void Entries exists") + XCTAssertTrue(app.staticTexts["DEHYDRATION CHECKS"].waitForExistence(timeout: 5), "Dehydration Checks exists") } } diff --git a/FeedbridgeUITests/AddEntryTests.swift b/FeedbridgeUITests/AddEntryTests.swift index 62459e1..8bb3c2d 100644 --- a/FeedbridgeUITests/AddEntryTests.swift +++ b/FeedbridgeUITests/AddEntryTests.swift @@ -98,10 +98,10 @@ final class AddEntryTests: XCTestCase { // Check that "No babies found" is displayed XCTAssertTrue( - app.staticTexts["No babies found"].exists, + app.staticTexts["No babies found"].waitForExistence(timeout: 5), "Should show 'No babies found' message if there are no babies." ) - XCTAssertTrue(app.staticTexts["Please add a baby in Settings before adding entries."].exists) + XCTAssertTrue(app.staticTexts["Please add a baby in Settings before adding entries."].waitForExistence(timeout: 5)) } /// Tests adding a weight entry in kilograms. 
@@ -219,7 +219,7 @@ final class AddEntryTests: XCTestCase { // Enter a feed time let feedTimeField = app.textFields["Feed time (minutes)"] XCTAssertTrue( - feedTimeField.exists, "Feed time text field should be present for direct breastfeeding." + feedTimeField.waitForExistence(timeout: 5), "Feed time text field should be present for direct breastfeeding." ) feedTimeField.tap() feedTimeField.typeText("15") @@ -362,12 +362,12 @@ final class AddEntryTests: XCTestCase { // Toggle poor skin elasticity let poorSkinSwitch = app.switches["Poor Skin Elasticity"] - XCTAssertTrue(poorSkinSwitch.exists, "Poor Skin Elasticity toggle not found.") + XCTAssertTrue(poorSkinSwitch.waitForExistence(timeout: 5), "Poor Skin Elasticity toggle not found.") poorSkinSwitch.tap() // Toggle dry mucous membranes let dryMucousSwitch = app.switches["Dry Mucous Membranes"] - XCTAssertTrue(dryMucousSwitch.exists, "Dry Mucous Membranes toggle not found.") + XCTAssertTrue(dryMucousSwitch.waitForExistence(timeout: 5), "Dry Mucous Membranes toggle not found.") dryMucousSwitch.tap() print("DEBUG: Dehydration toggles set on") @@ -422,7 +422,7 @@ final class AddEntryTests: XCTestCase { // At this point, the entry won't be saved, so "Entry saved successfully!" should NOT appear let successBanner = app.staticTexts["Entry saved successfully!"] - XCTAssertFalse(successBanner.exists, "Success banner should not appear with invalid input.") + XCTAssertFalse(successBanner.waitForExistence(timeout: 5), "Success banner should not appear with invalid input.") } /// Tests that invalid feeding input (zero or negative time or volume) is handled properly. @@ -460,7 +460,7 @@ final class AddEntryTests: XCTestCase { let successBanner = app.staticTexts["Entry saved successfully!"] XCTAssertFalse( - successBanner.exists, "Success banner should not appear with invalid feeding data." + successBanner.waitForExistence(timeout: 5), "Success banner should not appear with invalid feeding data." ) } }