diff --git a/sdk/monitor/azure-monitor-ingestion/CHANGELOG.md b/sdk/monitor/azure-monitor-ingestion/CHANGELOG.md deleted file mode 100644 index e0fb2a6c0aca..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/CHANGELOG.md +++ /dev/null @@ -1,267 +0,0 @@ -# Release History - -## 1.3.0-beta.1 (Unreleased) - -### Features Added - -### Breaking Changes - -### Bugs Fixed - -### Other Changes - -## 1.2.9 (2025-03-24) - -### Other Changes - -#### Dependency Updates - -- Upgraded `azure-json` from `1.4.0` to version `1.5.0`. -- Upgraded `azure-core` from `1.55.2` to version `1.55.3`. -- Upgraded `azure-core-http-netty` from `1.15.10` to version `1.15.11`. - - -## 1.2.8 (2025-02-25) - -### Other Changes - -#### Dependency Updates - -- Upgraded `azure-core-http-netty` from `1.15.7` to version `1.15.10`. -- Upgraded `azure-json` from `1.3.0` to version `1.4.0`. -- Upgraded `azure-core` from `1.54.1` to version `1.55.2`. - - -## 1.2.7 (2024-12-04) - -### Other Changes - -#### Dependency Updates - -- Upgraded `azure-core` from `1.53.0` to version `1.54.1`. -- Upgraded `azure-core-http-netty` from `1.15.5` to version `1.15.7`. - - -## 1.2.6 (2024-10-25) - -### Other Changes - -#### Dependency Updates - -- Upgraded `azure-core` from `1.52.0` to version `1.53.0`. -- Upgraded `azure-core-http-netty` from `1.15.4` to version `1.15.5`. - - -## 1.2.5 (2024-09-27) - -### Other Changes - -#### Dependency Updates - -- Upgraded `azure-core-http-netty` from `1.15.3` to version `1.15.4`. -- Upgraded `azure-core` from `1.51.0` to version `1.52.0`. -- Upgraded `azure-json` from `1.2.0` to version `1.3.0`. - - -## 1.2.4 (2024-08-24) - -### Other Changes - -#### Dependency Updates - -- Upgraded `azure-core` from `1.50.0` to version `1.51.0`. -- Upgraded `azure-core-http-netty` from `1.15.2` to version `1.15.3`. -- Upgraded `azure-json` from `1.1.0` to version `1.2.0`. - - -## 1.2.3 (2024-07-26) - -### Other Changes - -#### Dependency Updates - -- Upgraded `azure-core` from `1.49.1` to version `1.50.0`. -- Upgraded `azure-core-http-netty` from `1.15.1` to version `1.15.2`. - - -## 1.2.2 (2024-06-27) - -### Other Changes - -#### Dependency Updates - -- Upgraded `azure-core` from `1.49.0` to version `1.49.1`. -- Upgraded `azure-core-http-netty` from `1.15.0` to version `1.15.1`. - - -## 1.2.1 (2024-05-28) - -### Other Changes - -#### Dependency Updates - -- Upgraded `azure-core` from `1.48.0` to version `1.49.0`. -- Upgraded `azure-core-http-netty` from `1.14.2` to version `1.15.0`. - - -## 1.2.0 (2024-04-12) - -### Features Added - -- Introduced `LogsIngestionAudience` to allow specification of the audience of logs ingestion clients. -- Support for the scopes of non-public clouds. -- Migration to stream-style serialization using `azure-json` - -### Other Changes - -#### Dependency Updates - -- Added `azure-json` version `1.1.0` as a dependency. -- Upgraded `azure-core-http-netty` from `1.14.1` to version `1.14.2`. -- Upgraded `azure-core` from `1.47.0` to version `1.48.0`. - -## 1.1.5 (2024-03-11) - -### Other Changes - -- Updated the JavaDoc documentation to increase support for our clients. - -#### Dependency Updates - -- Upgraded `azure-core-http-netty` from `1.14.0` to version `1.14.1`. -- Upgraded `azure-core` from `1.46.0` to version `1.47.0`. - -## 1.1.4 (2024-02-20) - -### Other Changes - -#### Dependency Updates - -- Upgraded `azure-core-http-netty` from `1.13.11` to version `1.14.0`. -- Upgraded `azure-core` from `1.45.1` to version `1.46.0`. 
- - -## 1.1.3 (2023-12-04) - -### Other Changes - -#### Dependency Updates - -- Upgraded `azure-core-http-netty` from `1.13.10` to version `1.13.11`. -- Upgraded `azure-core` from `1.45.0` to version `1.45.1`. - -## 1.1.2 (2023-11-20) - -### Other Changes - -#### Dependency Updates - -- Upgraded `azure-core` from `1.44.1` to version `1.45.0`. -- Upgraded `azure-core-http-netty` from `1.13.9` to version `1.13.10`. - -## 1.1.1 (2023-10-20) - -### Other Changes - -#### Dependency Updates - -- Upgraded `azure-core` from `1.43.0` to version `1.44.1`. -- Upgraded `azure-core-http-netty` from `1.13.7` to version `1.13.9`. - -## 1.1.0 (2023-09-13) - -### Features Added -- `LogsIngestionClient` now implements `AutoCloseable` interface and can be used in try-with-resources block. - -### Other Changes - -#### Dependency Updates - -- Upgraded `azure-core` from `1.42.0` to version `1.43.0`. -- Upgraded `azure-core-http-netty` from `1.13.6` to version `1.13.7`. - -## 1.0.6 (2023-08-18) - -### Other Changes - -#### Dependency Updates - -- Upgraded `azure-core` from `1.41.0` to version `1.42.0`. -- Upgraded `azure-core-http-netty` from `1.13.5` to version `1.13.6`. - -## 1.0.5 (2023-07-25) - -### Other Changes - -#### Dependency Updates - -- Upgraded `azure-core` from `1.40.0` to version `1.41.0`. -- Upgraded `azure-core-http-netty` from `1.13.4` to version `1.13.5`. - -## 1.0.4 (2023-06-20) - -### Other Changes - -#### Dependency Updates - -- Upgraded `azure-core` from `1.39.0` to version `1.40.0`. -- Upgraded `azure-core-http-netty` from `1.13.3` to version `1.13.4`. - -## 1.0.3 (2023-05-23) - -### Other Changes - -#### Dependency Updates - -- Upgraded `azure-core-http-netty` from `1.13.2` to version `1.13.3`. -- Upgraded `azure-core` from `1.38.0` to version `1.39.0`. - -## 1.0.2 (2023-04-26) - -### Other Changes - -#### Dependency Updates - -- Upgraded `azure-core` from `1.37.0` to version `1.38.0`. -- Upgraded `azure-core-http-netty` from `1.13.1` to version `1.13.2`. - -## 1.0.1 (2023-03-18) - -### Other Changes - -#### Dependency Updates - -- Upgraded `azure-core` from `1.36.0` to version `1.37.0`. -- Upgraded `azure-core-http-netty` from `1.13.0` to version `1.13.1`. - -## 1.0.0 (2023-02-15) - -### Features Added -- Added `logsUploadErrorConsumer` to `LogsUploadOptions` that is called when there's any service error while uploading logs. -- `upload` methods on `LogsIngestionClient` now take an `Iterable` of logs instead of `List`. - -### Breaking Changes -- Removed `UploadLogsResult` from the response of `upload` methods in `LogsIngestionClient` - -### Other Changes - -#### Dependency Updates -- Upgraded `azure-core` to `1.36.0`. -- Upgraded `azure-core-http-netty` to `1.13.0`. - -## 1.0.0-beta.2 (2022-08-17) - -### Other Changes - -#### Dependency updates -- Upgraded `azure-core` to version `1.31.0`. -- Upgraded `azure-core-http-netty` to version `1.12.4`. - -## 1.0.0-beta.1 (2022-06-17) -Version 1.0.0-beta.1 is a preview of our efforts in creating a client library for Azure Logs Ingestion that is -developer-friendly, idiomatic to the Java ecosystem, and as consistent across different languages and platforms as -possible. The principles that guide our efforts can be found in the -[Azure SDK Design Guidelines for Java](https://azure.github.io/azure-sdk/java_introduction.html). - -## Features Added -- Initial release. Please see the README for information on using the new library. 
diff --git a/sdk/monitor/azure-monitor-ingestion/README.md b/sdk/monitor/azure-monitor-ingestion/README.md deleted file mode 100644 index 134f46cbe3f1..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/README.md +++ /dev/null @@ -1,253 +0,0 @@ -# Azure Monitor Ingestion client library for Java - -The Azure Monitor Ingestion client library is used to send custom logs to [Azure Monitor][azure_monitor_overview] using -the [Logs Ingestion API][ingestion_overview]. - -This library allows you to send data from virtually any source to supported built-in tables or to custom tables -that you create in Log Analytics workspace. You can even extend the schema of built-in tables with custom columns. - -## Getting started - -### Prerequisites -- A [Java Development Kit (JDK)][jdk_link], version 8 or later. - - Here are details about [Java 8 client compatibility with Azure Certificate Authority](https://learn.microsoft.com/azure/security/fundamentals/azure-ca-details?tabs=root-and-subordinate-cas-list#client-compatibility-for-public-pkis). -- [Azure Subscription][azure_subscription] -- A [Data Collection Endpoint][data_collection_endpoint] -- A [Data Collection Rule][data_collection_rule] -- A [Log Analytics workspace][log_analytics_workspace] - -### Include the package - -#### Include the BOM file - -Please include the `azure-sdk-bom` to your project to take a dependency on the latest stable version of the library. In -the following snippet, replace the `{bom_version_to_target}` placeholder with the version number. -To learn more about the BOM, see the [AZURE SDK BOM README](https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/boms/azure-sdk-bom/README.md). - -```xml - - - - com.azure - azure-sdk-bom - {bom_version_to_target} - pom - import - - - -``` -and then include the direct dependency in the dependencies section without the version tag as shown below. - -```xml - - - com.azure - azure-monitor-ingestion - - -``` - -#### Include direct dependency - -If you want to take dependency on a particular version of the library that is not present in the BOM, -add the direct dependency to your project as follows. - -[//]: # ({x-version-update-start;com.azure:azure-monitor-ingestion;current}) -```xml - - com.azure - azure-monitor-ingestion - 1.3.0-beta.1 - -``` -[//]: # ({x-version-update-end}) - -### Create the client - -An authenticated client is required to upload logs to Azure Monitor. The library includes both synchronous and asynchronous forms of the clients. To authenticate, -the following examples use `DefaultAzureCredentialBuilder` from the [azure-identity](https://central.sonatype.com/artifact/com.azure/azure-identity/1.8.1) package. - -#### Authenticating using Azure Active Directory -You can authenticate with Azure Active Directory using the [Azure Identity library][azure_identity]. -To use the [DefaultAzureCredential][DefaultAzureCredential] provider shown below, or other credential providers provided with the Azure SDK, please include the `azure-identity` package: - -[//]: # ({x-version-update-start;com.azure:azure-identity;dependency}) -```xml - - com.azure - azure-identity - 1.15.3 - -``` -[//]: # ({x-version-update-end}) - -Set the values of the client ID, tenant ID, and client secret of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET. 
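If you prefer not to rely on environment variables, the service principal values can also be supplied explicitly when building the credential. The snippet below is a minimal sketch that assumes the `ClientSecretCredentialBuilder` type from the `azure-identity` package; the placeholder values are illustrative only.

```java
import com.azure.identity.ClientSecretCredential;
import com.azure.identity.ClientSecretCredentialBuilder;

// Build a credential from explicit service principal values instead of reading
// AZURE_CLIENT_ID, AZURE_TENANT_ID, and AZURE_CLIENT_SECRET from the environment.
ClientSecretCredential credential = new ClientSecretCredentialBuilder()
    .tenantId("<tenant-id>")
    .clientId("<client-id>")
    .clientSecret("<client-secret>")
    .build();
```

The resulting credential can be passed to `LogsIngestionClientBuilder#credential` in the same way as the `DefaultAzureCredential` shown in the following examples.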
- -#### Synchronous Logs Ingestion client - -```java readme-sample-createLogsIngestionClient -DefaultAzureCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); - -LogsIngestionClient client = new LogsIngestionClientBuilder() - .endpoint("<data-collection-endpoint>") - .credential(tokenCredential) - .buildClient(); -``` - -#### Asynchronous Logs Ingestion client - -```java readme-sample-createLogsIngestionAsyncClient -DefaultAzureCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); - -LogsIngestionAsyncClient asyncClient = new LogsIngestionClientBuilder() - .endpoint("<data-collection-endpoint>") - .credential(tokenCredential) - .buildAsyncClient(); -``` -## Key concepts - -### Data Collection Endpoint - -Data Collection Endpoints (DCEs) allow you to uniquely configure ingestion settings for Azure Monitor. [This -article][data_collection_endpoint] provides an overview of data collection endpoints including their contents and -structure and how you can create and work with them. - -### Data Collection Rule - -Data collection rules (DCR) define data collected by Azure Monitor and specify how and where that data should be sent or -stored. The REST API call must specify a DCR to use. A single DCE can support multiple DCRs, so you can specify a -different DCR for different sources and target tables. - -The DCR must understand the structure of the input data and the structure of the target table. If the two don't match, -it can use a transformation to convert the source data to match the target table. You may also use the transform to -filter source data and perform any other calculations or conversions. - -For more details, see [Data collection rules in Azure Monitor][data_collection_rule]. For information on how to retrieve -a DCR ID, see [this tutorial][data_collection_rule_tutorial]. - -### Log Analytics Workspace Tables - -Custom logs can send data to any custom table that you create and to certain built-in tables in your Log Analytics -workspace. The target table must exist before you can send data to it. The following built-in tables are currently supported: - -- [CommonSecurityLog](https://learn.microsoft.com/azure/azure-monitor/reference/tables/commonsecuritylog) -- [SecurityEvents](https://learn.microsoft.com/azure/azure-monitor/reference/tables/securityevent) -- [Syslog](https://learn.microsoft.com/azure/azure-monitor/reference/tables/syslog) -- [WindowsEvents](https://learn.microsoft.com/azure/azure-monitor/reference/tables/windowsevent) - -### Logs retrieval -The logs that were uploaded using this library can be queried using the -[Azure Monitor Query](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/monitor/azure-monitor-query#readme) -client library. - -## Examples - -- [Upload custom logs](#upload-custom-logs) -- [Upload custom logs with max concurrency](#upload-custom-logs-with-max-concurrency) -- [Upload custom logs with error handling](#upload-custom-logs-with-error-handling) - -### Upload custom logs - -```java readme-sample-uploadLogs -DefaultAzureCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); - -LogsIngestionClient client = new LogsIngestionClientBuilder() - .endpoint("<data-collection-endpoint>") - .credential(tokenCredential) - .buildClient(); - -List<Object> logs = getLogs(); -client.upload("<data-collection-rule-id>", "<stream-name>", logs); -System.out.println("Logs uploaded successfully"); -``` - -### Upload custom logs with max concurrency - -If the input logs collection is too large, the client will split the input into multiple smaller requests.
These -requests are sent serially, by default, but by configuring the max concurrency in `LogsUploadOptions`, these requests -can be concurrently sent to the service as shown in the example below. - -```java readme-sample-uploadLogsWithMaxConcurrency -DefaultAzureCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); - -LogsIngestionClient client = new LogsIngestionClientBuilder() - .endpoint("<data-collection-endpoint>") - .credential(tokenCredential) - .buildClient(); - -List<Object> logs = getLogs(); -LogsUploadOptions logsUploadOptions = new LogsUploadOptions() - .setMaxConcurrency(3); -client.upload("<data-collection-rule-id>", "<stream-name>", logs, logsUploadOptions, - Context.NONE); -System.out.println("Logs uploaded successfully"); -``` - -### Upload custom logs with error handling - -When uploading a large collection of logs, the client splits the input into multiple smaller service requests. The upload -method provides an option to handle individual service errors through an error handler as shown in the example below. -This error handler includes the exception details and the list of all logs that failed to upload. If an error handler is -not provided, the upload method will throw an aggregate exception that includes all the service errors. - -```java readme-sample-uploadLogs-error-handler -DefaultAzureCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); - -LogsIngestionClient client = new LogsIngestionClientBuilder() - .endpoint("<data-collection-endpoint>") - .credential(tokenCredential) - .buildClient(); - -List<Object> logs = getLogs(); - -LogsUploadOptions logsUploadOptions = new LogsUploadOptions() - .setLogsUploadErrorConsumer(uploadLogsError -> { - System.out.println("Error message " + uploadLogsError.getResponseException().getMessage()); - System.out.println("Total logs failed to upload = " + uploadLogsError.getFailedLogs().size()); - - // throw the exception here to abort uploading remaining logs - // throw uploadLogsError.getResponseException(); - }); -client.upload("<data-collection-rule-id>", "<stream-name>", logs, logsUploadOptions, - Context.NONE); -``` -## Troubleshooting - -For details on diagnosing various failure scenarios, see our [troubleshooting guide](https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/monitor/azure-monitor-ingestion/TROUBLESHOOTING.md). - -## Next steps -More samples can be found [here][samples]. - -## Contributing - -This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License -Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. -For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). - -When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the -PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this -once across all repos using our CLA. - -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). -For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact -[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
- - -[azure_identity]: https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/identity/azure-identity -[azure_monitor_overview]: https://learn.microsoft.com/azure/azure-monitor/overview -[azure_subscription]: https://azure.microsoft.com/free -[cla]: https://cla.microsoft.com -[coc]: https://opensource.microsoft.com/codeofconduct/ -[coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/ -[coc_contact]: mailto:opencode@microsoft.com -[data_collection_endpoint]: https://learn.microsoft.com//azure/azure-monitor/essentials/data-collection-endpoint-overview -[data_collection_rule]: https://learn.microsoft.com/azure/azure-monitor/essentials/data-collection-rule-overview -[data_collection_rule_tutorial]: https://learn.microsoft.com/azure/azure-monitor/logs/tutorial-logs-ingestion-portal#collect-information-from-the-dcr -[DefaultAzureCredential]: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/identity/azure-identity/README.md#defaultazurecredential -[ingestion_overview]: https://learn.microsoft.com/azure/azure-monitor/logs/logs-ingestion-api-overview -[jdk_link]: https://learn.microsoft.com/java/azure/jdk/?view=azure-java-stable -[log_analytics_workspace]: https://learn.microsoft.com//azure/azure-monitor/logs/log-analytics-workspace-overview -[logging]: https://learn.microsoft.com//azure/developer/java/sdk/logging-overview -[samples]: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion - diff --git a/sdk/monitor/azure-monitor-ingestion/TROUBLESHOOTING.md b/sdk/monitor/azure-monitor-ingestion/TROUBLESHOOTING.md deleted file mode 100644 index c9591b2ee5c7..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/TROUBLESHOOTING.md +++ /dev/null @@ -1,132 +0,0 @@ -# Troubleshooting Azure Monitor Ingestion client library issues - -This troubleshooting guide contains instructions to diagnose frequently encountered issues while using the Azure Monitor Ingestion client library for Java. - -## Table of contents - -* [General troubleshooting](#general-troubleshooting) - * [Enable client logging](#enable-client-logging) - * [Enable HTTP request/response logging](#enable-http-requestresponse-logging) - * [Troubleshooting authentication issues](#authentication-errors) - * [Troubleshooting NoSuchMethodError or NoClassDefFoundError](#dependency-conflicts) -* [Troubleshooting logs ingestion](#troubleshooting-logs-ingestion) - * [Troubleshooting authorization errors](#troubleshooting-authorization-errors) - * [Troubleshooting missing logs](#troubleshooting-missing-logs) - * [Troubleshooting slow logs upload](#troubleshooting-slow-logs-upload) - -## General troubleshooting - -### Enable client logging - -To troubleshoot issues with the Azure Monitor Ingestion library, it's important to first enable logging to monitor the behavior of the application. The errors and warnings in the logs generally provide useful insights into what went wrong and sometimes include corrective actions to fix issues. - -The Azure client libraries for Java have two logging options: - -* A built-in logging framework. -* Support for logging using the [SLF4J](https://www.slf4j.org/) interface. - -Refer to the instructions in this reference document on how -to [configure logging in Azure SDK for Java](https://learn.microsoft.com/azure/developer/java/sdk/logging-overview). 
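As a quick way to confirm that a log level has been picked up by the SDK's fallback console logger, you can read it back through `azure-core`'s `Configuration` type. This is only a sketch: the `AZURE_LOG_LEVEL` variable name and its accepted values (such as `verbose`, `information`, `warning`, and `error`) are taken from the logging documentation linked above and should be verified there.

```java
import com.azure.core.util.Configuration;

public final class LogLevelCheck {
    public static void main(String[] args) {
        // When no SLF4J implementation is on the classpath, the Azure client libraries
        // fall back to a console logger whose verbosity is controlled by AZURE_LOG_LEVEL.
        String level = Configuration.getGlobalConfiguration().get("AZURE_LOG_LEVEL");
        System.out.println("AZURE_LOG_LEVEL is "
            + (level == null ? "not set, so the fallback logger is disabled" : level));
    }
}
```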
- -### Enable HTTP request/response logging - -Reviewing the HTTP request sent or response received over the wire to/from the Azure Monitor service can be useful in troubleshooting issues. To enable logging of the HTTP request and response payload, the `LogsIngestionClient` can be configured as follows: - -```java readme-sample-enablehttplogging -LogsIngestionClient logsIngestionClient = new LogsIngestionClientBuilder() - .endpoint("<data-collection-endpoint>") - .credential(credential) - .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) - .buildClient(); -``` - -Alternatively, you can configure logging HTTP requests and responses for your entire application by setting the `AZURE_HTTP_LOG_DETAIL_LEVEL` environment variable. This change will enable logging for every Azure client that supports logging HTTP requests/responses. - -Environment variable name: `AZURE_HTTP_LOG_DETAIL_LEVEL` - -| Value | Logging level | |------------------|----------------------------------------------------------------------| | `none` | HTTP request/response logging is disabled | | `basic` | Logs only URLs, HTTP methods, and time to finish the request. | | `headers` | Logs everything in `basic`, plus all the request and response headers. | | `body` | Logs everything in `basic`, plus the request and response bodies. | | `body_and_headers` | Logs everything in `headers` and `body`. | - -**NOTE**: When logging the request and response bodies, ensure that they don't contain confidential information. When logging headers, the client library has a default set of headers that are considered safe to log. This set can be updated by updating the log options in the builder, as follows: - -```java -clientBuilder.httpLogOptions(new HttpLogOptions().addAllowedHeaderName("safe-to-log-header-name")) -``` - -### Authentication errors - -The Azure Monitor Ingestion library supports Azure Active Directory authentication. The `LogsIngestionClientBuilder` can be configured to set the `credential`. To provide a valid credential, you can use the `azure-identity` dependency. For more information on getting started, see the [README](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/monitor/azure-monitor-ingestion#create-the-client) of the Azure Monitor Ingestion library. For more information on the credential types supported in `azure-identity`, see the [Azure Identity library documentation](https://learn.microsoft.com/azure/developer/java/sdk/identity). - -#### Setting audience for non-public clouds - -When not using the default Azure public cloud, you may need to set the audience for the credential. The audience is the resource identifier of the Azure Monitor service. You can set the audience of your `LogsIngestionClient` and `LogsIngestionAsyncClient` by using the `audience` method on the `LogsIngestionClientBuilder` object. -See the [UploadLogsSample](https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/UploadLogsSample.java) for an example of setting the audience. - - -### Dependency conflicts - -If you see `NoSuchMethodError` or `NoClassDefFoundError` during your application runtime, this is due to a dependency version conflict.
For more information on why this happens and [ways to mitigate this issue](https://learn.microsoft.com/azure/developer/java/sdk/troubleshooting-dependency-version-conflict#mitigate-version-mismatch-issues), see [troubleshooting dependency version conflicts](https://learn.microsoft.com/azure/developer/java/sdk/troubleshooting-dependency-version-conflict). - -## Troubleshooting logs ingestion - -### Troubleshooting authorization errors - -If you get an HTTP error with status code 403 (Forbidden), it means that the provided credentials have insufficient permissions to upload logs to the specified Data Collection Endpoint (DCE) and Data Collection Rule (DCR) ID. - -```text -com.azure.core.exception.HttpResponseException: Status code 403, "{"error":{"code":"OperationFailed","message":"The -authentication token provided does not have access to ingest data for the data collection rule with immutable Id -'' PipelineAccessResult: AccessGranted: False, IsRbacPresent: False, IsDcrDceBindingValid: , DcrArmId: , - Message: Required authorization action was not found for tenantId objectId on resourceId - ConfigurationId: .."}}" -``` - -1. Check that the application or user making the request has sufficient permissions: - * See this document to [manage access to data collection rule](https://learn.microsoft.com/azure/azure-monitor/logs/tutorial-logs-ingestion-portal#assign-permissions-to-the-dcr). - * To ingest logs, ensure the service principal is assigned the **Monitoring Metrics Publisher** role for the data collection rule. -1. If the user or application is granted sufficient privileges to upload logs, ensure you're authenticating as that user/application. If you're authenticating using the [DefaultAzureCredential](https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/identity/azure-identity/README.md#authenticating-with-defaultazurecredential), check the logs to verify that the credential used is the one you expected. To enable logging, see the [Enable client logging](#enable-client-logging) section. -1. The permissions may take up to 30 minutes to propagate. So, if the permissions were granted recently, retry after some time. - -### Troubleshooting missing logs - -When you send logs to Azure Monitor for ingestion, the request may succeed, but you may not see the data appearing in the designated Log Analytics workspace table as configured in the DCR. To investigate and resolve this issue, ensure the following: - -* Double-check that you're using the correct data collection endpoint when configuring the `LogsIngestionClientBuilder`. Using the wrong endpoint can result in data not being properly sent to the Log Analytics workspace. - -* Make sure you provide the correct DCR ID to the `upload` method. The DCR ID is an immutable identifier that determines the transformation rules applied to the uploaded logs and directs them to the appropriate Log Analytics workspace table. - -* Verify that the custom table specified in the DCR exists in the Log Analytics workspace. Ensure that you provide the accurate name of the custom table to the upload method. Mismatched table names can lead to logs not being stored correctly. - -* Confirm that the logs you're sending adhere to the format expected by the DCR. The data should be in the form of a JSON object or array, structured according to the requirements specified in the DCR. Additionally, it's essential to encode the request body in UTF-8 to avoid any data transmission issues. 
- -* Keep in mind that data ingestion may take some time, especially if you're sending data to a specific table for the first time. In such cases, allow up to 15 minutes for the data to be fully ingested and available for querying and analysis. - -### Troubleshooting slow logs upload - -If you experience delays when uploading logs, it could be due to reaching service limits, which may trigger the rate limiter to throttle your client. To determine if your client has reached service limits, you can enable logging and check if the service is returning errors with an HTTP status code 429. For more information on service limits, see the [Azure Monitor service limits documentation](https://learn.microsoft.com/azure/azure-monitor/service-limits#logs-ingestion-api). - -To enable client logging and to troubleshoot this issue further, see the instructions provided in the section titled [Enable client logging](#enable-client-logging). - -If there are no throttling errors, then consider increasing the concurrency to upload multiple log requests in parallel. -To set the concurrency, use the `LogsUploadOptions` type's `setMaxConcurrency` method. - -```java readme-sample-uploadLogsWithMaxConcurrency -DefaultAzureCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); - -LogsIngestionClient client = new LogsIngestionClientBuilder() - .endpoint("<data-collection-endpoint>") - .credential(tokenCredential) - .buildClient(); - -List<Object> logs = getLogs(); -LogsUploadOptions logsUploadOptions = new LogsUploadOptions() - .setMaxConcurrency(3); -client.upload("<data-collection-rule-id>", "<stream-name>", logs, logsUploadOptions, - Context.NONE); -System.out.println("Logs uploaded successfully"); -``` diff --git a/sdk/monitor/azure-monitor-ingestion/assets.json b/sdk/monitor/azure-monitor-ingestion/assets.json deleted file mode 100644 index 60c7fa7c5ec8..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/assets.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "AssetsRepo": "Azure/azure-sdk-assets", - "AssetsRepoPrefixPath": "java", - "TagPrefix": "java/monitor/azure-monitor-ingestion", - "Tag": "java/monitor/azure-monitor-ingestion_9ef5518906" -} diff --git a/sdk/monitor/azure-monitor-ingestion/checkstyle-suppressions.xml b/sdk/monitor/azure-monitor-ingestion/checkstyle-suppressions.xml deleted file mode 100644 index 40a1dff89d56..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/checkstyle-suppressions.xml +++ /dev/null @@ -1,15 +0,0 @@ - - - - - - - - - - - - - - - diff --git a/sdk/monitor/azure-monitor-ingestion/pom.xml b/sdk/monitor/azure-monitor-ingestion/pom.xml deleted file mode 100644 index 04ed6f6c47d6..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/pom.xml +++ /dev/null @@ -1,100 +0,0 @@ - - - - 4.0.0 - - com.azure - azure-monitor-ingestion - 1.3.0-beta.1 - - jar - Microsoft Azure SDK for Azure Monitor Data Ingestion - This package contains Microsoft Azure Monitor Data Ingestion SDK.
- - com.azure - azure-client-sdk-parent - 1.7.0 - ../../parents/azure-client-sdk-parent - - https://github.com/Azure/azure-sdk-for-java - - - - The MIT License (MIT) - http://opensource.org/licenses/MIT - repo - - - - - - azure-java-build-docs - ${site.url}/site/${project.artifactId} - - - - - scm:git:https://github.com/Azure/azure-sdk-for-java - scm:git:git@github.com:Azure/azure-sdk-for-java.git - HEAD - - - - UTF-8 - - - - --add-exports com.azure.core/com.azure.core.implementation.jackson=ALL-UNNAMED - --add-exports com.azure.core/com.azure.core.implementation.util=ALL-UNNAMED - --add-exports com.azure.monitor.ingestion/com.azure.monitor.ingestion.implementation=ALL-UNNAMED - --add-opens com.azure.monitor.ingestion/com.azure.monitor.ingestion=ALL-UNNAMED - --add-opens com.azure.monitor.ingestion/com.azure.monitor.ingestion=com.fasterxml.jackson.databind - --add-opens com.azure.monitor.ingestion/com.azure.monitor.ingestion.implementation=com.fasterxml.jackson.databind - - - checkstyle-suppressions.xml - false - spotbugs-exclude.xml - - - - - microsoft - Microsoft - - - - - - com.azure - azure-core - 1.55.3 - - - com.azure - azure-core-http-netty - 1.15.11 - - - com.azure - azure-json - 1.5.0 - - - - - com.azure - azure-core-test - 1.27.0-beta.8 - test - - - com.azure - azure-identity - 1.15.4 - test - - - diff --git a/sdk/monitor/azure-monitor-ingestion/spotbugs-exclude.xml b/sdk/monitor/azure-monitor-ingestion/spotbugs-exclude.xml deleted file mode 100644 index f71b91164c41..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/spotbugs-exclude.xml +++ /dev/null @@ -1,28 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/LogsIngestionAsyncClient.java b/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/LogsIngestionAsyncClient.java deleted file mode 100644 index e8258c04fac5..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/LogsIngestionAsyncClient.java +++ /dev/null @@ -1,315 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package com.azure.monitor.ingestion; - -import com.azure.core.annotation.ReturnType; -import com.azure.core.annotation.ServiceClient; -import com.azure.core.annotation.ServiceMethod; -import com.azure.core.credential.TokenCredential; -import com.azure.core.exception.ClientAuthenticationException; -import com.azure.core.exception.HttpResponseException; -import com.azure.core.exception.ResourceModifiedException; -import com.azure.core.exception.ResourceNotFoundException; -import com.azure.core.http.HttpHeader; -import com.azure.core.http.HttpHeaderName; -import com.azure.core.http.rest.RequestOptions; -import com.azure.core.http.rest.Response; -import com.azure.core.util.BinaryData; -import com.azure.core.util.Context; -import com.azure.monitor.ingestion.implementation.Batcher; -import com.azure.monitor.ingestion.implementation.IngestionUsingDataCollectionRulesAsyncClient; -import com.azure.monitor.ingestion.implementation.IngestionUsingDataCollectionRulesClient; -import com.azure.monitor.ingestion.implementation.LogsIngestionRequest; -import com.azure.monitor.ingestion.implementation.UploadLogsResponseHolder; -import com.azure.monitor.ingestion.models.LogsUploadError; -import com.azure.monitor.ingestion.models.LogsUploadException; -import com.azure.monitor.ingestion.models.LogsUploadOptions; -import reactor.core.publisher.Mono; -import reactor.core.publisher.SynchronousSink; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Objects; -import java.util.function.Consumer; - -import static com.azure.core.util.FluxUtil.withContext; -import static com.azure.monitor.ingestion.implementation.Utils.GZIP; -import static com.azure.monitor.ingestion.implementation.Utils.getConcurrency; -import static com.azure.monitor.ingestion.implementation.Utils.gzipRequest; - -/** - *

This class provides an asynchronous client for uploading custom logs to an Azure Monitor Log Analytics workspace. - * This client encapsulates REST API calls, used to send data to a Log Analytics workspace, into a set of asynchronous - * operations.

- * - *

Getting Started

- * - *

 To create an instance of the {@link LogsIngestionAsyncClient}, use the {@link LogsIngestionClientBuilder} and configure - * the various options provided by the builder to customize the client as per your requirements. There are two required - * properties that should be set to build a client: - *

    - *
  1. {@code endpoint} - The data collection endpoint. - * See {@link LogsIngestionClientBuilder#endpoint(String) endpoint} method for more details.
  2. {@code credential} - The AAD authentication credential that has the "Monitoring Metrics Publisher" role assigned - * to it. - * Azure Identity - * provides a variety of AAD credential types that can be used. See - * {@link LogsIngestionClientBuilder#credential(TokenCredential) credential} method for more details. - *
- * - *

Instantiating an asynchronous Logs ingestion client

- * - *
- * LogsIngestionAsyncClient logsIngestionAsyncClient = new LogsIngestionClientBuilder()
- *         .credential(tokenCredential)
- *         .endpoint("<data-collection-endpoint>")
- *         .buildAsyncClient();
- * 
- * - * - *

Client Usage

- * - *

- * For additional information on how to use this client, see the following method documentation: - *

- * - *
    - *
  • - * {@link #upload(String, String, Iterable) upload(String, String, Iterable)} - Uploads - * logs to a Log Analytics workspace. - *
  • - *
  • - * {@link #upload(String, String, Iterable, LogsUploadOptions) upload(String, String, Iterable, LogsUploadOptions)} - * - Uploads logs to a Log Analytics workspace with options to configure the upload request. - *
  • - *
  • - * {@link #uploadWithResponse(String, String, BinaryData, RequestOptions) uploadWithResponse(String, String, BinaryData, RequestOptions)} - * - Uploads logs to a Log Analytics workspace with options to configure the HTTP request. - *
  • - *
- * - * @see LogsIngestionClientBuilder - * @see LogsIngestionClient - * @see com.azure.monitor.ingestion - */ -@ServiceClient(isAsync = true, builder = LogsIngestionClientBuilder.class) -public final class LogsIngestionAsyncClient { - private final IngestionUsingDataCollectionRulesAsyncClient service; - - /** - * Creates a {@link LogsIngestionAsyncClient} that sends requests to the data collection endpoint. - * - * @param service The {@link IngestionUsingDataCollectionRulesClient} that the client routes its request through. - */ - LogsIngestionAsyncClient(IngestionUsingDataCollectionRulesAsyncClient service) { - this.service = service; - } - - /** - * Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be - * too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split - * the input logs into multiple smaller requests before sending to the service. - * - *

- * Each log in the input collection must be a valid JSON object. The JSON object should match the - * schema defined - * by the stream name. The stream's schema can be found in the Azure portal. - *

- * - *

Upload logs to Azure Monitor

- * - *
-     * List<Object> logs = getLogs();
-     * logsIngestionAsyncClient.upload("<data-collection-rule-id>", "<stream-name>", logs)
-     *         .subscribe();
-     * 
- * - * - * @param ruleId the data collection rule id that is configured to collect and transform the logs. - * @param streamName the stream name configured in data collection rule that matches defines the structure of the - * logs sent in this request. - * @param logs the collection of logs to be uploaded. - * @return the {@link Mono} that completes on completion of the upload request. - * @throws NullPointerException if any of {@code ruleId}, {@code streamName} or {@code logs} are null. - * @throws IllegalArgumentException if {@code logs} is empty. - */ - @ServiceMethod(returns = ReturnType.SINGLE) - public Mono upload(String ruleId, String streamName, Iterable logs) { - return upload(ruleId, streamName, logs, new LogsUploadOptions()); - } - - /** - * Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be - * too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split - * the input logs into multiple smaller requests before sending to the service. If an - * {@link LogsUploadOptions#setLogsUploadErrorConsumer(Consumer) error handler} is set, - * then the service errors are surfaced to the error handler and the subscriber of this method won't receive an - * error signal. - * - *

- * Each log in the input collection must be a valid JSON object. The JSON object should match the - * schema defined - * by the stream name. The stream's schema can be found in the Azure portal. - *

- * - * - *

Upload logs to Azure Monitor

- * - *
-     * List<Object> logs = getLogs();
-     * LogsUploadOptions logsUploadOptions = new LogsUploadOptions().setMaxConcurrency(4);
-     * logsIngestionAsyncClient.upload("<data-collection-rule-id>", "<stream-name>", logs, logsUploadOptions)
-     *         .subscribe();
-     * 
- * - * - * @param ruleId the data collection rule id that is configured to collect and transform the logs. - * @param streamName the stream name configured in data collection rule that matches defines the structure of the - * logs sent in this request. - * @param logs the collection of logs to be uploaded. - * @param options the options to configure the upload request. - * @return the {@link Mono} that completes on completion of the upload request. - * @throws NullPointerException if any of {@code ruleId}, {@code streamName} or {@code logs} are null. - * @throws IllegalArgumentException if {@code logs} is empty. - */ - @ServiceMethod(returns = ReturnType.SINGLE) - public Mono upload(String ruleId, String streamName, Iterable logs, LogsUploadOptions options) { - return withContext(context -> upload(ruleId, streamName, logs, options, context)); - } - - /** - * This method is used to upload logs to Azure Monitor Log Analytics with specified data collection rule id and - * stream name. This upload method provides a more granular control of the HTTP request sent to the service. Use - * {@link RequestOptions} to configure the HTTP request. - * - *

- * The input logs should be a JSON array with each element in the array - * matching the schema defined - * by the stream name. The stream's schema can be found in the Azure portal. This content will be gzipped before - * sending to the service. If the content is already gzipped, then set the {@code Content-Encoding} header to - * {@code gzip} using {@link RequestOptions#setHeader(HttpHeaderName, String) requestOptions} and pass the content - * as is. - *

- * - *

Header Parameters - * - * - * - * - * - * - *
Header Parameters
 Name | Type | Required | Description
 Content-Encoding | String | No | gzip
 x-ms-client-request-id | String | No | Client request Id
- * - *

Request Body Schema - * - *

{@code
-     * [
-     *     Object
-     * ]
-     * }
- * - * @param ruleId The immutable Id of the Data Collection Rule resource. - * @param streamName The streamDeclaration name as defined in the Data Collection Rule. - * @param logs An array of objects matching the schema defined by the provided stream. - * @param requestOptions The options to configure the HTTP request before HTTP client sends it. - * @return the {@link Response} on successful completion of {@link Mono}. - * @throws HttpResponseException thrown if the request is rejected by server. - * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. - * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. - * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. - */ - @ServiceMethod(returns = ReturnType.SINGLE) - public Mono> uploadWithResponse(String ruleId, String streamName, BinaryData logs, - RequestOptions requestOptions) { - Objects.requireNonNull(ruleId, "'ruleId' cannot be null."); - Objects.requireNonNull(streamName, "'streamName' cannot be null."); - Objects.requireNonNull(logs, "'logs' cannot be null."); - - if (requestOptions == null) { - requestOptions = new RequestOptions(); - } - requestOptions.addRequestCallback(request -> { - HttpHeader httpHeader = request.getHeaders().get(HttpHeaderName.CONTENT_ENCODING); - if (httpHeader == null) { - BinaryData gzippedRequest = BinaryData.fromBytes(gzipRequest(logs.toBytes())); - request.setBody(gzippedRequest); - request.setHeader(HttpHeaderName.CONTENT_ENCODING, GZIP); - } - }); - return service.uploadWithResponse(ruleId, streamName, logs, requestOptions); - } - - Mono upload(String ruleId, String streamName, Iterable logs, LogsUploadOptions options, - Context context) { - return Mono.defer(() -> splitAndUpload(ruleId, streamName, logs, options, context)); - } - - /** - * This method splits the input logs into < 1MB HTTP requests and uploads to the Azure Monitor service. - * - * @param ruleId The data collection rule id. - * @param streamName The stream name configured in the data collection rule. - * @param logs The input logs to upload. - * @param options The options to configure the upload request. - * @param context additional context that is passed through the Http pipeline during the service call. If no - * additional context is required, pass {@link Context#NONE} instead. - * @return the {@link Mono} that completes on completion of the upload request. 
- */ - private Mono splitAndUpload(String ruleId, String streamName, Iterable logs, - LogsUploadOptions options, Context context) { - - int concurrency = getConcurrency(options); - - return new Batcher(options, logs).toFlux() - .flatMapSequential(request -> uploadToService(ruleId, streamName, context, request), concurrency) - .handle((responseHolder, sink) -> processResponse(options, responseHolder, sink)) - .collectList() - .handle(this::processExceptions); - } - - private void processExceptions(List result, SynchronousSink sink) { - long failedLogsCount = 0L; - List exceptions = new ArrayList<>(); - for (LogsUploadException exception : result) { - exceptions.addAll(exception.getLogsUploadErrors()); - failedLogsCount += exception.getFailedLogsCount(); - } - if (!exceptions.isEmpty()) { - sink.error(new LogsUploadException(exceptions, failedLogsCount)); - } else { - sink.complete(); - } - } - - private void processResponse(LogsUploadOptions options, UploadLogsResponseHolder responseHolder, - SynchronousSink sink) { - if (responseHolder.getException() != null) { - Consumer uploadLogsErrorConsumer = null; - if (options != null) { - uploadLogsErrorConsumer = options.getLogsUploadErrorConsumer(); - } - if (uploadLogsErrorConsumer != null) { - uploadLogsErrorConsumer - .accept(new LogsUploadError(responseHolder.getException(), responseHolder.getRequest().getLogs())); - return; - } - // emit the responseHolder without the original logs only if there's an error and there's no - // error consumer - sink.next(new LogsUploadException(Collections.singletonList(responseHolder.getException()), - responseHolder.getRequest().getLogs().size())); - } - } - - private Mono uploadToService(String ruleId, String streamName, Context context, - LogsIngestionRequest request) { - RequestOptions requestOptions - = new RequestOptions().addHeader(HttpHeaderName.CONTENT_ENCODING, GZIP).setContext(context); - return service - .uploadWithResponse(ruleId, streamName, BinaryData.fromBytes(request.getRequestBody()), requestOptions) - .map(response -> new UploadLogsResponseHolder(null, null)) - .onErrorResume(HttpResponseException.class, - ex -> Mono.fromSupplier(() -> new UploadLogsResponseHolder(request, ex))); - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/LogsIngestionClient.java b/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/LogsIngestionClient.java deleted file mode 100644 index 1bc0d05cc07c..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/LogsIngestionClient.java +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package com.azure.monitor.ingestion; - -import com.azure.core.annotation.ReturnType; -import com.azure.core.annotation.ServiceClient; -import com.azure.core.annotation.ServiceMethod; -import com.azure.core.credential.TokenCredential; -import com.azure.core.exception.ClientAuthenticationException; -import com.azure.core.exception.HttpResponseException; -import com.azure.core.exception.ResourceModifiedException; -import com.azure.core.exception.ResourceNotFoundException; -import com.azure.core.http.HttpHeader; -import com.azure.core.http.HttpHeaderName; -import com.azure.core.http.rest.RequestOptions; -import com.azure.core.http.rest.Response; -import com.azure.core.util.BinaryData; -import com.azure.core.util.Context; -import com.azure.core.util.SharedExecutorService; -import com.azure.core.util.logging.ClientLogger; -import com.azure.monitor.ingestion.implementation.Batcher; -import com.azure.monitor.ingestion.implementation.IngestionUsingDataCollectionRulesClient; -import com.azure.monitor.ingestion.implementation.LogsIngestionRequest; -import com.azure.monitor.ingestion.implementation.UploadLogsResponseHolder; -import com.azure.monitor.ingestion.models.LogsUploadError; -import com.azure.monitor.ingestion.models.LogsUploadException; -import com.azure.monitor.ingestion.models.LogsUploadOptions; - -import java.util.List; -import java.util.Objects; -import java.util.concurrent.ExecutionException; -import java.util.function.Consumer; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static com.azure.monitor.ingestion.implementation.Utils.GZIP; -import static com.azure.monitor.ingestion.implementation.Utils.getConcurrency; -import static com.azure.monitor.ingestion.implementation.Utils.gzipRequest; - -/** - *

This class provides a synchronous client for uploading custom logs to an Azure Monitor Log Analytics workspace. - * This client encapsulates REST API calls, used to send data to a Log Analytics workspace, into a set of synchronous - * operations.

- * - *

Getting Started

- * - *

To create an instance of the {@link LogsIngestionClient}, use the {@link LogsIngestionClientBuilder} and configure - * the various options provided by the builder to customize the client as per your requirements. There are two required - * properties that should be set to build a client: - *

    - *
  1. {@code endpoint} - The data collection endpoint. - * See {@link LogsIngestionClientBuilder#endpoint(String) endpoint} method for more details.
  2. {@code credential} - The AAD authentication credential that has the "Monitoring Metrics Publisher" role assigned to it. - * Azure Identity - * provides a variety of AAD credential types that can be used. See - * {@link LogsIngestionClientBuilder#credential(TokenCredential) credential} method for more details. - *
- * - *

Instantiating a synchronous Logs ingestion client

- * - *
- * LogsIngestionClient logsIngestionClient = new LogsIngestionClientBuilder()
- *         .credential(tokenCredential)
- *         .endpoint("<data-collection-endpoint>")
- *         .buildClient();
- * 
- * - * - *

Client Usage

- * - *

- * For additional information on how to use this client, see the following method documentation: - *

- * - *
    - *
  • - * {@link #upload(String, String, Iterable) upload(String, String, Iterable)} - Uploads logs to a Log Analytics - * workspace. - *
  • - *
  • - * {@link #upload(String, String, Iterable, LogsUploadOptions) upload(String, String, Iterable, LogsUploadOptions)} - * - Uploads logs to a Log Analytics workspace with options to configure the upload request. - *
  • - *
  • - * {@link #uploadWithResponse(String, String, BinaryData, RequestOptions) uploadWithResponse(String, String, BinaryData, RequestOptions)} - * - Uploads logs to a Log Analytics workspace with options to configure the HTTP request. - *
  • - *
- * - * @see LogsIngestionClientBuilder - * @see LogsIngestionAsyncClient - * @see com.azure.monitor.ingestion - */ -@ServiceClient(builder = LogsIngestionClientBuilder.class) -public final class LogsIngestionClient implements AutoCloseable { - private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionClient.class); - private final IngestionUsingDataCollectionRulesClient client; - - /** - * Creates a {@link LogsIngestionClient} that sends requests to the data collection endpoint. - * - * @param client The {@link IngestionUsingDataCollectionRulesClient} that the client routes its request through. - */ - LogsIngestionClient(IngestionUsingDataCollectionRulesClient client) { - this.client = client; - } - - /** - * Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be - * too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split - * the input logs into multiple smaller requests before sending to the service. This method will block until all - * the logs are uploaded or an error occurs. - * - *

- * Each log in the input collection must be a valid JSON object. The JSON object should match the - * schema defined - * by the stream name. The stream's schema can be found in the Azure portal. - *

- * - *

Upload logs to Azure Monitor

- * - *
-     * List<Object> logs = getLogs();
-     * logsIngestionClient.upload("<data-collection-rule-id>", "<stream-name>", logs);
-     * System.out.println("Logs uploaded successfully");
-     * 
- * - * - * @param ruleId the data collection rule id that is configured to collect and transform the logs. - * @param streamName the stream name configured in data collection rule that matches defines the structure of the - * logs sent in this request. - * @param logs the collection of logs to be uploaded. - * @throws NullPointerException if any of {@code ruleId}, {@code streamName} or {@code logs} are null. - * @throws IllegalArgumentException if {@code logs} is empty. - */ - @ServiceMethod(returns = ReturnType.SINGLE) - public void upload(String ruleId, String streamName, Iterable logs) { - upload(ruleId, streamName, logs, null); - } - - /** - * Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be - * too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split - * the input logs into multiple smaller requests before sending to the service. This method will block until all - * the logs are uploaded or an error occurs. If an - * {@link LogsUploadOptions#setLogsUploadErrorConsumer(Consumer) error handler} is set, then the service errors are - * surfaced to the error handler and this method won't throw an exception. - * - *

- * Each log in the input collection must be a valid JSON object. The JSON object should match the - * schema defined - * by the stream name. The stream's schema can be found in the Azure portal. - *

- * - *

Upload logs to Azure Monitor

- * - *
-     * List<Object> logs = getLogs();
-     * LogsUploadOptions logsUploadOptions = new LogsUploadOptions().setMaxConcurrency(4);
-     * logsIngestionClient.upload("<data-collection-rule-id>", "<stream-name>", logs,
-     *         logsUploadOptions, Context.NONE);
-     * System.out.println("Logs uploaded successfully");
-     * 
- * - * - * @param ruleId the data collection rule id that is configured to collect and transform the logs. - * @param streamName the stream name configured in data collection rule that matches defines the structure of the - * logs sent in this request. - * @param logs the collection of logs to be uploaded. - * @param options the options to configure the upload request. - * @throws NullPointerException if any of {@code ruleId}, {@code streamName} or {@code logs} are null. - * @throws IllegalArgumentException if {@code logs} is empty. - */ - @ServiceMethod(returns = ReturnType.SINGLE) - public void upload(String ruleId, String streamName, Iterable logs, LogsUploadOptions options) { - upload(ruleId, streamName, logs, options, Context.NONE); - } - - /** - * Uploads logs to Azure Monitor with specified data collection rule id and stream name. The input logs may be - * too large to be sent as a single request to the Azure Monitor service. In such cases, this method will split - * the input logs into multiple smaller requests before sending to the service. This method will block until all - * the logs are uploaded or an error occurs. If an - * {@link LogsUploadOptions#setLogsUploadErrorConsumer(Consumer) error handler} is set, then the service errors are - * surfaced to the error handler and this method won't throw an exception. - * - *

- * Each log in the input collection must be a valid JSON object. The JSON object should match the - * schema defined - * by the stream name. The stream's schema can be found in the Azure portal. - *

- * - * @param ruleId the data collection rule id that is configured to collect and transform the logs. - * @param streamName the stream name configured in data collection rule that matches defines the structure of the - * logs sent in this request. - * @param logs the collection of logs to be uploaded. - * @param options the options to configure the upload request. - * @param context additional context that is passed through the Http pipeline during the service call. If no - * additional context is required, pass {@link Context#NONE} instead. - * @throws NullPointerException if any of {@code ruleId}, {@code streamName} or {@code logs} are null. - * @throws IllegalArgumentException if {@code logs} is empty. - */ - @ServiceMethod(returns = ReturnType.SINGLE) - public void upload(String ruleId, String streamName, Iterable logs, LogsUploadOptions options, - Context context) { - Objects.requireNonNull(ruleId, "'ruleId' cannot be null."); - Objects.requireNonNull(streamName, "'streamName' cannot be null."); - Objects.requireNonNull(logs, "'logs' cannot be null."); - - Consumer uploadLogsErrorConsumer - = options == null ? null : options.getLogsUploadErrorConsumer(); - - RequestOptions requestOptions = new RequestOptions(); - requestOptions.addHeader(HttpHeaderName.CONTENT_ENCODING, GZIP); - requestOptions.setContext(context); - - Stream responses - = new Batcher(options, logs).toStream().map(r -> uploadToService(ruleId, streamName, requestOptions, r)); - - responses = submit(responses, getConcurrency(options)).filter(response -> response.getException() != null); - - if (uploadLogsErrorConsumer != null) { - responses.forEach(response -> uploadLogsErrorConsumer - .accept(new LogsUploadError(response.getException(), response.getRequest().getLogs()))); - return; - } - - final int[] failedLogCount = new int[1]; - List exceptions = responses.map(response -> { - failedLogCount[0] += response.getRequest().getLogs().size(); - return response.getException(); - }).collect(Collectors.toList()); - - if (!exceptions.isEmpty()) { - throw LOGGER.logExceptionAsError(new LogsUploadException(exceptions, failedLogCount[0])); - } - } - - private Stream submit(Stream responseStream, int concurrency) { - if (concurrency == 1) { - return responseStream; - } - - try { - return SharedExecutorService.getInstance().submit(() -> responseStream).get(); - } catch (InterruptedException | ExecutionException e) { - throw LOGGER.logExceptionAsError(new RuntimeException(e)); - } - } - - private UploadLogsResponseHolder uploadToService(String ruleId, String streamName, RequestOptions requestOptions, - LogsIngestionRequest request) { - HttpResponseException exception = null; - try { - client.uploadWithResponse(ruleId, streamName, BinaryData.fromBytes(request.getRequestBody()), - requestOptions); - } catch (HttpResponseException ex) { - exception = ex; - } - - return new UploadLogsResponseHolder(request, exception); - } - - /** - * This method is used to upload logs to Azure Monitor Log Analytics with specified data collection rule id and - * stream name. This upload method provides a more granular control of the HTTP request sent to the service. Use - * {@link RequestOptions} to configure the HTTP request. - * - *

- * The input logs should be a JSON array with each element in the array - * matching the schema defined - * by the stream name. The stream's schema can be found in the Azure portal. This content will be gzipped before - * sending to the service. If the content is already gzipped, then set the {@code Content-Encoding} header to - * {@code gzip} using {@link RequestOptions#setHeader(HttpHeaderName, String) requestOptions} and pass the content - * as is. - *

- * - *

Header Parameters - * - * - * - * - * - * - *
- * <table border="1">
- *     <caption>Header Parameters</caption>
- *     <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
- *     <tr><td>Content-Encoding</td><td>String</td><td>No</td><td>gzip</td></tr>
- *     <tr><td>x-ms-client-request-id</td><td>String</td><td>No</td><td>Client request Id</td></tr>
- * </table>
- * - *

Request Body Schema - * - *

{@code
-     * [
-     *     Object
-     * ]
-     * }
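- *
- * Illustrative usage (an editor-added sketch, not part of the original source). It assumes {@code client} is an
- * already-built {@link LogsIngestionClient} and {@code gzippedJsonArray} is a byte array holding an already
- * gzip-compressed JSON array of logs; declaring the {@code Content-Encoding} header up front skips the automatic
- * compression described above:
- *
- * RequestOptions requestOptions = new RequestOptions()
- *         .setHeader(HttpHeaderName.CONTENT_ENCODING, "gzip");
- * Response<Void> response = client.uploadWithResponse("<data-collection-rule-id>", "<stream-name>",
- *         BinaryData.fromBytes(gzippedJsonArray), requestOptions);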
- * - * @param ruleId The immutable Id of the Data Collection Rule resource. - * @param streamName The streamDeclaration name as defined in the Data Collection Rule. - * @param logs An array of objects matching the schema defined by the provided stream. - * @param requestOptions The options to configure the HTTP request before HTTP client sends it. - * @return the {@link Response}. - * @throws HttpResponseException thrown if the request is rejected by server. - * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. - * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. - * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. - */ - @ServiceMethod(returns = ReturnType.SINGLE) - public Response uploadWithResponse(String ruleId, String streamName, BinaryData logs, - RequestOptions requestOptions) { - Objects.requireNonNull(ruleId, "'ruleId' cannot be null."); - Objects.requireNonNull(streamName, "'streamName' cannot be null."); - Objects.requireNonNull(logs, "'logs' cannot be null."); - - if (requestOptions == null) { - requestOptions = new RequestOptions(); - } - - requestOptions.addRequestCallback(request -> { - HttpHeader httpHeader = request.getHeaders().get(HttpHeaderName.CONTENT_ENCODING); - if (httpHeader == null) { - BinaryData gzippedRequest = BinaryData.fromBytes(gzipRequest(logs.toBytes())); - request.setBody(gzippedRequest); - request.setHeader(HttpHeaderName.CONTENT_ENCODING, GZIP); - } - }); - return client.uploadWithResponse(ruleId, streamName, logs, requestOptions); - } - - @Override - public void close() { - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/LogsIngestionClientBuilder.java b/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/LogsIngestionClientBuilder.java deleted file mode 100644 index 33f1ac6e3990..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/LogsIngestionClientBuilder.java +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.monitor.ingestion; - -import com.azure.core.annotation.ServiceClientBuilder; -import com.azure.core.client.traits.ConfigurationTrait; -import com.azure.core.client.traits.EndpointTrait; -import com.azure.core.client.traits.HttpTrait; -import com.azure.core.client.traits.TokenCredentialTrait; -import com.azure.core.credential.TokenCredential; -import com.azure.core.http.HttpClient; -import com.azure.core.http.HttpPipeline; -import com.azure.core.http.policy.HttpLogOptions; -import com.azure.core.http.policy.HttpPipelinePolicy; -import com.azure.core.http.policy.RetryOptions; -import com.azure.core.http.policy.RetryPolicy; -import com.azure.core.util.ClientOptions; -import com.azure.core.util.Configuration; -import com.azure.core.util.logging.ClientLogger; -import com.azure.monitor.ingestion.implementation.IngestionUsingDataCollectionRulesClientBuilder; -import com.azure.monitor.ingestion.implementation.IngestionUsingDataCollectionRulesServiceVersion; -import com.azure.monitor.ingestion.models.LogsIngestionAudience; - -import java.net.MalformedURLException; -import java.net.URL; - -/** - * Fluent builder for creating instances of {@link LogsIngestionClient} and {@link LogsIngestionAsyncClient}. 
The - * builder provides various options to customize the client as per your requirements. - * - *

There are two required properties that should be set to build a client: - *

    - *
- * <ol>
- * <li>{@code endpoint} - The data collection endpoint.
- * See {@link LogsIngestionClientBuilder#endpoint(String) endpoint} method for more details.</li>
- * <li>{@code credential} - The AAD authentication credential that has the "Monitoring Metrics Publisher" role assigned to it.
- * Azure Identity
- * provides a variety of AAD credential types that can be used. See
- * {@link LogsIngestionClientBuilder#credential(TokenCredential) credential} method for more details.</li>
- * </ol>
- * - *

Instantiating an asynchronous Logs ingestion client

- * - *
- * LogsIngestionAsyncClient logsIngestionAsyncClient = new LogsIngestionClientBuilder()
- *         .credential(tokenCredential)
- *         .endpoint("<data-collection-endpoint>")
- *         .buildAsyncClient();
- * 
- * - * - *

Instantiating a synchronous Logs ingestion client

- * - *
- * LogsIngestionClient logsIngestionClient = new LogsIngestionClientBuilder()
- *         .credential(tokenCredential)
- *         .endpoint("<data-collection-endpoint>")
- *         .buildClient();
- * 
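- *
- * Illustrative variant (an editor-added sketch, not part of the original source): for sovereign clouds, the
- * authentication audience can also be set on the same builder, for example:
- *
- * LogsIngestionClient sovereignCloudClient = new LogsIngestionClientBuilder()
- *         .credential(tokenCredential)
- *         .endpoint("<data-collection-endpoint>")
- *         .audience(LogsIngestionAudience.AZURE_GOVERNMENT)
- *         .buildClient();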
- * - */ -@ServiceClientBuilder(serviceClients = { LogsIngestionClient.class, LogsIngestionAsyncClient.class }) -public final class LogsIngestionClientBuilder - implements ConfigurationTrait, HttpTrait, - EndpointTrait, TokenCredentialTrait { - private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionClientBuilder.class); - private final IngestionUsingDataCollectionRulesClientBuilder innerLogBuilder - = new IngestionUsingDataCollectionRulesClientBuilder(); - private String endpoint; - private TokenCredential tokenCredential; - - /** - * Creates a new instance of {@link LogsIngestionClientBuilder}. - */ - public LogsIngestionClientBuilder() { - - } - - /** - * Sets the data collection endpoint. - * - * @param endpoint the data collection endpoint. - * @return the updated {@link LogsIngestionClientBuilder}. - */ - @Override - public LogsIngestionClientBuilder endpoint(String endpoint) { - try { - new URL(endpoint); - innerLogBuilder.endpoint(endpoint); - this.endpoint = endpoint; - return this; - } catch (MalformedURLException exception) { - throw LOGGER - .logExceptionAsError(new IllegalArgumentException("'endpoint' must be a valid URL.", exception)); - } - } - - /** - * {@inheritDoc} - */ - @Override - public LogsIngestionClientBuilder pipeline(HttpPipeline pipeline) { - innerLogBuilder.pipeline(pipeline); - return this; - } - - /** - * {@inheritDoc} - */ - @Override - public LogsIngestionClientBuilder httpClient(HttpClient httpClient) { - innerLogBuilder.httpClient(httpClient); - return this; - } - - /** - * {@inheritDoc} - */ - @Override - public LogsIngestionClientBuilder configuration(Configuration configuration) { - innerLogBuilder.configuration(configuration); - return this; - } - - /** - * {@inheritDoc} - */ - @Override - public LogsIngestionClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { - innerLogBuilder.httpLogOptions(httpLogOptions); - return this; - } - - /** - * Sets The retry policy that will attempt to retry failed requests, if applicable. - * - * @param retryPolicy the retryPolicy value. - * @return the updated {@link LogsIngestionClientBuilder}. - */ - public LogsIngestionClientBuilder retryPolicy(RetryPolicy retryPolicy) { - innerLogBuilder.retryPolicy(retryPolicy); - return this; - } - - /** - * {@inheritDoc} - */ - @Override - public LogsIngestionClientBuilder addPolicy(HttpPipelinePolicy customPolicy) { - innerLogBuilder.addPolicy(customPolicy); - return this; - } - - /** - * {@inheritDoc} - */ - @Override - public LogsIngestionClientBuilder retryOptions(RetryOptions retryOptions) { - innerLogBuilder.retryOptions(retryOptions); - return this; - } - - /** - * Sets the AAD authentication credential that has the "Monitoring Metrics Publisher" role assigned to it. - * Azure Identity - * provides a variety of AAD credential types that can be used. - * - * @param tokenCredential the tokenCredential value. - * @return the updated {@link LogsIngestionClientBuilder}. - */ - @Override - public LogsIngestionClientBuilder credential(TokenCredential tokenCredential) { - innerLogBuilder.credential(tokenCredential); - this.tokenCredential = tokenCredential; - return this; - } - - /** - * Sets the audience for the authorization scope of log ingestion clients. If this value is not set, the default - * audience will be the azure public cloud. - * - * @param audience the audience value. - * @return the updated {@link LogsIngestionClientBuilder}. 
- */ - public LogsIngestionClientBuilder audience(LogsIngestionAudience audience) { - innerLogBuilder.audience(audience); - return this; - } - - /** - * {@inheritDoc} - */ - @Override - public LogsIngestionClientBuilder clientOptions(ClientOptions clientOptions) { - innerLogBuilder.clientOptions(clientOptions); - return this; - } - - /** - * The service version to use when creating the client. By default, the latest service version is used. - * This is the value returned by the {@link LogsIngestionServiceVersion#getLatest() getLatest} method. - * - * @param serviceVersion The {@link LogsIngestionServiceVersion}. - * @return the updated {@link LogsIngestionClientBuilder}. - */ - public LogsIngestionClientBuilder serviceVersion(LogsIngestionServiceVersion serviceVersion) { - innerLogBuilder - .serviceVersion(IngestionUsingDataCollectionRulesServiceVersion.valueOf(serviceVersion.getVersion())); - return this; - } - - /** - * Creates a synchronous client with the configured options in this builder. - * - * @return A synchronous {@link LogsIngestionClient}. - */ - public LogsIngestionClient buildClient() { - if (endpoint == null) { - throw LOGGER.logExceptionAsError(new IllegalStateException("endpoint is required to build the client.")); - } - if (tokenCredential == null) { - throw LOGGER.logExceptionAsError(new IllegalStateException("credential is required to build the client.")); - } - return new LogsIngestionClient(innerLogBuilder.buildClient()); - } - - /** - * Creates an asynchronous client with the configured options in this builder. - * - * @return An asynchronous {@link LogsIngestionAsyncClient}. - */ - public LogsIngestionAsyncClient buildAsyncClient() { - if (endpoint == null) { - throw LOGGER.logExceptionAsError(new IllegalStateException("endpoint is required to build the client.")); - } - if (tokenCredential == null) { - throw LOGGER.logExceptionAsError(new IllegalStateException("credential is required to build the client.")); - } - return new LogsIngestionAsyncClient(innerLogBuilder.buildAsyncClient()); - } - -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/LogsIngestionServiceVersion.java b/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/LogsIngestionServiceVersion.java deleted file mode 100644 index 7405124c36b2..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/LogsIngestionServiceVersion.java +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.monitor.ingestion; - -import com.azure.core.util.ServiceVersion; - -/** - * The service version of the Azure Monitor service to upload logs. - */ -public enum LogsIngestionServiceVersion implements ServiceVersion { - /** - * Service version {@code 2023-01-01}. - */ - V2023_01_01("2023-01-01"); - - final String version; - - /** - * The service version. - * @param version The service version. - */ - LogsIngestionServiceVersion(String version) { - this.version = version; - } - - /** - * Returns the latest supported service version by this library. - * @return The latest supported service version by this library. 
- */ - public static LogsIngestionServiceVersion getLatest() { - return V2023_01_01; - } - - @Override - public String getVersion() { - return version; - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/Batcher.java b/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/Batcher.java deleted file mode 100644 index 281a78354d46..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/Batcher.java +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.monitor.ingestion.implementation; - -import com.azure.core.util.logging.ClientLogger; -import com.azure.core.util.serializer.JsonSerializer; -import com.azure.core.util.serializer.JsonSerializerProviders; -import com.azure.core.util.serializer.ObjectSerializer; -import com.azure.json.JsonProviders; -import com.azure.json.JsonWriter; -import com.azure.monitor.ingestion.models.LogsUploadOptions; -import reactor.core.publisher.Flux; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.UncheckedIOException; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Spliterator; -import java.util.Spliterators; -import java.util.stream.Stream; -import java.util.stream.StreamSupport; - -import static com.azure.monitor.ingestion.implementation.Utils.MAX_REQUEST_PAYLOAD_SIZE; -import static com.azure.monitor.ingestion.implementation.Utils.getConcurrency; -import static com.azure.monitor.ingestion.implementation.Utils.gzipRequest; - -/** - * Provides iterator and streams for batches over log objects. - */ -public class Batcher implements Iterator { - private static final ClientLogger LOGGER = new ClientLogger(Batcher.class); - private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true); - private final ObjectSerializer serializer; - private final int concurrency; - private final Iterator iterator; - private long currentBatchSize; - private List serializedLogs; - private List originalLogsRequest; - - public Batcher(LogsUploadOptions options, Iterable logs) { - this.serializer = getSerializer(options); - this.concurrency = getConcurrency(options); - this.serializedLogs = new ArrayList<>(); - this.originalLogsRequest = new ArrayList<>(); - this.iterator = logs.iterator(); - } - - /** - * Checks if there are more logs to batch. This method is not thread safe! - * - * When used concurrently, it should be synchronized along with {@link Batcher#next()}: - * - *
{@code
-     * synchronized (batcher) {
-     *     if (batcher.hasNext()) {
-     *         request = batcher.next();
-     *     }
-     * }
-     * }
- */ - @Override - public boolean hasNext() { - return iterator.hasNext() || currentBatchSize > 0; - } - - /** - * Collects next batch and serializes it into {@link LogsIngestionRequest}. This method is not thread-safe! - * - * Returns null when complete. - */ - @Override - public LogsIngestionRequest next() { - try { - return nextInternal(); - } catch (IOException ex) { - throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); - } - } - - /** - * Creates stream of requests split for configured concurrency. Returns parallel stream if concurrency is bigger than 1. - */ - public Stream toStream() { - if (concurrency == 1) { - return StreamSupport - .stream(Spliterators.spliteratorUnknownSize(this, Spliterator.NONNULL | Spliterator.ORDERED), false); - } - - return StreamSupport.stream(new ConcurrencyLimitingSpliterator<>(this, concurrency), true); - } - - /** - * Creates flux with requests. - */ - public Flux toFlux() { - return Flux.create(emitter -> { - try { - while (hasNext()) { - LogsIngestionRequest next = nextInternal(); - if (next != null) { - emitter.next(next); - } - } - } catch (IOException ex) { - emitter.error(ex); - } - - emitter.complete(); - }); - } - - private LogsIngestionRequest nextInternal() throws IOException { - LogsIngestionRequest result = null; - while (iterator.hasNext() && result == null) { - Object currentLog = iterator.next(); - byte[] bytes = serializer.serializeToBytes(currentLog); - currentBatchSize += bytes.length; - if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) { - result = createRequest(false); - currentBatchSize = bytes.length; - } - - serializedLogs.add(new String(bytes, StandardCharsets.UTF_8)); - originalLogsRequest.add(currentLog); - } - - if (result == null && currentBatchSize > 0) { - currentBatchSize = 0; - return createRequest(true); - } - - return result; - } - - private LogsIngestionRequest createRequest(boolean last) throws IOException { - try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - JsonWriter writer = JsonProviders.createWriter(byteArrayOutputStream)) { - writer.writeStartArray(); - for (String log : serializedLogs) { - writer.writeRawValue(log); - } - writer.writeEndArray(); - writer.flush(); - byte[] zippedRequestBody = gzipRequest(byteArrayOutputStream.toByteArray()); - return new LogsIngestionRequest(originalLogsRequest, zippedRequestBody); - } finally { - if (!last) { - originalLogsRequest = new ArrayList<>(); - serializedLogs.clear(); - } - } - } - - private static ObjectSerializer getSerializer(LogsUploadOptions options) { - if (options != null && options.getObjectSerializer() != null) { - return options.getObjectSerializer(); - } - - return DEFAULT_SERIALIZER; - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/ConcurrencyLimitingSpliterator.java b/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/ConcurrencyLimitingSpliterator.java deleted file mode 100644 index d7cf05e73e0b..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/ConcurrencyLimitingSpliterator.java +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package com.azure.monitor.ingestion.implementation; - -import java.util.Iterator; -import java.util.Objects; -import java.util.Spliterator; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Consumer; - -/** - * Splits a list of items into a given number of sub-lists so that the - * sub-lists can be processed concurrently. - *

- * Follows example here: https://docs.oracle.com/javase/8/docs/api/java/util/Spliterator.html - */ -class ConcurrencyLimitingSpliterator implements Spliterator { - private final AtomicInteger concurrency; - private final Iterator iterator; - - /** - * Creates spliterator. - * - * @param concurrency Number of sub-lists to split items to. When processing items concurrently, - * indicates number of threads to process items with. - */ - ConcurrencyLimitingSpliterator(Iterator iterator, int concurrency) { - Objects.requireNonNull(iterator, "'iterator' cannot be null"); - if (concurrency == 0) { - throw new IllegalArgumentException("'concurrency' must be a positive number."); - } - - this.concurrency = new AtomicInteger(concurrency); - this.iterator = iterator; - } - - @Override - public boolean tryAdvance(Consumer action) { - // this method is called on individual spliterators concurrently - // it synchronizes access to logs iterator while requesting the next batch. - T request = null; - synchronized (iterator) { - if (iterator.hasNext()) { - request = iterator.next(); - } - } - - if (request != null) { - action.accept(request); - return true; - } - - return false; - } - - @Override - public Spliterator trySplit() { - // here we split the stream, creating multiple spliterators that will be executed concurrently - return concurrency.getAndDecrement() > 1 ? new ConcurrencyLimitingSpliterator<>(iterator, 1) : null; - } - - @Override - public long estimateSize() { - return Integer.MAX_VALUE; - } - - @Override - public int characteristics() { - return NONNULL | ORDERED & ~(Spliterator.SIZED | Spliterator.SUBSIZED); - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/IngestionUsingDataCollectionRulesAsyncClient.java b/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/IngestionUsingDataCollectionRulesAsyncClient.java deleted file mode 100644 index cb070828fbc4..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/IngestionUsingDataCollectionRulesAsyncClient.java +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) AutoRest Code Generator. - -package com.azure.monitor.ingestion.implementation; - -import com.azure.core.annotation.Generated; -import com.azure.core.annotation.ReturnType; -import com.azure.core.annotation.ServiceClient; -import com.azure.core.annotation.ServiceMethod; -import com.azure.core.exception.ClientAuthenticationException; -import com.azure.core.exception.HttpResponseException; -import com.azure.core.exception.ResourceModifiedException; -import com.azure.core.exception.ResourceNotFoundException; -import com.azure.core.http.rest.RequestOptions; -import com.azure.core.http.rest.Response; -import com.azure.core.util.BinaryData; -import reactor.core.publisher.Mono; - -/** - * Initializes a new instance of the asynchronous IngestionUsingDataCollectionRulesClient type. - */ -@ServiceClient(builder = IngestionUsingDataCollectionRulesClientBuilder.class, isAsync = true) -public final class IngestionUsingDataCollectionRulesAsyncClient { - @Generated - private final IngestionUsingDataCollectionRulesClientImpl serviceClient; - - /** - * Initializes an instance of IngestionUsingDataCollectionRulesAsyncClient class. - * - * @param serviceClient the service client implementation. 
- */ - @Generated - IngestionUsingDataCollectionRulesAsyncClient(IngestionUsingDataCollectionRulesClientImpl serviceClient) { - this.serviceClient = serviceClient; - } - - /** - * Ingestion API used to directly ingest data using Data Collection Rules - * - * See error response code and error response message for more detail. - *

Header Parameters

- * - * - * - * - * - *
- * <table border="1">
- *     <caption>Header Parameters</caption>
- *     <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
- *     <tr><td>Content-Encoding</td><td>String</td><td>No</td><td>gzip</td></tr>
- *     <tr><td>x-ms-client-request-id</td><td>String</td><td>No</td><td>Client request Id</td></tr>
- * </table>
- * You can add these to a request with {@link RequestOptions#addHeader} - *
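- *
- * For illustration (an editor-added sketch, not part of the original source), a client request id header could be
- * added as follows:
- *
- * RequestOptions requestOptions = new RequestOptions()
- *         .addHeader(HttpHeaderName.fromString("x-ms-client-request-id"), "<client-request-id>");
- *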

Request Body Schema

- * - *
-     * {@code
-     * [
-     *     Object (Required)
-     * ]
-     * }
-     * 
- * - * @param ruleId The immutable Id of the Data Collection Rule resource. - * @param stream The streamDeclaration name as defined in the Data Collection Rule. - * @param body An array of objects matching the schema defined by the provided stream. - * @param requestOptions The options to configure the HTTP request before HTTP client sends it. - * @throws HttpResponseException thrown if the request is rejected by server. - * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. - * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. - * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. - * @return the {@link Response} on successful completion of {@link Mono}. - */ - @Generated - @ServiceMethod(returns = ReturnType.SINGLE) - public Mono> uploadWithResponse(String ruleId, String stream, BinaryData body, - RequestOptions requestOptions) { - return this.serviceClient.uploadWithResponseAsync(ruleId, stream, body, requestOptions); - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/IngestionUsingDataCollectionRulesClient.java b/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/IngestionUsingDataCollectionRulesClient.java deleted file mode 100644 index e020ab6ae74a..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/IngestionUsingDataCollectionRulesClient.java +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) AutoRest Code Generator. - -package com.azure.monitor.ingestion.implementation; - -import com.azure.core.annotation.Generated; -import com.azure.core.annotation.ReturnType; -import com.azure.core.annotation.ServiceClient; -import com.azure.core.annotation.ServiceMethod; -import com.azure.core.exception.ClientAuthenticationException; -import com.azure.core.exception.HttpResponseException; -import com.azure.core.exception.ResourceModifiedException; -import com.azure.core.exception.ResourceNotFoundException; -import com.azure.core.http.rest.RequestOptions; -import com.azure.core.http.rest.Response; -import com.azure.core.util.BinaryData; - -/** - * Initializes a new instance of the synchronous IngestionUsingDataCollectionRulesClient type. - */ -@ServiceClient(builder = IngestionUsingDataCollectionRulesClientBuilder.class) -public final class IngestionUsingDataCollectionRulesClient { - @Generated - private final IngestionUsingDataCollectionRulesClientImpl serviceClient; - - /** - * Initializes an instance of IngestionUsingDataCollectionRulesClient class. - * - * @param serviceClient the service client implementation. - */ - @Generated - IngestionUsingDataCollectionRulesClient(IngestionUsingDataCollectionRulesClientImpl serviceClient) { - this.serviceClient = serviceClient; - } - - /** - * Ingestion API used to directly ingest data using Data Collection Rules - * - * See error response code and error response message for more detail. - *

Header Parameters

- * - * - * - * - * - *
- * <table border="1">
- *     <caption>Header Parameters</caption>
- *     <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
- *     <tr><td>Content-Encoding</td><td>String</td><td>No</td><td>gzip</td></tr>
- *     <tr><td>x-ms-client-request-id</td><td>String</td><td>No</td><td>Client request Id</td></tr>
- * </table>
- * You can add these to a request with {@link RequestOptions#addHeader} - *

Request Body Schema

- * - *
-     * {@code
-     * [
-     *     Object (Required)
-     * ]
-     * }
-     * 
- * - * @param ruleId The immutable Id of the Data Collection Rule resource. - * @param stream The streamDeclaration name as defined in the Data Collection Rule. - * @param body An array of objects matching the schema defined by the provided stream. - * @param requestOptions The options to configure the HTTP request before HTTP client sends it. - * @throws HttpResponseException thrown if the request is rejected by server. - * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. - * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. - * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. - * @return the {@link Response}. - */ - @Generated - @ServiceMethod(returns = ReturnType.SINGLE) - public Response uploadWithResponse(String ruleId, String stream, BinaryData body, - RequestOptions requestOptions) { - return this.serviceClient.uploadWithResponse(ruleId, stream, body, requestOptions); - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/IngestionUsingDataCollectionRulesClientBuilder.java b/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/IngestionUsingDataCollectionRulesClientBuilder.java deleted file mode 100644 index 9b30c1f5ec0e..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/IngestionUsingDataCollectionRulesClientBuilder.java +++ /dev/null @@ -1,358 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) AutoRest Code Generator. -package com.azure.monitor.ingestion.implementation; - -import com.azure.core.annotation.Generated; -import com.azure.core.annotation.ServiceClientBuilder; -import com.azure.core.client.traits.ConfigurationTrait; -import com.azure.core.client.traits.EndpointTrait; -import com.azure.core.client.traits.HttpTrait; -import com.azure.core.client.traits.TokenCredentialTrait; -import com.azure.core.credential.TokenCredential; -import com.azure.core.http.HttpClient; -import com.azure.core.http.HttpHeaders; -import com.azure.core.http.HttpPipeline; -import com.azure.core.http.HttpPipelineBuilder; -import com.azure.core.http.HttpPipelinePosition; -import com.azure.core.http.policy.AddDatePolicy; -import com.azure.core.http.policy.AddHeadersFromContextPolicy; -import com.azure.core.http.policy.AddHeadersPolicy; -import com.azure.core.http.policy.BearerTokenAuthenticationPolicy; -import com.azure.core.http.policy.HttpLogOptions; -import com.azure.core.http.policy.HttpLoggingPolicy; -import com.azure.core.http.policy.HttpPipelinePolicy; -import com.azure.core.http.policy.HttpPolicyProviders; -import com.azure.core.http.policy.RequestIdPolicy; -import com.azure.core.http.policy.RetryOptions; -import com.azure.core.http.policy.RetryPolicy; -import com.azure.core.http.policy.UserAgentPolicy; -import com.azure.core.util.ClientOptions; -import com.azure.core.util.Configuration; -import com.azure.core.util.CoreUtils; -import com.azure.core.util.builder.ClientBuilderUtil; -import com.azure.core.util.logging.ClientLogger; -import com.azure.core.util.serializer.JacksonAdapter; -import com.azure.monitor.ingestion.models.LogsIngestionAudience; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Objects; - -/** - * A builder for creating a new instance of 
the IngestionUsingDataCollectionRulesClient type. - */ -@ServiceClientBuilder( - serviceClients = { - IngestionUsingDataCollectionRulesClient.class, - IngestionUsingDataCollectionRulesAsyncClient.class }) -public final class IngestionUsingDataCollectionRulesClientBuilder - implements HttpTrait, - ConfigurationTrait, - TokenCredentialTrait, - EndpointTrait { - - @Generated - private static final String SDK_NAME = "name"; - - @Generated - private static final String SDK_VERSION = "version"; - - @Generated - private static final String[] DEFAULT_SCOPES = new String[] { "https://monitor.azure.com//.default" }; - - @Generated - private static final Map PROPERTIES = CoreUtils.getProperties("azure-monitor-ingestion.properties"); - - @Generated - private final List pipelinePolicies; - - /** - * Create an instance of the IngestionUsingDataCollectionRulesClientBuilder. - */ - @Generated - public IngestionUsingDataCollectionRulesClientBuilder() { - this.pipelinePolicies = new ArrayList<>(); - } - - /* - * The HTTP pipeline to send requests through. - */ - @Generated - private HttpPipeline pipeline; - - /** - * {@inheritDoc}. - */ - @Generated - @Override - public IngestionUsingDataCollectionRulesClientBuilder pipeline(HttpPipeline pipeline) { - if (this.pipeline != null && pipeline == null) { - LOGGER.atInfo().log("HttpPipeline is being set to 'null' when it was previously configured."); - } - this.pipeline = pipeline; - return this; - } - - /* - * The HTTP client used to send the request. - */ - @Generated - private HttpClient httpClient; - - /** - * {@inheritDoc}. - */ - @Generated - @Override - public IngestionUsingDataCollectionRulesClientBuilder httpClient(HttpClient httpClient) { - this.httpClient = httpClient; - return this; - } - - /* - * The logging configuration for HTTP requests and responses. - */ - @Generated - private HttpLogOptions httpLogOptions; - - /** - * {@inheritDoc}. - */ - @Generated - @Override - public IngestionUsingDataCollectionRulesClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { - this.httpLogOptions = httpLogOptions; - return this; - } - - /* - * The client options such as application ID and custom headers to set on a request. - */ - @Generated - private ClientOptions clientOptions; - - /** - * {@inheritDoc}. - */ - @Generated - @Override - public IngestionUsingDataCollectionRulesClientBuilder clientOptions(ClientOptions clientOptions) { - this.clientOptions = clientOptions; - return this; - } - - /* - * The retry options to configure retry policy for failed requests. - */ - @Generated - private RetryOptions retryOptions; - - /** - * {@inheritDoc}. - */ - @Generated - @Override - public IngestionUsingDataCollectionRulesClientBuilder retryOptions(RetryOptions retryOptions) { - this.retryOptions = retryOptions; - return this; - } - - /** - * {@inheritDoc}. - */ - @Generated - @Override - public IngestionUsingDataCollectionRulesClientBuilder addPolicy(HttpPipelinePolicy customPolicy) { - Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null."); - pipelinePolicies.add(customPolicy); - return this; - } - - /* - * The configuration store that is used during construction of the service client. - */ - @Generated - private Configuration configuration; - - /** - * {@inheritDoc}. - */ - @Generated - @Override - public IngestionUsingDataCollectionRulesClientBuilder configuration(Configuration configuration) { - this.configuration = configuration; - return this; - } - - /* - * The TokenCredential used for authentication. 
- */ - @Generated - private TokenCredential tokenCredential; - - /** - * {@inheritDoc}. - */ - @Generated - @Override - public IngestionUsingDataCollectionRulesClientBuilder credential(TokenCredential tokenCredential) { - this.tokenCredential = tokenCredential; - return this; - } - - /* - * The service endpoint - */ - @Generated - private String endpoint; - - /** - * {@inheritDoc}. - */ - @Generated - @Override - public IngestionUsingDataCollectionRulesClientBuilder endpoint(String endpoint) { - this.endpoint = endpoint; - return this; - } - - /* - * Service version - */ - @Generated - private IngestionUsingDataCollectionRulesServiceVersion serviceVersion; - - /** - * Sets Service version. - * - * @param serviceVersion the serviceVersion value. - * @return the IngestionUsingDataCollectionRulesClientBuilder. - */ - @Generated - public IngestionUsingDataCollectionRulesClientBuilder - serviceVersion(IngestionUsingDataCollectionRulesServiceVersion serviceVersion) { - this.serviceVersion = serviceVersion; - return this; - } - - /* - * The retry policy that will attempt to retry failed requests, if applicable. - */ - @Generated - private RetryPolicy retryPolicy; - - /** - * Sets The retry policy that will attempt to retry failed requests, if applicable. - * - * @param retryPolicy the retryPolicy value. - * @return the IngestionUsingDataCollectionRulesClientBuilder. - */ - @Generated - public IngestionUsingDataCollectionRulesClientBuilder retryPolicy(RetryPolicy retryPolicy) { - this.retryPolicy = retryPolicy; - return this; - } - - /** - * Builds an instance of IngestionUsingDataCollectionRulesClientImpl with the provided parameters. - * - * @return an instance of IngestionUsingDataCollectionRulesClientImpl. - */ - @Generated - private IngestionUsingDataCollectionRulesClientImpl buildInnerClient() { - this.validateClient(); - HttpPipeline localPipeline = (pipeline != null) ? pipeline : createHttpPipeline(); - IngestionUsingDataCollectionRulesServiceVersion localServiceVersion - = (serviceVersion != null) ? serviceVersion : IngestionUsingDataCollectionRulesServiceVersion.getLatest(); - IngestionUsingDataCollectionRulesClientImpl client = new IngestionUsingDataCollectionRulesClientImpl( - localPipeline, JacksonAdapter.createDefaultSerializerAdapter(), this.endpoint, localServiceVersion); - return client; - } - - @Generated - private void validateClient() { - // This method is invoked from 'buildInnerClient'/'buildClient' method. - // Developer can customize this method, to validate that the necessary conditions are met for the new client. - Objects.requireNonNull(endpoint, "'endpoint' cannot be null."); - } - - @Generated - private HttpPipeline createHttpPipeline() { - Configuration buildConfiguration - = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; - HttpLogOptions localHttpLogOptions = this.httpLogOptions == null ? new HttpLogOptions() : this.httpLogOptions; - ClientOptions localClientOptions = this.clientOptions == null ? 
new ClientOptions() : this.clientOptions; - List policies = new ArrayList<>(); - String clientName = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); - String clientVersion = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); - String applicationId = CoreUtils.getApplicationId(localClientOptions, localHttpLogOptions); - policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration)); - policies.add(new RequestIdPolicy()); - policies.add(new AddHeadersFromContextPolicy()); - HttpHeaders headers = CoreUtils.createHttpHeadersFromClientOptions(localClientOptions); - if (headers != null) { - policies.add(new AddHeadersPolicy(headers)); - } - this.pipelinePolicies.stream() - .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_CALL) - .forEach(p -> policies.add(p)); - HttpPolicyProviders.addBeforeRetryPolicies(policies); - policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions, new RetryPolicy())); - policies.add(new AddDatePolicy()); - if (tokenCredential != null) { - policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, - audience == null ? DEFAULT_SCOPES : new String[] { audience.toString() })); - } - this.pipelinePolicies.stream() - .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_RETRY) - .forEach(p -> policies.add(p)); - HttpPolicyProviders.addAfterRetryPolicies(policies); - policies.add(new HttpLoggingPolicy(localHttpLogOptions)); - HttpPipeline httpPipeline = new HttpPipelineBuilder().policies(policies.toArray(new HttpPipelinePolicy[0])) - .httpClient(httpClient) - .clientOptions(localClientOptions) - .build(); - return httpPipeline; - } - - /** - * Builds an instance of IngestionUsingDataCollectionRulesAsyncClient class. - * - * @return an instance of IngestionUsingDataCollectionRulesAsyncClient. - */ - @Generated - public IngestionUsingDataCollectionRulesAsyncClient buildAsyncClient() { - return new IngestionUsingDataCollectionRulesAsyncClient(buildInnerClient()); - } - - /** - * Builds an instance of IngestionUsingDataCollectionRulesClient class. - * - * @return an instance of IngestionUsingDataCollectionRulesClient. - */ - @Generated - public IngestionUsingDataCollectionRulesClient buildClient() { - return new IngestionUsingDataCollectionRulesClient(buildInnerClient()); - } - - private static final ClientLogger LOGGER = new ClientLogger(IngestionUsingDataCollectionRulesClientBuilder.class); - - /** - * The audience indicating the authorization scope of log ingestion clients. - */ - @Generated() - private LogsIngestionAudience audience; - - /** - * Sets the audience. - * - * @param audience the audience indicating the authorization scope of log ingestion clients. - * @return the IngestionUsingDataCollectionRulesClientBuilder. - */ - @Generated() - public IngestionUsingDataCollectionRulesClientBuilder audience(LogsIngestionAudience audience) { - this.audience = audience; - return this; - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/IngestionUsingDataCollectionRulesClientImpl.java b/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/IngestionUsingDataCollectionRulesClientImpl.java deleted file mode 100644 index e9496c9bc9d1..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/IngestionUsingDataCollectionRulesClientImpl.java +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. 
-// Licensed under the MIT License. -// Code generated by Microsoft (R) AutoRest Code Generator. - -package com.azure.monitor.ingestion.implementation; - -import com.azure.core.annotation.BodyParam; -import com.azure.core.annotation.ExpectedResponses; -import com.azure.core.annotation.HeaderParam; -import com.azure.core.annotation.Host; -import com.azure.core.annotation.HostParam; -import com.azure.core.annotation.PathParam; -import com.azure.core.annotation.Post; -import com.azure.core.annotation.QueryParam; -import com.azure.core.annotation.ReturnType; -import com.azure.core.annotation.ServiceInterface; -import com.azure.core.annotation.ServiceMethod; -import com.azure.core.annotation.UnexpectedResponseExceptionType; -import com.azure.core.exception.ClientAuthenticationException; -import com.azure.core.exception.HttpResponseException; -import com.azure.core.exception.ResourceModifiedException; -import com.azure.core.exception.ResourceNotFoundException; -import com.azure.core.http.HttpPipeline; -import com.azure.core.http.HttpPipelineBuilder; -import com.azure.core.http.policy.RetryPolicy; -import com.azure.core.http.policy.UserAgentPolicy; -import com.azure.core.http.rest.RequestOptions; -import com.azure.core.http.rest.Response; -import com.azure.core.http.rest.RestProxy; -import com.azure.core.util.BinaryData; -import com.azure.core.util.Context; -import com.azure.core.util.FluxUtil; -import com.azure.core.util.logging.ClientLogger; -import com.azure.core.util.serializer.JacksonAdapter; -import com.azure.core.util.serializer.SerializerAdapter; -import reactor.core.publisher.Mono; - -/** - * Initializes a new instance of the IngestionUsingDataCollectionRulesClient type. - */ -public final class IngestionUsingDataCollectionRulesClientImpl { - /** - * The proxy service used to perform REST calls. - */ - private final IngestionUsingDataCollectionRulesClientService service; - - /** - * The Data Collection Endpoint for the Data Collection Rule, for example - * https://dce-name.eastus-2.ingest.monitor.azure.com. - */ - private final String endpoint; - - /** - * Gets The Data Collection Endpoint for the Data Collection Rule, for example - * https://dce-name.eastus-2.ingest.monitor.azure.com. - * - * @return the endpoint value. - */ - public String getEndpoint() { - return this.endpoint; - } - - /** - * Service version. - */ - private final IngestionUsingDataCollectionRulesServiceVersion serviceVersion; - - /** - * Gets Service version. - * - * @return the serviceVersion value. - */ - public IngestionUsingDataCollectionRulesServiceVersion getServiceVersion() { - return this.serviceVersion; - } - - /** - * The HTTP pipeline to send requests through. - */ - private final HttpPipeline httpPipeline; - - /** - * Gets The HTTP pipeline to send requests through. - * - * @return the httpPipeline value. - */ - public HttpPipeline getHttpPipeline() { - return this.httpPipeline; - } - - /** - * The serializer to serialize an object into a string. - */ - private final SerializerAdapter serializerAdapter; - - /** - * Gets The serializer to serialize an object into a string. - * - * @return the serializerAdapter value. - */ - public SerializerAdapter getSerializerAdapter() { - return this.serializerAdapter; - } - - /** - * Initializes an instance of IngestionUsingDataCollectionRulesClient client. - * - * @param endpoint The Data Collection Endpoint for the Data Collection Rule, for example - * https://dce-name.eastus-2.ingest.monitor.azure.com. - * @param serviceVersion Service version. 
- */ - IngestionUsingDataCollectionRulesClientImpl(String endpoint, - IngestionUsingDataCollectionRulesServiceVersion serviceVersion) { - this(new HttpPipelineBuilder().policies(new UserAgentPolicy(), new RetryPolicy()).build(), - JacksonAdapter.createDefaultSerializerAdapter(), endpoint, serviceVersion); - } - - /** - * Initializes an instance of IngestionUsingDataCollectionRulesClient client. - * - * @param httpPipeline The HTTP pipeline to send requests through. - * @param endpoint The Data Collection Endpoint for the Data Collection Rule, for example - * https://dce-name.eastus-2.ingest.monitor.azure.com. - * @param serviceVersion Service version. - */ - IngestionUsingDataCollectionRulesClientImpl(HttpPipeline httpPipeline, String endpoint, - IngestionUsingDataCollectionRulesServiceVersion serviceVersion) { - this(httpPipeline, JacksonAdapter.createDefaultSerializerAdapter(), endpoint, serviceVersion); - } - - /** - * Initializes an instance of IngestionUsingDataCollectionRulesClient client. - * - * @param httpPipeline The HTTP pipeline to send requests through. - * @param serializerAdapter The serializer to serialize an object into a string. - * @param endpoint The Data Collection Endpoint for the Data Collection Rule, for example - * https://dce-name.eastus-2.ingest.monitor.azure.com. - * @param serviceVersion Service version. - */ - IngestionUsingDataCollectionRulesClientImpl(HttpPipeline httpPipeline, SerializerAdapter serializerAdapter, - String endpoint, IngestionUsingDataCollectionRulesServiceVersion serviceVersion) { - this.httpPipeline = httpPipeline; - this.serializerAdapter = serializerAdapter; - this.endpoint = endpoint; - this.serviceVersion = serviceVersion; - this.service = RestProxy.create(IngestionUsingDataCollectionRulesClientService.class, this.httpPipeline, - this.getSerializerAdapter()); - } - - /** - * The interface defining all the services for IngestionUsingDataCollectionRulesClient to be used by the proxy - * service to perform REST calls. 
- */ - @Host("{endpoint}") - @ServiceInterface(name = "IngestionUsingDataCo") - public interface IngestionUsingDataCollectionRulesClientService { - @Post("/dataCollectionRules/{ruleId}/streams/{stream}") - @ExpectedResponses({ 204 }) - @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) - @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) - @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) - @UnexpectedResponseExceptionType(HttpResponseException.class) - Mono> upload(@HostParam("endpoint") String endpoint, @PathParam("ruleId") String ruleId, - @PathParam("stream") String stream, @QueryParam("api-version") String apiVersion, - @BodyParam("application/json") BinaryData body, @HeaderParam("Accept") String accept, - RequestOptions requestOptions, Context context); - - @Post("/dataCollectionRules/{ruleId}/streams/{stream}") - @ExpectedResponses({ 204 }) - @UnexpectedResponseExceptionType(value = ClientAuthenticationException.class, code = { 401 }) - @UnexpectedResponseExceptionType(value = ResourceNotFoundException.class, code = { 404 }) - @UnexpectedResponseExceptionType(value = ResourceModifiedException.class, code = { 409 }) - @UnexpectedResponseExceptionType(HttpResponseException.class) - Response uploadSync(@HostParam("endpoint") String endpoint, @PathParam("ruleId") String ruleId, - @PathParam("stream") String stream, @QueryParam("api-version") String apiVersion, - @BodyParam("application/json") BinaryData body, @HeaderParam("Accept") String accept, - RequestOptions requestOptions, Context context); - } - - /** - * Ingestion API used to directly ingest data using Data Collection Rules - * - * See error response code and error response message for more detail. - *

Header Parameters

- * - * - * - * - * - *
- * <table border="1">
- *     <caption>Header Parameters</caption>
- *     <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
- *     <tr><td>Content-Encoding</td><td>String</td><td>No</td><td>gzip</td></tr>
- *     <tr><td>x-ms-client-request-id</td><td>String</td><td>No</td><td>Client request Id</td></tr>
- * </table>
- * You can add these to a request with {@link RequestOptions#addHeader} - *

Request Body Schema

- * - *
-     * {@code
-     * [
-     *     Object (Required)
-     * ]
-     * }
-     * 
- * - * @param ruleId The immutable Id of the Data Collection Rule resource. - * @param stream The streamDeclaration name as defined in the Data Collection Rule. - * @param body An array of objects matching the schema defined by the provided stream. - * @param requestOptions The options to configure the HTTP request before HTTP client sends it. - * @throws HttpResponseException thrown if the request is rejected by server. - * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. - * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. - * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. - * @return the {@link Response} on successful completion of {@link Mono}. - */ - @ServiceMethod(returns = ReturnType.SINGLE) - public Mono> uploadWithResponseAsync(String ruleId, String stream, BinaryData body, - RequestOptions requestOptions) { - if (ruleId == null) { - throw LOGGER.atError() - .log(new IllegalArgumentException("Parameter ruleId is required and cannot be null.")); - } - if (stream == null) { - throw LOGGER.atError() - .log(new IllegalArgumentException("Parameter stream is required and cannot be null.")); - } - if (body == null) { - throw LOGGER.atError().log(new IllegalArgumentException("Parameter body is required and cannot be null.")); - } - final String accept = "application/json"; - return FluxUtil.withContext(context -> service.upload(this.getEndpoint(), ruleId, stream, - this.getServiceVersion().getVersion(), body, accept, requestOptions, context)); - } - - /** - * Ingestion API used to directly ingest data using Data Collection Rules - * - * See error response code and error response message for more detail. - *

Header Parameters

- * - * - * - * - * - *
- * <table border="1">
- *     <caption>Header Parameters</caption>
- *     <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
- *     <tr><td>Content-Encoding</td><td>String</td><td>No</td><td>gzip</td></tr>
- *     <tr><td>x-ms-client-request-id</td><td>String</td><td>No</td><td>Client request Id</td></tr>
- * </table>
- * You can add these to a request with {@link RequestOptions#addHeader} - *

Request Body Schema

- * - *
-     * {@code
-     * [
-     *     Object (Required)
-     * ]
-     * }
-     * 
- * - * @param ruleId The immutable Id of the Data Collection Rule resource. - * @param stream The streamDeclaration name as defined in the Data Collection Rule. - * @param body An array of objects matching the schema defined by the provided stream. - * @param requestOptions The options to configure the HTTP request before HTTP client sends it. - * @throws HttpResponseException thrown if the request is rejected by server. - * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. - * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. - * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. - * @return the {@link Response}. - */ - @ServiceMethod(returns = ReturnType.SINGLE) - public Response uploadWithResponse(String ruleId, String stream, BinaryData body, - RequestOptions requestOptions) { - if (ruleId == null) { - throw LOGGER.atError() - .log(new IllegalArgumentException("Parameter ruleId is required and cannot be null.")); - } - if (stream == null) { - throw LOGGER.atError() - .log(new IllegalArgumentException("Parameter stream is required and cannot be null.")); - } - if (body == null) { - throw LOGGER.atError().log(new IllegalArgumentException("Parameter body is required and cannot be null.")); - } - final String accept = "application/json"; - return service.uploadSync(this.getEndpoint(), ruleId, stream, this.getServiceVersion().getVersion(), body, - accept, requestOptions, Context.NONE); - } - - private static final ClientLogger LOGGER = new ClientLogger(IngestionUsingDataCollectionRulesClientImpl.class); -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/IngestionUsingDataCollectionRulesServiceVersion.java b/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/IngestionUsingDataCollectionRulesServiceVersion.java deleted file mode 100644 index 1a6e5ef84d75..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/IngestionUsingDataCollectionRulesServiceVersion.java +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) AutoRest Code Generator. - -package com.azure.monitor.ingestion.implementation; - -import com.azure.core.util.ServiceVersion; - -/** - * Service version of IngestionUsingDataCollectionRulesClient. - */ -public enum IngestionUsingDataCollectionRulesServiceVersion implements ServiceVersion { - /** - * Enum value 2023-01-01. - */ - V2023_01_01("2023-01-01"); - - private final String version; - - IngestionUsingDataCollectionRulesServiceVersion(String version) { - this.version = version; - } - - /** - * {@inheritDoc} - */ - @Override - public String getVersion() { - return this.version; - } - - /** - * Gets the latest service version supported by this client library. - * - * @return The latest {@link IngestionUsingDataCollectionRulesServiceVersion}. 
- */ - public static IngestionUsingDataCollectionRulesServiceVersion getLatest() { - return V2023_01_01; - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/LogsIngestionRequest.java b/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/LogsIngestionRequest.java deleted file mode 100644 index 2d1e8ccf5c6d..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/LogsIngestionRequest.java +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.monitor.ingestion.implementation; - -import java.util.List; - -public class LogsIngestionRequest { - - private final List logs; - private final byte[] requestBody; - - public LogsIngestionRequest(List logs, byte[] requestBody) { - this.logs = logs; - this.requestBody = requestBody; - } - - public List getLogs() { - return logs; - } - - public byte[] getRequestBody() { - return requestBody; - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/UploadLogsResponseHolder.java b/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/UploadLogsResponseHolder.java deleted file mode 100644 index 2fc11612a87a..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/UploadLogsResponseHolder.java +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.monitor.ingestion.implementation; - -import com.azure.core.annotation.Immutable; -import com.azure.core.exception.HttpResponseException; -import com.azure.core.models.ResponseError; - -/** - * The intermediate response holder for converting exceptions to {@link ResponseError} instances. - */ -@Immutable -public final class UploadLogsResponseHolder { - - private final HttpResponseException exception; - private final LogsIngestionRequest request; - - public UploadLogsResponseHolder(LogsIngestionRequest request, HttpResponseException ex) { - this.request = request; - this.exception = ex; - } - - public LogsIngestionRequest getRequest() { - return request; - } - - public HttpResponseException getException() { - return exception; - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/Utils.java b/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/Utils.java deleted file mode 100644 index 4049568e9ab1..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/Utils.java +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package com.azure.monitor.ingestion.implementation; - -import com.azure.core.util.logging.ClientLogger; -import com.azure.monitor.ingestion.models.LogsUploadOptions; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.UncheckedIOException; -import java.util.zip.GZIPOutputStream; - -public final class Utils { - public static final long MAX_REQUEST_PAYLOAD_SIZE = 1024 * 1024; // 1 MB - public static final String GZIP = "gzip"; - - private static final ClientLogger LOGGER = new ClientLogger(Utils.class); - - private Utils() { - } - - /** - * Gzips the input byte array. - * @param bytes The input byte array. - * @return gzipped byte array. - */ - public static byte[] gzipRequest(byte[] bytes) { - // This should be moved to azure-core and should be enabled when the client library requests for gzipping the - // request body content. - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - try (GZIPOutputStream zip = new GZIPOutputStream(byteArrayOutputStream)) { - zip.write(bytes); - } catch (IOException exception) { - throw LOGGER.logExceptionAsError(new UncheckedIOException(exception)); - } - return byteArrayOutputStream.toByteArray(); - } - - public static int getConcurrency(LogsUploadOptions options) { - if (options != null && options.getMaxConcurrency() != null) { - return options.getMaxConcurrency(); - } - - return 1; - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/package-info.java b/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/package-info.java deleted file mode 100644 index e7584cdc91ee..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/implementation/package-info.java +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) AutoRest Code Generator. - -/** - * Package containing the classes for IngestionUsingDataCollectionRules. - * null. - */ -package com.azure.monitor.ingestion.implementation; diff --git a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/models/LogsIngestionAudience.java b/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/models/LogsIngestionAudience.java deleted file mode 100644 index c14d7a690e6d..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/models/LogsIngestionAudience.java +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.monitor.ingestion.models; - -import com.azure.core.util.ExpandableStringEnum; - -import java.util.Collection; - -/** - * The audience indicating the authorization scope of log ingestion clients. - */ -public class LogsIngestionAudience extends ExpandableStringEnum { - - /** - * Static value for Azure Public Cloud. - */ - public static final LogsIngestionAudience AZURE_PUBLIC_CLOUD = fromString("https://monitor.azure.com//.default"); - - /** - * Static value for Azure US Government. - */ - public static final LogsIngestionAudience AZURE_GOVERNMENT = fromString("https://monitor.azure.us//.default"); - - /** - * Static value for Azure China. - */ - public static final LogsIngestionAudience AZURE_CHINA = fromString("https://monitor.azure.cn//.default"); - - /** - * @deprecated Creates an instance of LogsIngestionAudience. 
- */ - @Deprecated - LogsIngestionAudience() { - } - - /** - * Creates an instance of LogsIngestionAudience. - * - * @param name the string value. - * @return the LogsIngestionAudience. - */ - public static LogsIngestionAudience fromString(String name) { - return fromString(name, LogsIngestionAudience.class); - } - - /** - * Get the collection of LogsIngestionAudience values. - * - * @return the collection of LogsIngestionAudience values. - */ - public static Collection values() { - return values(LogsIngestionAudience.class); - } - -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/models/LogsUploadError.java b/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/models/LogsUploadError.java deleted file mode 100644 index 9d01f1370cde..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/models/LogsUploadError.java +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.monitor.ingestion.models; - -import com.azure.core.annotation.Immutable; -import com.azure.core.exception.HttpResponseException; - -import java.util.List; - -/** - * The model representing the error and the associated logs that failed when uploading a subset of logs to Azure - * Monitor. - */ -@Immutable -public final class LogsUploadError { - private final HttpResponseException responseException; - private final List failedLogs; - - /** - * Creates an instance of error. - * @param responseException the response exception containing the error details returned by the service. - * @param failedLogs the logs that failed to upload. - */ - public LogsUploadError(HttpResponseException responseException, List failedLogs) { - this.responseException = responseException; - this.failedLogs = failedLogs; - } - - /** - * Returns the response error containing the error details returned by the service. - * @return the response error containing the error details returned by the service. - */ - public HttpResponseException getResponseException() { - return responseException; - } - - /** - * Returns the logs that failed to upload. - * @return the logs that failed to upload. - */ - public List getFailedLogs() { - return failedLogs; - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/models/LogsUploadException.java b/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/models/LogsUploadException.java deleted file mode 100644 index cf9cc54ddc5b..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/models/LogsUploadException.java +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.monitor.ingestion.models; - -import com.azure.core.exception.AzureException; -import com.azure.core.exception.HttpResponseException; - -import java.util.List; - -/** - * An aggregate exception containing all inner exceptions that were caused from uploading logs. - */ -public class LogsUploadException extends AzureException { - /** - * Total count of all logs that were not uploaded to Azure Monitor due to errors. - */ - private final long failedLogsCount; - /** - * A list of all HTTP errors that occured when uploading logs to Azure Monitor service. - */ - private final List logsUploadErrors; - - /** - * Creates an instance of {@link LogsUploadException}. 
- * @param logsUploadErrors list of all HTTP response exceptions. - * @param failedLogsCount the total number of logs that failed to upload. - */ - public LogsUploadException(List logsUploadErrors, long failedLogsCount) { - this.logsUploadErrors = logsUploadErrors; - this.failedLogsCount = failedLogsCount; - } - - /** - * Returns the list of all HTTP response exceptions. - * @return The list of all errors. - */ - public List getLogsUploadErrors() { - return this.logsUploadErrors; - } - - /** - * Returns the total number for logs that failed to upload. - * @return the total number of logs that failed to upload. - */ - public long getFailedLogsCount() { - return failedLogsCount; - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/models/LogsUploadOptions.java b/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/models/LogsUploadOptions.java deleted file mode 100644 index 387fbc8149ea..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/models/LogsUploadOptions.java +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.monitor.ingestion.models; - -import com.azure.core.annotation.Fluent; -import com.azure.core.util.serializer.ObjectSerializer; - -import java.util.function.Consumer; - -/** - * The options model to configure the request to upload logs to Azure Monitor. - */ -@Fluent -public final class LogsUploadOptions { - private ObjectSerializer objectSerializer; - private Integer maxConcurrency; - private Consumer logsUploadErrorConsumer; - - /** - * Creates an instance of {@link LogsUploadOptions}. - */ - public LogsUploadOptions() { - - } - - /** - * Returns the serializer to use to convert the log objects to JSON. - * @return the serializer to use to convert the log objects to JSON. - */ - public ObjectSerializer getObjectSerializer() { - return objectSerializer; - } - - /** - * Sets the serializer to use to convert the log objects to JSON. - * @param objectSerializer the serializer to use to convert the log objects to JSON. - * @return the updated {@link LogsUploadOptions} instance. - */ - public LogsUploadOptions setObjectSerializer(ObjectSerializer objectSerializer) { - this.objectSerializer = objectSerializer; - return this; - } - - /** - * Returns the max concurrent requests to send to the Azure Monitor service when uploading logs. - * @return the max concurrent requests to send to the Azure Monitor service when uploading logs. - */ - public Integer getMaxConcurrency() { - return maxConcurrency; - } - - /** - * Sets the max concurrent requests to send to the Azure Monitor service when uploading logs. - * @param maxConcurrency the max concurrent requests to send to the Azure Monitor service when uploading logs. - * @return the updated {@link LogsUploadOptions} instance. - */ - public LogsUploadOptions setMaxConcurrency(Integer maxConcurrency) { - this.maxConcurrency = maxConcurrency; - return this; - } - - /** - * Returns the error handler that is called when a request to the Azure Monitor service to upload logs fails. - * @return the error handler that is called when a request to the Azure Monitor service to upload logs fails. - */ - public Consumer getLogsUploadErrorConsumer() { - return logsUploadErrorConsumer; - } - - /** - * Sets the error handler that is called when a request to the Azure Monitor service to upload logs fails. 
- * @param logsUploadErrorConsumer the error handler that is called when a request to the Azure Monitor service to - * upload logs fails. - * @return the updated {@link LogsUploadOptions} instance. - */ - public LogsUploadOptions setLogsUploadErrorConsumer(Consumer logsUploadErrorConsumer) { - this.logsUploadErrorConsumer = logsUploadErrorConsumer; - return this; - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/models/package-info.java b/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/models/package-info.java deleted file mode 100644 index 16eb3b425f93..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/models/package-info.java +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -/** - * Package containing models for uploading logs to Azure Monitor. - */ -package com.azure.monitor.ingestion.models; diff --git a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/package-info.java b/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/package-info.java deleted file mode 100644 index 633be4e69a7c..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/main/java/com/azure/monitor/ingestion/package-info.java +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -/** - * The Azure Monitor Ingestion client library is used to send custom logs to an Azure Monitor - * Log Analytics workspace. - * - *

- * Getting Started
- *
- * Prerequisites
- *
- * The client library requires the following:
- *
- *   • Java 8 or later
- *   • An Azure subscription
- *   • An existing Azure Monitor Data Collection Rule
- *   • An existing Azure Monitor Data Collection Endpoint
- *   • An existing Azure Monitor Log Analytics workspace
- *

- * Authenticate a Client
- *
- * The {@link com.azure.monitor.ingestion.LogsIngestionClient LogsIngestionClient} and
- * {@link com.azure.monitor.ingestion.LogsIngestionAsyncClient LogsIngestionAsyncClient} can be authenticated using
- * Microsoft Entra ID. To authenticate with Microsoft Entra ID, create a
- * {@link com.azure.core.credential.TokenCredential TokenCredential} that can be passed to the
- * {@link com.azure.monitor.ingestion.LogsIngestionClientBuilder LogsIngestionClientBuilder}. The Azure Identity
- * library provides implementations of {@link com.azure.core.credential.TokenCredential TokenCredential} for multiple
- * authentication flows. See the Azure Identity documentation for more information, and see
- * {@link com.azure.monitor.ingestion.LogsIngestionClientBuilder LogsIngestionClientBuilder} for more examples on
- * authenticating a client.
- *
- * The following sample demonstrates how to create a
- * {@link com.azure.monitor.ingestion.LogsIngestionClient LogsIngestionClient} using
- * {@link com.azure.monitor.ingestion.LogsIngestionClientBuilder LogsIngestionClientBuilder} and TokenCredential
- * authentication.
- *
- * LogsIngestionClient logsIngestionClient = new LogsIngestionClientBuilder()
- *         .credential(tokenCredential)
- *         .endpoint("<data-collection-endpoint>")
- *         .buildClient();
- *
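For non-public clouds, the same builder pattern applies with an explicit audience. The snippet below is a minimal sketch, assuming the LogsIngestionAudience values defined in com.azure.monitor.ingestion.models, the builder's audience(...) method used in this package's samples, and a DefaultAzureCredential from the Azure Identity library; the endpoint value is a placeholder.

    // Minimal sketch: client for Azure US Government, assuming LogsIngestionAudience and the
    // builder's audience(...) method as shown in this package's samples.
    LogsIngestionClient governmentClient = new LogsIngestionClientBuilder()
            .endpoint("<data-collection-endpoint>")
            .audience(LogsIngestionAudience.AZURE_GOVERNMENT)
            .credential(new DefaultAzureCredentialBuilder().build())
            .buildClient();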

- * Overview
- *
- * The Logs Ingestion REST API in Azure Monitor lets you send data to a Log Analytics workspace. The API allows you
- * to send data to supported tables or to custom tables that you create. You can also extend the schema of Azure
- * tables with custom columns to accept additional data.
- *
- * The Azure Monitor Ingestion client library provides both synchronous and asynchronous client implementations
- * that let you send custom logs to an Azure Monitor Log Analytics workspace.
- *

- * Key Concepts
- *
- * Data Collection Endpoint
- *
- * Data Collection Endpoints (DCEs) allow you to uniquely configure ingestion settings for Azure Monitor. The Azure
- * Monitor documentation provides an overview of data collection endpoints, including their contents and structure
- * and how you can create and work with them.
- *
- * Data Collection Rule
- *
- * Data collection rules (DCRs) define data collected by Azure Monitor and specify how and where that data should be
- * sent or stored. The REST API call must specify a DCR to use. A single DCE can support multiple DCRs, so you can
- * specify a different DCR for different sources and target tables.
- *
- * The DCR must understand the structure of the input data and the structure of the target table. If the two don't
- * match, it can use a transformation to convert the source data to match the target table. You may also use the
- * transform to filter source data and perform any other calculations or conversions.
- *
- * For more details, see Data collection rules in Azure Monitor. For information on how to retrieve a DCR ID, see
- * this tutorial.
- *
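To make the notion of input structure concrete, the samples in this package use a small CustomLogData model with Time, ExtendedColumn, and AdditionalContext properties. The sketch below builds a batch of such logs; the column names are only an example of what a stream declaration might define, not requirements of the service.

    // Sketch: a batch of sample log objects shaped like the CustomLogData model used by the
    // samples in this package. The column names are illustrative.
    List<Object> logs = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
        logs.add(new CustomLogData()
                .setTime(OffsetDateTime.now())
                .setExtendedColumn("extend column data " + i)
                .setAdditionalContext("more logs context"));
    }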

- * Log Analytics Workspace Tables
- *
- * Custom logs can send data to any custom table that you create and to certain built-in tables in your Log Analytics
- * workspace. The target table must exist before you can send data to it.
- *
- * Logs retrieval
- *
- * The logs that were uploaded using this library can be queried using the Azure Monitor Query client library.
- *
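As an illustration of the retrieval side, a minimal sketch using the azure-monitor-query library's LogsQueryClient, LogsQueryClientBuilder, LogsQueryResult, and QueryTimeInterval types might look like the following; the workspace ID and table name are placeholders.

    // Sketch: querying previously uploaded logs with the Azure Monitor Query client library.
    // Replace the workspace ID and table name placeholders with real values.
    LogsQueryClient queryClient = new LogsQueryClientBuilder()
            .credential(new DefaultAzureCredentialBuilder().build())
            .buildClient();

    LogsQueryResult result = queryClient.queryWorkspace("<workspace-id>",
            "<custom-table-name> | count", QueryTimeInterval.LAST_DAY);
    System.out.println("Row count: " + result.getTable().getRows().size());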
- * - *

Client Usage

- * - *

- * Uploading logs to Azure Monitor - *

- * - *

- * The following sample demonstrates how to upload logs to Azure Monitor using - * {@link com.azure.monitor.ingestion.LogsIngestionClient LogIngestionClient}. - *

- * - * - *
- * List<Object> logs = getLogs();
- * logsIngestionClient.upload("<data-collection-rule-id>", "<stream-name>", logs);
- * System.out.println("Logs uploaded successfully");
- * 
- * - * - *

- * For more synchronous and asynchronous client usage information, see {@link com.azure.monitor.ingestion.LogsIngestionClient} and - * {@link com.azure.monitor.ingestion.LogsIngestionAsyncClient}, respectively. - *
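For the asynchronous path with per-request error handling, a brief sketch along the lines of the async samples in this package is shown below; the endpoint, rule ID, and stream name are placeholders, and getLogs() stands in for your own log source.

    // Sketch: asynchronous upload with an error consumer, mirroring the async samples in this
    // package. subscribe() triggers the upload; per-request failures are reported to the
    // consumer configured on LogsUploadOptions.
    LogsIngestionAsyncClient asyncClient = new LogsIngestionClientBuilder()
            .endpoint("<data-collection-endpoint>")
            .credential(new DefaultAzureCredentialBuilder().build())
            .buildAsyncClient();

    LogsUploadOptions options = new LogsUploadOptions()
            .setLogsUploadErrorConsumer(error ->
                    System.out.println("Failed logs count: " + error.getFailedLogs().size()));

    asyncClient.upload("<data-collection-rule-id>", "<stream-name>", getLogs(), options)
            .subscribe();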

- * - * @see com.azure.monitor.ingestion.LogsIngestionClient - * @see com.azure.monitor.ingestion.LogsIngestionAsyncClient - * @see com.azure.monitor.ingestion.LogsIngestionClientBuilder - * - */ -package com.azure.monitor.ingestion; diff --git a/sdk/monitor/azure-monitor-ingestion/src/main/java/module-info.java b/sdk/monitor/azure-monitor-ingestion/src/main/java/module-info.java deleted file mode 100644 index b32a3e25d837..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/main/java/module-info.java +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) AutoRest Code Generator. - -module com.azure.monitor.ingestion { - requires transitive com.azure.core; - - exports com.azure.monitor.ingestion; - exports com.azure.monitor.ingestion.models; - - opens com.azure.monitor.ingestion to com.azure.core; -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/main/resources/azure-monitor-ingestion.properties b/sdk/monitor/azure-monitor-ingestion/src/main/resources/azure-monitor-ingestion.properties deleted file mode 100644 index ca812989b4f2..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/main/resources/azure-monitor-ingestion.properties +++ /dev/null @@ -1,2 +0,0 @@ -name=${project.artifactId} -version=${project.version} diff --git a/sdk/monitor/azure-monitor-ingestion/src/samples/README.md b/sdk/monitor/azure-monitor-ingestion/src/samples/README.md deleted file mode 100644 index 7c19a54787a1..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/samples/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# Azure Monitor Ingestion client library for Java - -## Getting started -Getting started explained in detail [here][SDK_README_GETTING_STARTED]. - -## Key concepts -Key concepts are explained in detail [here][SDK_README_KEY_CONCEPTS]. - -## Examples -The following sections provide several code snippets covering some of the most common scenarios, including: - -## Troubleshooting -Troubleshooting steps can be found [here][SDK_README_TROUBLESHOOTING]. - -## Next steps -See [Next steps][SDK_README_NEXT_STEPS]. - -## Contributing -This project welcomes contributions and suggestions. Find [more contributing][SDK_README_CONTRIBUTING] details here. - - -[SDK_README_CONTRIBUTING]: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/monitor/azure-monitor-ingestion/README.md#contributing -[SDK_README_GETTING_STARTED]: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/monitor/azure-monitor-ingestion/README.md#getting-started -[SDK_README_TROUBLESHOOTING]: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/monitor/azure-monitor-ingestion/README.md#troubleshooting -[SDK_README_KEY_CONCEPTS]: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/monitor/azure-monitor-ingestion/README.md#key-concepts -[SDK_README_NEXT_STEPS]: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/monitor/azure-monitor-ingestion/README.md#next-steps - - diff --git a/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/CustomLogData.java b/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/CustomLogData.java deleted file mode 100644 index 3a710f8a9dd6..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/CustomLogData.java +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package com.azure.monitor.ingestion; - -import com.fasterxml.jackson.annotation.JsonProperty; - -import java.time.OffsetDateTime; - -/** - * An object representing custom log data as defined in the data collection rule's stream declaration. - */ -public final class CustomLogData { - - @JsonProperty(value = "Time") - private OffsetDateTime time; - - - @JsonProperty(value = "ExtendedColumn") - private String extendedColumn; - - - @JsonProperty(value = "AdditionalContext") - private String additionalContext; - - /** - * Returns the time of creation of this log. - * @return the time of creation of this log. - */ - public OffsetDateTime getTime() { - return time; - } - - /** - * Sets the time of creation of this log. - * @param time the time of creation of this log. - * @return the updated {@link CustomLogData}. - */ - public CustomLogData setTime(OffsetDateTime time) { - this.time = time; - return this; - } - - /** - * Returns the extended column field of this log. - * @return the extended column field of this log. - */ - public String getExtendedColumn() { - return extendedColumn; - } - - /** - * Sets the extended column field of this log. - * @param extendedColumn the extended column field of this log. - * @return the updated {@link CustomLogData}. - */ - public CustomLogData setExtendedColumn(String extendedColumn) { - this.extendedColumn = extendedColumn; - return this; - } - - /** - * Returns the additional context field of this log. - * @return the additional context field of this log. - */ - public String getAdditionalContext() { - return additionalContext; - } - - /** - * Sets the additional context field of this log. - * @param additionalContext the additional context field of this log. - * @return the updated {@link CustomLogData}. - */ - public CustomLogData setAdditionalContext(String additionalContext) { - this.additionalContext = additionalContext; - return this; - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/CustomLogSerializer.java b/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/CustomLogSerializer.java deleted file mode 100644 index c6f186a45f5d..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/CustomLogSerializer.java +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.monitor.ingestion; - -import com.azure.core.util.serializer.ObjectSerializer; -import com.azure.core.util.serializer.TypeReference; -import com.fasterxml.jackson.core.JsonEncoding; -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.core.JsonGenerator; -import reactor.core.publisher.Mono; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.time.format.DateTimeFormatter; - -/*** - * Custom serializer sample for the `CustomLogData` class. - * Only the `serialize` method is implemented as we are not expected to deserialize the data here. 
- */ -public class CustomLogSerializer implements ObjectSerializer { - - private static final DateTimeFormatter FORMATTER = DateTimeFormatter.ofPattern("yyyy/MM/dd HH:mm:ssZZZ"); - private final JsonFactory jsonFactory; - - public CustomLogSerializer() { - jsonFactory = JsonFactory.builder().build(); - } - - @Override - public T deserialize(InputStream stream, TypeReference typeReference) { - // This method will never be called - throw new UnsupportedOperationException("Deserialize called on custom serializer. Which should not happen."); - } - - @Override - public Mono deserializeAsync(InputStream stream, TypeReference typeReference) { - return Mono.fromCallable(() -> deserialize(stream, typeReference)); - } - - @Override - public void serialize(OutputStream stream, Object value) { - if (!(value instanceof CustomLogData)) { - throw new RuntimeException("Unknown object type passed to custom serializer"); - } - - final JsonGenerator gen; - final CustomLogData data = (CustomLogData) value; - try { - gen = jsonFactory.createGenerator(stream, JsonEncoding.UTF8); - gen.writeStartObject(); - gen.writeStringField("logTime", FORMATTER.format(data.getTime())); - gen.writeStringField("extendedColumn", data.getExtendedColumn()); - gen.writeStringField("additionalContext", data.getAdditionalContext()); - gen.writeEndObject(); - } catch (IOException e) { - throw new RuntimeException("Unexpected IO exception.", e); - } - } - - @Override - public Mono serializeAsync(OutputStream stream, Object value) { - return Mono.fromRunnable(() -> serialize(stream, value)); - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/CustomSerializerSample.java b/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/CustomSerializerSample.java deleted file mode 100644 index 4934a236ec63..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/CustomSerializerSample.java +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.monitor.ingestion; - -import com.azure.core.util.serializer.ObjectSerializer; -import com.azure.identity.DefaultAzureCredentialBuilder; -import com.azure.monitor.ingestion.models.LogsUploadOptions; - -import java.time.OffsetDateTime; -import java.util.ArrayList; -import java.util.List; - -public class CustomSerializerSample { - /** - * Main method to run the sample. - * @param args ignore args. 
- */ - public static void main(String[] args) { - LogsIngestionClient client = new LogsIngestionClientBuilder() - .endpoint("") - .credential(new DefaultAzureCredentialBuilder().build()) - .buildClient(); - - List dataList = getLogs(); - - ObjectSerializer customSerializer = new CustomLogSerializer(); - - LogsUploadOptions options = new LogsUploadOptions() - .setObjectSerializer(customSerializer); - - client.upload("", - "", - dataList, - options); - } - - private static List getLogs() { - List logs = new ArrayList<>(); - - for (int i = 0; i < 10; i++) { - CustomLogData e = new CustomLogData() - .setTime(OffsetDateTime.now()) - .setExtendedColumn("extend column data" + i) - .setAdditionalContext("more logs context"); - logs.add(e); - } - return logs; - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/ReadmeSamples.java b/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/ReadmeSamples.java deleted file mode 100644 index 4fe706484b9e..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/ReadmeSamples.java +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.monitor.ingestion; - -import com.azure.core.http.policy.HttpLogDetailLevel; -import com.azure.core.http.policy.HttpLogOptions; -import com.azure.core.util.Context; -import com.azure.identity.DefaultAzureCredential; -import com.azure.identity.DefaultAzureCredentialBuilder; -import com.azure.monitor.ingestion.models.LogsUploadOptions; - -import java.util.List; - -/** - * Class to include all the README.md code samples. - */ -public final class ReadmeSamples { - - /** - * Sample to demonstrate creation of a synchronous client. - */ - public void createClient() { - // BEGIN: readme-sample-createLogsIngestionClient - DefaultAzureCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); - - LogsIngestionClient client = new LogsIngestionClientBuilder() - .endpoint("") - .credential(tokenCredential) - .buildClient(); - // END: readme-sample-createLogsIngestionClient - } - - /** - * Sample to demonstrate creation of an asynchronous client. - */ - public void createAsyncClient() { - // BEGIN: readme-sample-createLogsIngestionAsyncClient - DefaultAzureCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); - - LogsIngestionAsyncClient asyncClient = new LogsIngestionClientBuilder() - .endpoint("") - .credential(tokenCredential) - .buildAsyncClient(); - // END: readme-sample-createLogsIngestionAsyncClient - } - - /** - * Sample to demonstrate uploading logs to Azure Monitor. - */ - public void uploadLogs() { - // BEGIN: readme-sample-uploadLogs - DefaultAzureCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); - - LogsIngestionClient client = new LogsIngestionClientBuilder() - .endpoint("") - .credential(tokenCredential) - .buildClient(); - - List logs = getLogs(); - client.upload("", "", logs); - System.out.println("Logs uploaded successfully"); - // END: readme-sample-uploadLogs - } - - /** - * Sample to demonstrate uploading logs to Azure Monitor. 
- */ - public void uploadLogsWithMaxConcurrency() { - // BEGIN: readme-sample-uploadLogsWithMaxConcurrency - DefaultAzureCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); - - LogsIngestionClient client = new LogsIngestionClientBuilder() - .endpoint("") - .credential(tokenCredential) - .buildClient(); - - List logs = getLogs(); - LogsUploadOptions logsUploadOptions = new LogsUploadOptions() - .setMaxConcurrency(3); - client.upload("", "", logs, logsUploadOptions, - Context.NONE); - System.out.println("Logs uploaded successfully"); - // END: readme-sample-uploadLogsWithMaxConcurrency - } - - - /** - * Sample to demonstrate uploading logs to Azure Monitor. - */ - public void uploadLogsWithErrorHandler() { - // BEGIN: readme-sample-uploadLogs-error-handler - DefaultAzureCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); - - LogsIngestionClient client = new LogsIngestionClientBuilder() - .endpoint("") - .credential(tokenCredential) - .buildClient(); - - List logs = getLogs(); - - LogsUploadOptions logsUploadOptions = new LogsUploadOptions() - .setLogsUploadErrorConsumer(uploadLogsError -> { - System.out.println("Error message " + uploadLogsError.getResponseException().getMessage()); - System.out.println("Total logs failed to upload = " + uploadLogsError.getFailedLogs().size()); - - // throw the exception here to abort uploading remaining logs - // throw uploadLogsError.getResponseException(); - }); - client.upload("", "", logs, logsUploadOptions, - Context.NONE); - // END: readme-sample-uploadLogs-error-handler - } - - /** - * Enable HTTP request and response logging. - */ - public void tsgEnableHttpLogging() { - DefaultAzureCredential credential = new DefaultAzureCredentialBuilder().build(); - - // BEGIN: readme-sample-enablehttplogging - LogsIngestionClient logsIngestionClient = new LogsIngestionClientBuilder() - .endpoint("") - .credential(credential) - .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) - .buildClient(); - // END: readme-sample-enablehttplogging - } - - - - private List getLogs() { - return null; - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/UploadLogsAsyncClientErrorHandlingSample.java b/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/UploadLogsAsyncClientErrorHandlingSample.java deleted file mode 100644 index 1c376ea8e1ba..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/UploadLogsAsyncClientErrorHandlingSample.java +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.monitor.ingestion; - -import com.azure.core.exception.HttpResponseException; -import com.azure.identity.DefaultAzureCredentialBuilder; -import com.azure.monitor.ingestion.models.LogsUploadOptions; -import reactor.core.publisher.Mono; - -import java.time.Duration; -import java.time.OffsetDateTime; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; - -/** - * This sample demonstrates uploading logs to Azure Monitor using the async client and configure an error - * handler to handle any failures when uploading logs to the service. - */ -public class UploadLogsAsyncClientErrorHandlingSample { - private static final Duration TIMEOUT = Duration.ofSeconds(10); - /** - * Main method to run the sample. 
- * @param args ignore args. - */ - public static void main(String[] args) throws InterruptedException { - UploadLogsAsyncClientErrorHandlingSample sample = new UploadLogsAsyncClientErrorHandlingSample(); - sample.run(); - } - - private void run() throws InterruptedException { - LogsIngestionAsyncClient client = new LogsIngestionClientBuilder() - .endpoint("") - .credential(new DefaultAzureCredentialBuilder().build()) - .buildAsyncClient(); - - CountDownLatch countdownLatch = new CountDownLatch(1); - List dataList = getLogs(); - - // Configure the error handler to inspect HTTP request failure and the logs associated with the failed - // request. A single client.upload() call can be broken down by the client into smaller HTTP requests, so, - // this error handler can be called multiple times if there are multiple HTTP request failures. - LogsUploadOptions logsUploadOptions = new LogsUploadOptions() - .setLogsUploadErrorConsumer(uploadLogsError -> { - HttpResponseException responseException = uploadLogsError.getResponseException(); - System.out.println(responseException.getMessage()); - System.out.println("Failed logs count " + uploadLogsError.getFailedLogs().size()); - }); - - // More details on Mono<> can be found in the project reactor documentation at : - // https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html - Mono resultMono = client.upload("", - "", dataList, logsUploadOptions); - - resultMono.subscribe( - ignored -> { - // returns void - }, - error -> { - // service errors are handled by the error consumer but if the error consumer throws - // an exception or if a required param like data collection rule id is null, upload operation will - // abort and the exception will be handled here - System.out.println(error.getMessage()); - }, - countdownLatch::countDown); - - // Subscribe is not a blocking call, so we wait here so the program does not terminate. - countdownLatch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS); - } - - private static List getLogs() { - List logs = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - CustomLogData e = new CustomLogData() - .setTime(OffsetDateTime.now()) - .setExtendedColumn("extend column data" + i); - logs.add(e); - } - return logs; - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/UploadLogsAsyncClientSample.java b/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/UploadLogsAsyncClientSample.java deleted file mode 100644 index ae3c576cbccf..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/UploadLogsAsyncClientSample.java +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.monitor.ingestion; - -import com.azure.identity.DefaultAzureCredentialBuilder; -import com.azure.monitor.ingestion.models.LogsUploadException; -import reactor.core.publisher.Mono; - -import java.time.Duration; -import java.time.OffsetDateTime; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; - -/** - * Sample to demonstrate uploading logs to Azure Monitor using the Async client. - */ -public class UploadLogsAsyncClientSample { - - private static final Duration TIMEOUT = Duration.ofSeconds(10); - /** - * Main method to run the sample. - * @param args ignore args. 
- */ - public static void main(String[] args) throws InterruptedException { - UploadLogsAsyncClientSample sample = new UploadLogsAsyncClientSample(); - sample.run(); - } - - private void run() throws InterruptedException { - LogsIngestionAsyncClient client = new LogsIngestionClientBuilder() - .endpoint("") - .credential(new DefaultAzureCredentialBuilder().build()) - .buildAsyncClient(); - - CountDownLatch countdownLatch = new CountDownLatch(1); - List dataList = getLogs(); - // More details on Mono<> can be found in the project reactor documentation at : - // https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html - Mono resultMono = client.upload("", - "", dataList); - - resultMono.subscribe( - ignored -> { - // returns void - }, - error -> { - // If any exceptions are thrown, they are handled here. - if (error instanceof LogsUploadException) { - LogsUploadException ex = (LogsUploadException) error; - System.out.println("Failed to upload " + ex.getFailedLogsCount() + "logs."); - } - }, - countdownLatch::countDown); - - // Subscribe is not a blocking call, so we wait here so the program does not terminate. - countdownLatch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS); - } - - private static List getLogs() { - List logs = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - CustomLogData e = new CustomLogData() - .setTime(OffsetDateTime.now()) - .setExtendedColumn("extend column data" + i); - logs.add(e); - } - return logs; - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/UploadLogsErrorHandlingSample.java b/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/UploadLogsErrorHandlingSample.java deleted file mode 100644 index c2443bf05d61..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/UploadLogsErrorHandlingSample.java +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.monitor.ingestion; - -import com.azure.core.exception.HttpResponseException; -import com.azure.identity.DefaultAzureCredentialBuilder; -import com.azure.monitor.ingestion.models.LogsUploadOptions; - -import java.time.OffsetDateTime; -import java.util.ArrayList; -import java.util.List; - -/** - * This sample demonstrates uploading logs to Azure Monitor and configure an error handler to handle any failures - * when uploading logs to the service. - */ -public class UploadLogsErrorHandlingSample { - /** - * Main method to run the sample. - * @param args ignore args. - */ - public static void main(String[] args) { - LogsIngestionClient client = new LogsIngestionClientBuilder() - .endpoint("") - .credential(new DefaultAzureCredentialBuilder().build()) - .buildClient(); - - List dataList = getLogs(); - - // Configure the error handler to inspect HTTP request failure and the logs associated with the failed - // request. A single client.upload() call can be broken down by the client into smaller HTTP requests, so, - // this error handler can be called multiple times if there are multiple HTTP request failures. 
- LogsUploadOptions logsUploadOptions = new LogsUploadOptions() - .setLogsUploadErrorConsumer(uploadLogsError -> { - HttpResponseException responseException = uploadLogsError.getResponseException(); - System.out.println(responseException.getMessage()); - System.out.println("Failed logs count " + uploadLogsError.getFailedLogs().size()); - }); - client.upload("", "", dataList, logsUploadOptions); - } - - private static List getLogs() { - List logs = new ArrayList<>(); - - for (int i = 0; i < 10; i++) { - CustomLogData e = new CustomLogData() - .setTime(OffsetDateTime.now()) - .setExtendedColumn("extend column data" + i) - .setAdditionalContext("more logs context"); - logs.add(e); - } - return logs; - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/UploadLogsSample.java b/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/UploadLogsSample.java deleted file mode 100644 index f6658bbe714d..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/UploadLogsSample.java +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.monitor.ingestion; - -import com.azure.identity.DefaultAzureCredentialBuilder; -import com.azure.monitor.ingestion.models.LogsIngestionAudience; - -import java.time.OffsetDateTime; -import java.util.ArrayList; -import java.util.List; - -/** - * Sample to demonstrate uploading logs to Azure Monitor. - */ -public final class UploadLogsSample { - - /** - * Main method to run the sample. - * @param args ignore args. - */ - public static void main(String[] args) { - LogsIngestionClient client = new LogsIngestionClientBuilder() - .endpoint("") - .audience(LogsIngestionAudience.AZURE_PUBLIC_CLOUD) - .credential(new DefaultAzureCredentialBuilder().build()) - .buildClient(); - - List dataList = getLogs(); - client.upload("", "", dataList); - System.out.println("Logs uploaded successfully"); - } - - private static List getLogs() { - List logs = new ArrayList<>(); - - for (int i = 0; i < 10; i++) { - CustomLogData e = new CustomLogData() - .setTime(OffsetDateTime.now()) - .setExtendedColumn("extend column data" + i) - .setAdditionalContext("more logs context"); - logs.add(e); - } - return logs; - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/codesnippets/LogsIngestionJavadocCodeSnippets.java b/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/codesnippets/LogsIngestionJavadocCodeSnippets.java deleted file mode 100644 index 042e18de2361..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/samples/java/com/azure/monitor/ingestion/codesnippets/LogsIngestionJavadocCodeSnippets.java +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.monitor.ingestion.codesnippets; - -import com.azure.core.credential.TokenCredential; -import com.azure.core.util.Context; -import com.azure.identity.DefaultAzureCredentialBuilder; -import com.azure.monitor.ingestion.LogsIngestionAsyncClient; -import com.azure.monitor.ingestion.LogsIngestionClient; -import com.azure.monitor.ingestion.LogsIngestionClientBuilder; -import com.azure.monitor.ingestion.models.LogsUploadOptions; - -import java.util.List; - -/** - * Class containing javadoc code snippets for {@link LogsIngestionClient}. 
- */ -public class LogsIngestionJavadocCodeSnippets { - - /** - * Code snippet for creating a {@link LogsIngestionClient}. - */ - public void instantiation() { - TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); - // BEGIN: com.azure.monitor.ingestion.LogsIngestionClient.instantiation - LogsIngestionClient logsIngestionClient = new LogsIngestionClientBuilder() - .credential(tokenCredential) - .endpoint("") - .buildClient(); - // END: com.azure.monitor.ingestion.LogsIngestionClient.instantiation - - // BEGIN: com.azure.monitor.ingestion.LogsIngestionAsyncClient.instantiation - LogsIngestionAsyncClient logsIngestionAsyncClient = new LogsIngestionClientBuilder() - .credential(tokenCredential) - .endpoint("") - .buildAsyncClient(); - // END: com.azure.monitor.ingestion.LogsIngestionAsyncClient.instantiation - } - - public void logsUploadSample() { - TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); - LogsIngestionClient logsIngestionClient = new LogsIngestionClientBuilder() - .credential(tokenCredential) - .endpoint("") - .buildClient(); - // BEGIN: com.azure.monitor.ingestion.LogsIngestionClient.upload - List logs = getLogs(); - logsIngestionClient.upload("", "", logs); - System.out.println("Logs uploaded successfully"); - // END: com.azure.monitor.ingestion.LogsIngestionClient.upload - } - - public void logsUploadWithConcurrencySample() { - TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); - LogsIngestionClient logsIngestionClient = new LogsIngestionClientBuilder() - .credential(tokenCredential) - .endpoint("") - .buildClient(); - // BEGIN: com.azure.monitor.ingestion.LogsIngestionClient.uploadWithConcurrency - List logs = getLogs(); - LogsUploadOptions logsUploadOptions = new LogsUploadOptions().setMaxConcurrency(4); - logsIngestionClient.upload("", "", logs, - logsUploadOptions, Context.NONE); - System.out.println("Logs uploaded successfully"); - // END: com.azure.monitor.ingestion.LogsIngestionClient.uploadWithConcurrency - } - - public void logsUploadAsyncSample() { - - TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); - LogsIngestionAsyncClient logsIngestionAsyncClient = new LogsIngestionClientBuilder() - .credential(tokenCredential) - .endpoint("") - .buildAsyncClient(); - // BEGIN: com.azure.monitor.ingestion.LogsIngestionAsyncClient.upload - List logs = getLogs(); - logsIngestionAsyncClient.upload("", "", logs) - .subscribe(); - // END: com.azure.monitor.ingestion.LogsIngestionAsyncClient.upload - } - - public void logsUploadWithConcurrencyAsyncSample() { - TokenCredential tokenCredential = new DefaultAzureCredentialBuilder().build(); - LogsIngestionAsyncClient logsIngestionAsyncClient = new LogsIngestionClientBuilder() - .credential(tokenCredential) - .endpoint("") - .buildAsyncClient(); - // BEGIN: com.azure.monitor.ingestion.LogsIngestionAsyncClient.uploadWithConcurrency - List logs = getLogs(); - LogsUploadOptions logsUploadOptions = new LogsUploadOptions().setMaxConcurrency(4); - logsIngestionAsyncClient.upload("", "", logs, logsUploadOptions) - .subscribe(); - // END: com.azure.monitor.ingestion.LogsIngestionAsyncClient.uploadWithConcurrency - } - - private List getLogs() { - return null; - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/test/java/com/azure/monitor/ingestion/LogData.java b/sdk/monitor/azure-monitor-ingestion/src/test/java/com/azure/monitor/ingestion/LogData.java deleted file mode 100644 index 5afdd12f1e2c..000000000000 --- 
a/sdk/monitor/azure-monitor-ingestion/src/test/java/com/azure/monitor/ingestion/LogData.java +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.monitor.ingestion; - -import com.fasterxml.jackson.annotation.JsonProperty; - -import java.time.OffsetDateTime; - -public class LogData { - - @JsonProperty(value = "Time") - private OffsetDateTime time; - - @JsonProperty(value = "ExtendedColumn") - private String extendedColumn; - - @JsonProperty(value = "AdditionalContext") - private String additionalContext; - - public OffsetDateTime getTime() { - return time; - } - - public LogData setTime(OffsetDateTime time) { - this.time = time; - return this; - } - - public String getExtendedColumn() { - return extendedColumn; - } - - public LogData setExtendedColumn(String extendedColumn) { - this.extendedColumn = extendedColumn; - return this; - } - - public String getAdditionalContext() { - return additionalContext; - } - - public LogData setAdditionalContext(String additionalContext) { - this.additionalContext = additionalContext; - return this; - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/test/java/com/azure/monitor/ingestion/LogsIngestionAsyncClientTest.java b/sdk/monitor/azure-monitor-ingestion/src/test/java/com/azure/monitor/ingestion/LogsIngestionAsyncClientTest.java deleted file mode 100644 index ff90f6b51be0..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/test/java/com/azure/monitor/ingestion/LogsIngestionAsyncClientTest.java +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.monitor.ingestion; - -import com.azure.core.exception.HttpResponseException; -import com.azure.core.http.rest.RequestOptions; -import com.azure.core.test.annotation.LiveOnly; -import com.azure.core.test.annotation.RecordWithoutRequestBody; -import com.azure.core.util.BinaryData; -import com.azure.monitor.ingestion.models.LogsUploadException; -import com.azure.monitor.ingestion.models.LogsUploadOptions; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.condition.DisabledOnOs; -import org.junit.jupiter.api.condition.EnabledIfEnvironmentVariable; -import org.junit.jupiter.api.condition.OS; -import reactor.test.StepVerifier; - -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; - -/** - * Unit tests for {@link LogsIngestionAsyncClient}. 
- */ -public class LogsIngestionAsyncClientTest extends LogsIngestionTestBase { - - @Test - public void testUploadLogs() { - List logs = getObjects(10); - DataValidationPolicy dataValidationPolicy = new DataValidationPolicy(logs); - - LogsIngestionAsyncClient client = clientBuilder.addPolicy(dataValidationPolicy).buildAsyncClient(); - StepVerifier.create(client.upload(dataCollectionRuleId, streamName, logs)).verifyComplete(); - } - - @Test - @DisabledOnOs(OS.MAC) - public void testUploadLogsInBatches() { - List logs = getObjects(10000); - - AtomicInteger count = new AtomicInteger(); - LogsCountPolicy logsCountPolicy = new LogsCountPolicy(); - - LogsIngestionAsyncClient client - = clientBuilder.addPolicy(logsCountPolicy).addPolicy(new BatchCountPolicy(count)).buildAsyncClient(); - - StepVerifier.create(client.upload(dataCollectionRuleId, streamName, logs)).verifyComplete(); - - assertEquals(2, count.get()); - assertEquals(logs.size(), logsCountPolicy.getTotalLogsCount()); - } - - @Test - @DisabledOnOs(OS.MAC) - public void testUploadLogsInBatchesConcurrently() { - List logs = getObjects(10000); - - AtomicInteger count = new AtomicInteger(); - LogsCountPolicy logsCountPolicy = new LogsCountPolicy(); - LogsIngestionAsyncClient client - = clientBuilder.addPolicy(new BatchCountPolicy(count)).addPolicy(logsCountPolicy).buildAsyncClient(); - StepVerifier - .create(client.upload(dataCollectionRuleId, streamName, logs, new LogsUploadOptions().setMaxConcurrency(3))) - .verifyComplete(); - assertEquals(2, count.get()); - assertEquals(logs.size(), logsCountPolicy.getTotalLogsCount()); - } - - @Test - @LiveOnly - public void testUploadLogsPartialFailure() { - // Live Only, as it times out in CI playback mode. TODO: Re-record and update test base to exclude any sanitizers as needed. - List logs = getObjects(100000); - AtomicInteger count = new AtomicInteger(); - LogsCountPolicy logsCountPolicy = new LogsCountPolicy(); - - LogsIngestionAsyncClient client - = clientBuilder.addPolicy(logsCountPolicy).addPolicy(new PartialFailurePolicy(count)).buildAsyncClient(); - - StepVerifier.create(client.upload(dataCollectionRuleId, streamName, logs)).verifyErrorSatisfies(error -> { - assertTrue(error instanceof LogsUploadException); - if (error instanceof LogsUploadException) { - LogsUploadException ex = (LogsUploadException) error; - assertEquals(49460, ex.getFailedLogsCount()); - assertEquals(5, ex.getLogsUploadErrors().size()); - } - }); - assertEquals(logs.size(), logsCountPolicy.getTotalLogsCount()); - } - - @Test - @LiveOnly - public void testUploadLogsPartialFailureWithErrorHandler() { - // Live Only, as it times out in CI playback mode. TODO: Re-record and update test base to exclude any sanitizers as needed. 
- List logs = getObjects(100000); - AtomicInteger count = new AtomicInteger(); - AtomicLong failedLogsCount = new AtomicLong(); - LogsUploadOptions logsUploadOptions = new LogsUploadOptions() - .setLogsUploadErrorConsumer(error -> failedLogsCount.addAndGet(error.getFailedLogs().size())); - LogsCountPolicy logsCountPolicy = new LogsCountPolicy(); - - LogsIngestionAsyncClient client - = clientBuilder.addPolicy(logsCountPolicy).addPolicy(new PartialFailurePolicy(count)).buildAsyncClient(); - - StepVerifier.create(client.upload(dataCollectionRuleId, streamName, logs, logsUploadOptions)).verifyComplete(); - assertEquals(49460, failedLogsCount.get()); - assertEquals(11, count.get()); - assertEquals(logs.size(), logsCountPolicy.getTotalLogsCount()); - } - - @Test - @LiveOnly - public void testUploadLogsStopOnFirstError() { - // Live Only, as it times out in CI playback mode. TODO: Re-record and update test base to exclude any sanitizers as needed. - List logs = getObjects(100000); - AtomicInteger count = new AtomicInteger(); - LogsUploadOptions logsUploadOptions = new LogsUploadOptions().setLogsUploadErrorConsumer(error -> { - // throw on first error - throw error.getResponseException(); - }); - LogsCountPolicy logsCountPolicy = new LogsCountPolicy(); - - LogsIngestionAsyncClient client - = clientBuilder.addPolicy(logsCountPolicy).addPolicy(new PartialFailurePolicy(count)).buildAsyncClient(); - - StepVerifier.create(client.upload(dataCollectionRuleId, streamName, logs, logsUploadOptions)) - .verifyErrorSatisfies(ex -> assertTrue(ex instanceof HttpResponseException)); - assertEquals(2, count.get()); - // this should stop on first error, so, only one request should be sent that contains a subset of logs - assertTrue(logs.size() > logsCountPolicy.getTotalLogsCount()); - } - - @Test - public void testUploadLogsProtocolMethod() { - List logs = getObjects(10); - LogsIngestionAsyncClient client = clientBuilder.buildAsyncClient(); - StepVerifier - .create(client.uploadWithResponse(dataCollectionRuleId, streamName, BinaryData.fromObject(logs), - new RequestOptions())) - .assertNext(response -> assertEquals(204, response.getStatusCode())) - .verifyComplete(); - } - - @Test - @RecordWithoutRequestBody - @EnabledIfEnvironmentVariable( - named = "AZURE_TEST_MODE", - matches = "LIVE", - disabledReason = "Test proxy network connection is timing out for this test in playback mode.") - public void testUploadLargeLogsProtocolMethod() { - List logs = getObjects(375000); - LogsIngestionAsyncClient client = clientBuilder.buildAsyncClient(); - StepVerifier - .create(client.uploadWithResponse(dataCollectionRuleId, streamName, BinaryData.fromObject(logs), - new RequestOptions())) - .verifyErrorMatches(responseException -> (responseException instanceof HttpResponseException) - && ((HttpResponseException) responseException).getResponse().getStatusCode() == 413); - } - -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/test/java/com/azure/monitor/ingestion/LogsIngestionClientBuilderTest.java b/sdk/monitor/azure-monitor-ingestion/src/test/java/com/azure/monitor/ingestion/LogsIngestionClientBuilderTest.java deleted file mode 100644 index fbefc46c635a..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/test/java/com/azure/monitor/ingestion/LogsIngestionClientBuilderTest.java +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package com.azure.monitor.ingestion; - -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -/** - * Unit tests for {@link LogsIngestionClientBuilder}. - */ -public class LogsIngestionClientBuilderTest { - - @Test - public void testBuilderWithoutEndpoint() { - IllegalStateException ex = Assertions.assertThrows(IllegalStateException.class, - () -> new LogsIngestionClientBuilder().buildClient()); - Assertions.assertEquals("endpoint is required to build the client.", ex.getMessage()); - } - - @Test - public void testBuilderWithoutCredential() { - IllegalStateException ex = Assertions.assertThrows(IllegalStateException.class, - () -> new LogsIngestionClientBuilder().endpoint("https://example.com").buildClient()); - Assertions.assertEquals("credential is required to build the client.", ex.getMessage()); - } - - @Test - public void testBuilderWithInvalidEndpoint() { - IllegalArgumentException ex = Assertions.assertThrows(IllegalArgumentException.class, - () -> new LogsIngestionClientBuilder().endpoint("example.com").buildClient()); - Assertions.assertEquals("'endpoint' must be a valid URL.", ex.getMessage()); - } - -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/test/java/com/azure/monitor/ingestion/LogsIngestionClientConcurrencyTest.java b/sdk/monitor/azure-monitor-ingestion/src/test/java/com/azure/monitor/ingestion/LogsIngestionClientConcurrencyTest.java deleted file mode 100644 index 6cf1722091e9..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/test/java/com/azure/monitor/ingestion/LogsIngestionClientConcurrencyTest.java +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.monitor.ingestion; - -import com.azure.core.credential.AccessToken; -import com.azure.core.http.HttpRequest; -import com.azure.core.http.HttpResponse; -import com.azure.core.http.policy.HttpLogDetailLevel; -import com.azure.core.http.policy.HttpLogOptions; -import com.azure.core.test.SyncAsyncExtension; -import com.azure.core.test.annotation.SyncAsyncTest; -import com.azure.core.test.http.MockHttpResponse; -import com.azure.core.test.http.NoOpHttpClient; -import com.azure.core.util.Context; -import com.azure.monitor.ingestion.models.LogsUploadException; -import com.azure.monitor.ingestion.models.LogsUploadOptions; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import reactor.core.publisher.Mono; -import reactor.test.StepVerifier; - -import java.time.Duration; -import java.time.OffsetDateTime; -import java.util.List; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; - -import static com.azure.monitor.ingestion.LogsIngestionTestBase.getObjects; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; - -/** - * Test cases for {@link LogsIngestionClient}. 
- */ -public class LogsIngestionClientConcurrencyTest { - private static final String ENDPOINT = "https://dce.monitor.azure.com"; - private static final String RULE_ID = "dcr-a64851bc17714f0483d1e96b5d84953b"; - private static final String STREAM = "Custom-MyTableRawData"; - private static final int LOGS_IN_BATCH = 9800; // approx - - private LogsIngestionClientBuilder clientBuilder; - - @BeforeEach - void beforeEach() { - clientBuilder = new LogsIngestionClientBuilder() - .credential(request -> Mono.just(new AccessToken("fakeToken", OffsetDateTime.now().plusDays(1)))) - .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) - .endpoint(ENDPOINT); - } - - @SyncAsyncTest - public void testUploadLogsInBatchesConcurrent() { - int concurrency = 10; - int batchCount = 20; - List logs = getObjects(LOGS_IN_BATCH * batchCount); - - TestHttpClient http = new TestHttpClient(false); - clientBuilder.httpClient(http); - LogsUploadOptions uploadOptions = new LogsUploadOptions().setMaxConcurrency(concurrency); - - SyncAsyncExtension.execute(() -> clientBuilder.buildClient().upload(RULE_ID, STREAM, logs, uploadOptions), - () -> clientBuilder.buildAsyncClient().upload(RULE_ID, STREAM, logs, uploadOptions)); - assertEquals(batchCount, http.getCallsCount()); - assertTrue(http.getMaxConcurrentCalls() <= concurrency + 1, - String.format("http.getMaxConcurrentCalls() = %s", http.getMaxConcurrentCalls())); - } - - @Test - public void testUploadLogsPartialFailureConcurrent() { - int concurrency = 4; - int batchCount = 7; - List logs = getObjects(LOGS_IN_BATCH * batchCount); - - TestHttpClient http = new TestHttpClient(true); - clientBuilder.httpClient(http); - LogsUploadOptions uploadOptions = new LogsUploadOptions().setMaxConcurrency(concurrency); - - LogsIngestionClient client = clientBuilder.httpClient(http).buildClient(); - - LogsUploadException uploadLogsException - = assertThrows(LogsUploadException.class, () -> client.upload(RULE_ID, STREAM, logs, uploadOptions)); - - asserError(uploadLogsException); - assertEquals(batchCount, http.getCallsCount()); - } - - @Test - public void testUploadLogsPartialFailureConcurrentAsync() { - int concurrency = 3; - int batchCount = 12; - List logs = getObjects(LOGS_IN_BATCH * batchCount); - - TestHttpClient http = new TestHttpClient(true); - LogsUploadOptions uploadOptions = new LogsUploadOptions().setMaxConcurrency(concurrency); - - StepVerifier - .create(clientBuilder.httpClient(http).buildAsyncClient().upload(RULE_ID, STREAM, logs, uploadOptions)) - .consumeErrorWith(ex -> { - assertTrue(ex instanceof LogsUploadException); - asserError((LogsUploadException) ex); - }) - .verify(); - assertEquals(batchCount, http.getCallsCount()); - } - - private static void asserError(LogsUploadException uploadException) { - assertEquals(LOGS_IN_BATCH, uploadException.getFailedLogsCount(), 200); - assertEquals(1, uploadException.getLogsUploadErrors().size()); - } - - public class TestHttpClient extends NoOpHttpClient { - private final AtomicInteger concurrentCalls = new AtomicInteger(0); - private final AtomicInteger maxConcurrency; - private final AtomicBoolean failSecondRequest; - private final AtomicInteger counter; - - public TestHttpClient(boolean failSecondRequest) { - this.maxConcurrency = new AtomicInteger(); - this.failSecondRequest = new AtomicBoolean(failSecondRequest); - this.counter = new AtomicInteger(); - } - - public Mono send(HttpRequest request) { - return Mono.delay(Duration.ofMillis(1)).map(l -> process(request)); - } - - public 
HttpResponse sendSync(HttpRequest request, Context context) { - return process(request); - } - - public int getCallsCount() { - return counter.get(); - } - - private HttpResponse process(HttpRequest request) { - int c = concurrentCalls.incrementAndGet(); - if (c > maxConcurrency.get()) { - maxConcurrency.set(c); - } - - try { - Thread.sleep(1000); - if (counter.getAndIncrement() == 1 && failSecondRequest.compareAndSet(true, false)) { - return new MockHttpResponse(request, 404); - } - return new MockHttpResponse(request, 204); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } finally { - concurrentCalls.decrementAndGet(); - } - } - - public int getMaxConcurrentCalls() { - return maxConcurrency.get(); - } - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/test/java/com/azure/monitor/ingestion/LogsIngestionClientTest.java b/sdk/monitor/azure-monitor-ingestion/src/test/java/com/azure/monitor/ingestion/LogsIngestionClientTest.java deleted file mode 100644 index fdc41119be78..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/test/java/com/azure/monitor/ingestion/LogsIngestionClientTest.java +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.monitor.ingestion; - -import com.azure.core.exception.HttpResponseException; -import com.azure.core.http.rest.RequestOptions; -import com.azure.core.http.rest.Response; -import com.azure.core.test.annotation.LiveOnly; -import com.azure.core.test.annotation.RecordWithoutRequestBody; -import com.azure.core.util.BinaryData; -import com.azure.monitor.ingestion.models.LogsUploadException; -import com.azure.monitor.ingestion.models.LogsUploadOptions; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.condition.EnabledIfEnvironmentVariable; - -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; - -/** - * Test cases for {@link LogsIngestionClient}. 
- */ -public class LogsIngestionClientTest extends LogsIngestionTestBase { - - @Test - public void testUploadLogs() { - List logs = getObjects(10); - DataValidationPolicy dataValidationPolicy = new DataValidationPolicy(logs); - LogsIngestionClient client = clientBuilder.addPolicy(dataValidationPolicy).buildClient(); - client.upload(dataCollectionRuleId, streamName, logs); - } - - @Test - public void testUploadLogsInBatches() { - List logs = getObjects(10000); - - AtomicInteger count = new AtomicInteger(); - LogsCountPolicy logsCountPolicy = new LogsCountPolicy(); - LogsIngestionClient client - = clientBuilder.addPolicy(new BatchCountPolicy(count)).addPolicy(logsCountPolicy).buildClient(); - client.upload(dataCollectionRuleId, streamName, logs); - assertEquals(2, count.get()); - assertEquals(logs.size(), logsCountPolicy.getTotalLogsCount()); - } - - @Test - public void testUploadLogsInBatchesConcurrently() { - List logs = getObjects(10000); - - AtomicInteger count = new AtomicInteger(); - LogsCountPolicy logsCountPolicy = new LogsCountPolicy(); - LogsIngestionClient client - = clientBuilder.addPolicy(new BatchCountPolicy(count)).addPolicy(logsCountPolicy).buildClient(); - client.upload(dataCollectionRuleId, streamName, logs, new LogsUploadOptions().setMaxConcurrency(3)); - assertEquals(2, count.get()); - assertEquals(logs.size(), logsCountPolicy.getTotalLogsCount()); - } - - @Test - @LiveOnly - public void testUploadLogsPartialFailure() { - // Live Only, as it times out in CI playback mode. TODO: Re-record and update test base to exclude any sanitizers as needed. - List logs = getObjects(100000); - AtomicInteger count = new AtomicInteger(); - LogsCountPolicy logsCountPolicy = new LogsCountPolicy(); - - LogsIngestionClient client - = clientBuilder.addPolicy(new PartialFailurePolicy(count)).addPolicy(logsCountPolicy).buildClient(); - - LogsUploadException uploadLogsException = assertThrows(LogsUploadException.class, () -> { - client.upload(dataCollectionRuleId, streamName, logs); - }); - assertEquals(49460, uploadLogsException.getFailedLogsCount()); - assertEquals(5, uploadLogsException.getLogsUploadErrors().size()); - assertEquals(logs.size(), logsCountPolicy.getTotalLogsCount()); - - } - - @Test - @LiveOnly - public void testUploadLogsPartialFailureWithErrorHandler() { - // Live Only, as it times out in CI playback mode. TODO: Re-record and update test base to exclude any sanitizers as needed. - List logs = getObjects(100000); - AtomicInteger count = new AtomicInteger(); - AtomicLong failedLogsCount = new AtomicLong(); - LogsUploadOptions logsUploadOptions = new LogsUploadOptions() - .setLogsUploadErrorConsumer(error -> failedLogsCount.addAndGet(error.getFailedLogs().size())); - LogsCountPolicy logsCountPolicy = new LogsCountPolicy(); - - LogsIngestionClient client - = clientBuilder.addPolicy(new PartialFailurePolicy(count)).addPolicy(logsCountPolicy).buildClient(); - - client.upload(dataCollectionRuleId, streamName, logs, logsUploadOptions); - assertEquals(11, count.get()); - assertEquals(49460, failedLogsCount.get()); - assertEquals(logs.size(), logsCountPolicy.getTotalLogsCount()); - - } - - @Test - @LiveOnly - public void testUploadLogsStopOnFirstError() { - // Live Only, as it times out in CI playback mode. TODO: Re-record and update test base to exclude any sanitizers as needed. 
- List logs = getObjects(100000); - AtomicInteger count = new AtomicInteger(); - LogsUploadOptions logsUploadOptions = new LogsUploadOptions().setLogsUploadErrorConsumer(error -> { - // throw on first error - throw error.getResponseException(); - }); - LogsCountPolicy logsCountPolicy = new LogsCountPolicy(); - - LogsIngestionClient client - = clientBuilder.addPolicy(new PartialFailurePolicy(count)).addPolicy(logsCountPolicy).buildClient(); - - assertThrows(HttpResponseException.class, - () -> client.upload(dataCollectionRuleId, streamName, logs, logsUploadOptions)); - assertEquals(2, count.get()); - - // only a subset of logs should be sent - assertTrue(logs.size() > logsCountPolicy.getTotalLogsCount()); - } - - @Test - public void testUploadLogsProtocolMethod() { - List logs = getObjects(10); - LogsIngestionClient client = clientBuilder.buildClient(); - Response response = client.uploadWithResponse(dataCollectionRuleId, streamName, - BinaryData.fromObject(logs), new RequestOptions()); - assertEquals(204, response.getStatusCode()); - } - - @Test - @RecordWithoutRequestBody - @EnabledIfEnvironmentVariable( - named = "AZURE_TEST_MODE", - matches = "LIVE", - disabledReason = "Test proxy network connection is timing out for this test in playback mode.") - public void testUploadLargeLogsProtocolMethod() { - List logs = getObjects(375000); - LogsIngestionClient client = clientBuilder.buildClient(); - - HttpResponseException responseException = assertThrows(HttpResponseException.class, () -> client - .uploadWithResponse(dataCollectionRuleId, streamName, BinaryData.fromObject(logs), new RequestOptions())); - assertEquals(413, responseException.getResponse().getStatusCode()); - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/test/java/com/azure/monitor/ingestion/LogsIngestionTestBase.java b/sdk/monitor/azure-monitor-ingestion/src/test/java/com/azure/monitor/ingestion/LogsIngestionTestBase.java deleted file mode 100644 index e758397fabac..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/test/java/com/azure/monitor/ingestion/LogsIngestionTestBase.java +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package com.azure.monitor.ingestion; - -import com.azure.core.credential.TokenCredential; -import com.azure.core.http.HttpPipelineCallContext; -import com.azure.core.http.HttpPipelineNextPolicy; -import com.azure.core.http.HttpPipelineNextSyncPolicy; -import com.azure.core.http.HttpPipelinePosition; -import com.azure.core.http.HttpResponse; -import com.azure.core.http.policy.HttpLogDetailLevel; -import com.azure.core.http.policy.HttpLogOptions; -import com.azure.core.http.policy.HttpPipelinePolicy; -import com.azure.core.http.policy.RetryPolicy; -import com.azure.core.http.policy.RetryStrategy; -import com.azure.core.test.InterceptorManager; -import com.azure.core.test.TestMode; -import com.azure.core.test.TestProxyTestBase; -import com.azure.core.test.models.BodilessMatcher; -import com.azure.core.test.utils.MockTokenCredential; -import com.azure.core.util.BinaryData; -import com.azure.core.util.Configuration; -import com.azure.core.util.logging.ClientLogger; -import com.azure.core.util.logging.LogLevel; -import com.azure.core.util.serializer.JsonSerializerProviders; -import com.azure.core.util.serializer.TypeReference; -import com.azure.identity.AzurePipelinesCredentialBuilder; -import com.azure.identity.DefaultAzureCredentialBuilder; -import reactor.core.publisher.Mono; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.time.Duration; -import java.time.OffsetDateTime; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import java.util.zip.GZIPInputStream; - -import static org.junit.jupiter.api.Assertions.assertEquals; - -/** - * Base test class for logs ingestion client tests. 
- */ -public abstract class LogsIngestionTestBase extends TestProxyTestBase { - private static final ClientLogger LOGGER = new ClientLogger(LogsIngestionTestBase.class); - - protected LogsIngestionClientBuilder clientBuilder; - protected String dataCollectionEndpoint; - protected String dataCollectionRuleId; - protected String streamName; - - @Override - public void beforeTest() { - dataCollectionEndpoint - = Configuration.getGlobalConfiguration().get("AZURE_MONITOR_DCE", "https://dce.monitor.azure.com"); - dataCollectionRuleId = Configuration.getGlobalConfiguration() - .get("AZURE_MONITOR_DCR_ID", "dcr-01584ffffeac4f7abbd4fbc24aa64130"); - streamName = "Custom-MyTableRawData"; - - LogsIngestionClientBuilder clientBuilder - = new LogsIngestionClientBuilder().credential(getTestTokenCredential(interceptorManager)) - .retryPolicy(new RetryPolicy(new RetryStrategy() { - @Override - public int getMaxRetries() { - return 0; - } - - @Override - public Duration calculateRetryDelay(int i) { - return null; - } - })); - if (getTestMode() == TestMode.PLAYBACK) { - interceptorManager.addMatchers(Arrays.asList(new BodilessMatcher())); - clientBuilder.httpClient(interceptorManager.getPlaybackClient()); - } else if (getTestMode() == TestMode.RECORD) { - clientBuilder.addPolicy(interceptorManager.getRecordPolicy()); - } - this.clientBuilder - = clientBuilder.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) - .endpoint(dataCollectionEndpoint); - } - - public class BatchCountPolicy implements HttpPipelinePolicy { - private final AtomicInteger counter; - - BatchCountPolicy(AtomicInteger counter) { - this.counter = counter; - } - - @Override - public Mono process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { - counter.incrementAndGet(); - return next.process(); - } - - @Override - public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) { - counter.incrementAndGet(); - return next.processSync(); - } - - @Override - public HttpPipelinePosition getPipelinePosition() { - return HttpPipelinePosition.PER_CALL; - } - } - - public class PartialFailurePolicy implements HttpPipelinePolicy { - private final AtomicInteger counter; - private final AtomicBoolean changeDcrId = new AtomicBoolean(); - - PartialFailurePolicy(AtomicInteger counter) { - this.counter = counter; - } - - @Override - public Mono process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { - process(context); - return next.process(); - } - - @Override - public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) { - process(context); - return next.processSync(); - } - - private void process(HttpPipelineCallContext context) { - counter.incrementAndGet(); - if (changeDcrId.get()) { - String url = context.getHttpRequest().getUrl().toString().replace(dataCollectionRuleId, "dcr-id"); - context.getHttpRequest().setUrl(url); - changeDcrId.set(false); - } else { - changeDcrId.set(true); - } - } - - @Override - public HttpPipelinePosition getPipelinePosition() { - return HttpPipelinePosition.PER_CALL; - } - } - - public static List getObjects(int logsCount) { - List logs = new ArrayList<>(); - - for (int i = 0; i < logsCount; i++) { - LogData logData = new LogData().setTime(OffsetDateTime.parse("2022-01-01T00:00:00+07:00")) - .setExtendedColumn("test" + i) - .setAdditionalContext("additional logs context"); - logs.add(logData); - } - return logs; - } - - public static class LogsCountPolicy implements 
HttpPipelinePolicy { - - private AtomicLong totalLogsCount = new AtomicLong(); - - @Override - public Mono process(HttpPipelineCallContext httpPipelineCallContext, - HttpPipelineNextPolicy httpPipelineNextPolicy) { - BinaryData bodyAsBinaryData = httpPipelineCallContext.getHttpRequest().getBodyAsBinaryData(); - byte[] requestBytes = unzipRequestBody(bodyAsBinaryData); - - List logs = JsonSerializerProviders.createInstance(true) - .deserializeFromBytes(requestBytes, new TypeReference>() { - }); - totalLogsCount.addAndGet(logs.size()); - return httpPipelineNextPolicy.process(); - } - - public long getTotalLogsCount() { - return this.totalLogsCount.get(); - } - - } - - public static class DataValidationPolicy implements HttpPipelinePolicy { - - private final String expectedJson; - - public DataValidationPolicy(List inputData) { - this.expectedJson = new String(JsonSerializerProviders.createInstance(true).serializeToBytes(inputData)); - } - - @Override - public Mono process(HttpPipelineCallContext httpPipelineCallContext, - HttpPipelineNextPolicy httpPipelineNextPolicy) { - BinaryData bodyAsBinaryData = httpPipelineCallContext.getHttpRequest().getBodyAsBinaryData(); - String actualJson = new String(unzipRequestBody(bodyAsBinaryData)); - assertEquals(expectedJson, actualJson); - return httpPipelineNextPolicy.process(); - } - } - - private static byte[] unzipRequestBody(BinaryData bodyAsBinaryData) { - try { - byte[] buffer = new byte[1024]; - GZIPInputStream gZIPInputStream = new GZIPInputStream(new ByteArrayInputStream(bodyAsBinaryData.toBytes())); - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - int bytesRead; - while ((bytesRead = gZIPInputStream.read(buffer)) > 0) { - outputStream.write(buffer, 0, bytesRead); - } - gZIPInputStream.close(); - outputStream.close(); - return outputStream.toByteArray(); - } catch (IOException exception) { - LOGGER.log(LogLevel.VERBOSE, () -> "Failed to unzip data"); - } - return null; - } - - public static TokenCredential getTestTokenCredential(InterceptorManager interceptorManager) { - if (interceptorManager.isLiveMode()) { - Configuration config = Configuration.getGlobalConfiguration(); - String serviceConnectionId = config.get("AZURESUBSCRIPTION_SERVICE_CONNECTION_ID"); - String clientId = config.get("AZURESUBSCRIPTION_CLIENT_ID"); - String tenantId = config.get("AZURESUBSCRIPTION_TENANT_ID"); - String systemAccessToken = config.get("SYSTEM_ACCESSTOKEN"); - - return new AzurePipelinesCredentialBuilder().systemAccessToken(systemAccessToken) - .clientId(clientId) - .tenantId(tenantId) - .serviceConnectionId(serviceConnectionId) - .build(); - } else if (interceptorManager.isRecordMode()) { - return new DefaultAzureCredentialBuilder().build(); - } else { - return new MockTokenCredential(); - } - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/src/test/java/com/azure/monitor/ingestion/implementation/ConcurrencyLimitingSpliteratorTest.java b/sdk/monitor/azure-monitor-ingestion/src/test/java/com/azure/monitor/ingestion/implementation/ConcurrencyLimitingSpliteratorTest.java deleted file mode 100644 index 1a6eb0dc276e..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/src/test/java/com/azure/monitor/ingestion/implementation/ConcurrencyLimitingSpliteratorTest.java +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package com.azure.monitor.ingestion.implementation; - -import com.azure.core.util.SharedExecutorService; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.parallel.Execution; -import org.junit.jupiter.api.parallel.ExecutionMode; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; - -import java.util.Arrays; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; -import java.util.stream.IntStream; -import java.util.stream.Stream; -import java.util.stream.StreamSupport; - -import static org.junit.jupiter.api.Assertions.assertArrayEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; -import static org.junit.jupiter.api.Assumptions.assumeTrue; - -@Execution(ExecutionMode.SAME_THREAD) -public class ConcurrencyLimitingSpliteratorTest { - private static final int TEST_TIMEOUT_SEC = 30; - - @Test - public void invalidParams() { - assertThrows(NullPointerException.class, () -> new ConcurrencyLimitingSpliterator(null, 1)); - assertThrows(IllegalArgumentException.class, - () -> new ConcurrencyLimitingSpliterator<>(Arrays.asList(1, 2, 3).iterator(), 0)); - } - - @ParameterizedTest - @ValueSource(ints = { 1, 2, 4, 5, 7, 11, 15 }) - public void concurrentCalls(int concurrency) throws ExecutionException, InterruptedException { - assumeTrue(Runtime.getRuntime().availableProcessors() > concurrency); - - List list = IntStream.range(0, 11).boxed().collect(Collectors.toList()); - ConcurrencyLimitingSpliterator spliterator - = new ConcurrencyLimitingSpliterator<>(list.iterator(), concurrency); - - Stream stream = StreamSupport.stream(spliterator, true); - - int effectiveConcurrency = Math.min(list.size(), concurrency); - CountDownLatch latch = new CountDownLatch(effectiveConcurrency); - List processed = SharedExecutorService.getInstance().submit(() -> stream.map(r -> { - latch.countDown(); - try { - Thread.sleep(10); - assertTrue(latch.await(TEST_TIMEOUT_SEC, TimeUnit.SECONDS)); - } catch (InterruptedException e) { - fail("countdown await interrupted"); - } - return r; - }).collect(Collectors.toList())).get(); - - assertArrayEquals(list.toArray(), processed.stream().sorted().toArray()); - } - - @Test - public void concurrencyHigherThanItemsCount() throws ExecutionException, InterruptedException { - int concurrency = 100; - List list = IntStream.range(0, 7).boxed().collect(Collectors.toList()); - ConcurrencyLimitingSpliterator spliterator - = new ConcurrencyLimitingSpliterator<>(list.iterator(), concurrency); - - Stream stream = StreamSupport.stream(spliterator, true); - - AtomicInteger parallel = new AtomicInteger(0); - AtomicInteger maxParallel = new AtomicInteger(0); - List processed = SharedExecutorService.getInstance().submit(() -> stream.map(r -> { - int cur = parallel.incrementAndGet(); - int curMax = maxParallel.get(); - while (cur > curMax && !maxParallel.compareAndSet(curMax, cur)) { - curMax = maxParallel.get(); - } - - try { - Thread.sleep(50); - } catch (InterruptedException e) { - fail("timeout"); - } - - parallel.decrementAndGet(); - return r; - }).collect(Collectors.toList())).get(); - - assertTrue(maxParallel.get() <= list.size()); - assertArrayEquals(list.toArray(), processed.stream().sorted().toArray()); 
- } -} diff --git a/sdk/monitor/azure-monitor-ingestion/swagger/README.md b/sdk/monitor/azure-monitor-ingestion/swagger/README.md deleted file mode 100644 index cb262a4d641d..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/swagger/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# Azure Monitor Ingestion for Java - -> see https://aka.ms/autorest - -This is the AutoRest configuration file for Monitor Ingestion. - ---- -## Getting Started -To build the SDK for Monitor Ingestion, simply [Install AutoRest](https://aka.ms/autorest) and -in this folder, run: - -> `autorest` - -To see additional help and options, run: - -> `autorest --help` - -### Setup -```ps -npm install -g autorest -``` - -### Generation -```ps -cd -autorest -``` - -```yaml -java: true -use: '@autorest/java@4.1.42' -output-folder: ../ -license-header: MICROSOFT_MIT_SMALL -input-file: https://github.com/Azure/azure-rest-api-specs/blob/main/specification/monitor/data-plane/ingestion/stable/2023-01-01/DataCollectionRules.json -namespace: com.azure.monitor.ingestion.implementation -implementation-subpackage: "" -sync-methods: all -required-fields-as-ctor-args: true -credential-types: tokencredential -credential-scopes: https://monitor.azure.com//.default -client-side-validations: true -artifact-id: azure-monitor-ingestion -data-plane: true -enable-sync-stack: true -customization-class: src/main/java/MonitorIngestionCustomizations.java -``` diff --git a/sdk/monitor/azure-monitor-ingestion/swagger/pom.xml b/sdk/monitor/azure-monitor-ingestion/swagger/pom.xml deleted file mode 100644 index 8876e3dcaf2d..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/swagger/pom.xml +++ /dev/null @@ -1,30 +0,0 @@ - - - 4.0.0 - - Microsoft Azure Monitor Ingestion client for Java - This package contains client functionality for Microsoft Monitor Ingestion - - com.azure.tools - azure-monitor-ingestion-autorest-customization - 1.0.0-beta.1 - jar - - - com.azure - azure-code-customization-parent - 1.0.0-beta.1 - ../../../parents/azure-code-customization-parent - - - - - com.azure.tools - azure-autorest-customization - 1.0.0-beta.8 - - - - diff --git a/sdk/monitor/azure-monitor-ingestion/swagger/src/main/java/MonitorIngestionCustomizations.java b/sdk/monitor/azure-monitor-ingestion/swagger/src/main/java/MonitorIngestionCustomizations.java deleted file mode 100644 index f57056331e75..000000000000 --- a/sdk/monitor/azure-monitor-ingestion/swagger/src/main/java/MonitorIngestionCustomizations.java +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -import com.azure.autorest.customization.ClassCustomization; -import com.azure.autorest.customization.Customization; -import com.azure.autorest.customization.LibraryCustomization; -import com.azure.autorest.customization.PackageCustomization; -import com.github.javaparser.StaticJavaParser; -import com.github.javaparser.ast.CompilationUnit; -import com.github.javaparser.ast.NodeList; -import com.github.javaparser.ast.expr.Name; -import com.github.javaparser.ast.modules.ModuleExportsDirective; -import com.github.javaparser.ast.modules.ModuleOpensDirective; -import com.github.javaparser.ast.modules.ModuleRequiresDirective; -import com.github.javaparser.javadoc.Javadoc; -import com.github.javaparser.javadoc.description.JavadocDescription; -import org.slf4j.Logger; - -/** - * Customization class for Monitor. These customizations will be applied on top of the generated code. 
- */ -public class MonitorIngestionCustomizations extends Customization { - - /** - * Customizes the generated code. - * - *
- * - * The following customizations are applied: - * - *
- * 1. The package customization for the package `com.azure.monitor.ingestion.implementation`.
- * - * @param libraryCustomization The library customization. - * @param logger The logger. - */ - @Override - public void customize(LibraryCustomization libraryCustomization, Logger logger) { - monitorIngestionImplementation(libraryCustomization.getPackage("com.azure.monitor.ingestion.implementation")); - CompilationUnit moduleInfo = StaticJavaParser.parse(libraryCustomization.getRawEditor() - .getFileContent("src/main/java/module-info.java")); - moduleInfo.getModule() - .ifPresent(module -> { - module.setName("com.azure.monitor.ingestion"); - module.setDirectives(new NodeList<>( - new ModuleRequiresDirective().setTransitive(true).setName("com.azure.core"), - new ModuleExportsDirective().setName("com.azure.monitor.ingestion"), - new ModuleExportsDirective().setName("com.azure.monitor.ingestion.models"), - new ModuleOpensDirective().setName("com.azure.monitor.ingestion") - .setModuleNames(new NodeList<>(new Name("com.azure.core"))))); - }); - - libraryCustomization.getRawEditor() - .replaceFile("src/main/java/module-info.java", - "// Copyright (c) Microsoft Corporation. All rights reserved.\n" - + "// Licensed under the MIT License.\n" - + "// Code generated by Microsoft (R) AutoRest Code Generator.\n\n" - + moduleInfo); - } - - /** - * Customizes the generated code for the package com.azure.monitor.ingestion.implementation. - * - *
- * - * The following classes are customized: - *
- * 1. IngestionUsingDataCollectionRulesClientBuilder
- * - * @param packageCustomization The package customization. - */ - private void monitorIngestionImplementation(PackageCustomization packageCustomization) { - IngestionUsingDataCollectionRulesClientBuilderCustomization(packageCustomization.getClass("IngestionUsingDataCollectionRulesClientBuilder")); - } - - /** - * Customizes the generated code for `IngestionUsingDataCollectionRulesClientBuilder`. - * - *
- * - * The following customizations are applied: - * - *
- * 1. Adds an import statement for the class `LogsIngestionAudience`.
- * 2. Adds a field `audience` of type `LogsIngestionAudience` to the class.
- * 3. Adds a Javadoc for the field `audience`.
- * 4. Adds the generated annotation to the field `audience`.
- * 5. Adds a setter for the field `audience`.
- * 6. Adds a Javadoc for the setter.
- * 7. Adds the generated annotation to the setter.
- * 8. Replaces the body of the method `createHttpPipeline()` with a custom implementation that sets the - * audience in the `BearerTokenAuthenticationPolicy`.
- * - * @param classCustomization The class customization. - */ - private void IngestionUsingDataCollectionRulesClientBuilderCustomization(ClassCustomization classCustomization) { - classCustomization.customizeAst(ast -> { - ast.addImport("com.azure.monitor.ingestion.models.LogsIngestionAudience"); - ast.getClassByName(classCustomization.getClassName()).ifPresent(clazz -> { - clazz.addPrivateField("LogsIngestionAudience", "audience") - .addAnnotation("Generated") - .setJavadocComment("The audience indicating the authorization scope of log ingestion clients.") - .createSetter() - .setName("audience") - .setType("IngestionUsingDataCollectionRulesClientBuilder") - .setBody(StaticJavaParser.parseBlock("{this.audience = audience; return this; }")) - .addAnnotation("Generated") - .setJavadocComment(new Javadoc(JavadocDescription.parseText("Sets the audience.")) - .addBlockTag("param", "audience", "the audience indicating the authorization scope of log ingestion clients.") - .addBlockTag("return", "the IngestionUsingDataCollectionRulesClientBuilder.")); - - clazz.getMethodsByName("createHttpPipeline").get(0).setBody(StaticJavaParser.parseBlock( - String.join("\n", - "{", - "Configuration buildConfiguration", - " = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration;", - "HttpLogOptions localHttpLogOptions = this.httpLogOptions == null ? new HttpLogOptions() : this.httpLogOptions;", - "ClientOptions localClientOptions = this.clientOptions == null ? new ClientOptions() : this.clientOptions;", - "List policies = new ArrayList<>();", - "String clientName = PROPERTIES.getOrDefault(SDK_NAME, \"UnknownName\");", - "String clientVersion = PROPERTIES.getOrDefault(SDK_VERSION, \"UnknownVersion\");", - "String applicationId = CoreUtils.getApplicationId(localClientOptions, localHttpLogOptions);", - "policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration));", - "policies.add(new RequestIdPolicy());", - "policies.add(new AddHeadersFromContextPolicy());", - "HttpHeaders headers = CoreUtils.createHttpHeadersFromClientOptions(localClientOptions);", - "if (headers != null) {", - " policies.add(new AddHeadersPolicy(headers));", - "}", - "this.pipelinePolicies.stream()", - " .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_CALL)", - " .forEach(p -> policies.add(p));", - "HttpPolicyProviders.addBeforeRetryPolicies(policies);", - "policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions, new RetryPolicy()));", - "policies.add(new AddDatePolicy());", - "if (tokenCredential != null) {", - " policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, audience == null ? 
DEFAULT_SCOPES : new String[] { audience.toString() }));", - "}", - "this.pipelinePolicies.stream()", - " .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_RETRY)", - " .forEach(p -> policies.add(p));", - "HttpPolicyProviders.addAfterRetryPolicies(policies);", - "policies.add(new HttpLoggingPolicy(localHttpLogOptions));", - "HttpPipeline httpPipeline = new HttpPipelineBuilder().policies(policies.toArray(new HttpPipelinePolicy[0]))", - " .httpClient(httpClient)", - " .clientOptions(localClientOptions)", - " .build();", - "return httpPipeline;", - "}" - ) - )); - }); - }); - } -} diff --git a/sdk/monitor/azure-monitor-ingestion/tsp-location.yaml b/sdk/monitor/azure-monitor-ingestion/tsp-location.yaml new file mode 100644 index 000000000000..1c80a9e5ad0b --- /dev/null +++ b/sdk/monitor/azure-monitor-ingestion/tsp-location.yaml @@ -0,0 +1,4 @@ +directory: specification/monitor/Ingestion +commit: b8dc609cd5102739ff319962f442bc9930954366 +repo: Azure/azure-rest-api-specs +additionalDirectories:
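
For orientation, the following is a minimal sketch (not part of the diff) of the upload flow that the removed test classes exercised: build a `LogsIngestionClient` with an endpoint and credential, then call `upload` with a data collection rule ID, stream name, and a collection of logs. The endpoint, DCR immutable ID, and log shape below are placeholders taken from or modeled on the deleted tests, not real resources.

```java
import com.azure.identity.DefaultAzureCredentialBuilder;
import com.azure.monitor.ingestion.LogsIngestionClient;
import com.azure.monitor.ingestion.LogsIngestionClientBuilder;
import com.azure.monitor.ingestion.models.LogsUploadOptions;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class LogsUploadSketch {
    public static void main(String[] args) {
        // Placeholder data collection endpoint; the deleted tests used a similar fake value.
        LogsIngestionClient client = new LogsIngestionClientBuilder()
            .endpoint("https://dce.monitor.azure.com")
            .credential(new DefaultAzureCredentialBuilder().build())
            .buildClient();

        // Logs are plain objects; the client serializes, batches, and gzips them.
        List<Object> logs = new ArrayList<>();
        Map<String, Object> log = new HashMap<>();
        log.put("Time", "2022-01-01T00:00:00Z");
        log.put("ExtendedColumn", "test");
        log.put("AdditionalContext", "additional logs context");
        logs.add(log);

        // Optional: report per-batch failures instead of letting upload throw LogsUploadException.
        LogsUploadOptions options = new LogsUploadOptions()
            .setLogsUploadErrorConsumer(error ->
                System.err.println("Failed to upload " + error.getFailedLogs().size() + " logs"));

        // Placeholder DCR immutable ID and custom stream name.
        client.upload("dcr-00000000000000000000000000000000", "Custom-MyTableRawData", logs, options);
    }
}
```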